VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@100591

Last change on this file since 100591 was 100591, checked in by vboxsync, 17 months ago

VMM/IEM: Must pass the FPU opcode word to the various MCs updating FOP as IEMCPU::uFpuOpcode isn't available during recompiled code execution. bugref:10369

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 471.4 KB
1/* $Id: IEMAll.cpp 100591 2023-07-15 01:20:13Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/** @page pg_iem IEM - Interpreted Execution Manager
30 *
31 * The interpreted execution manager (IEM) is for executing short guest code
32 * sequences that are causing too many exits / virtualization traps. It will
33 * also be used to interpret single instructions, thus replacing the selective
34 * interpreters in EM and IOM.
35 *
36 * Design goals:
37 * - Relatively small footprint, although we favour speed and correctness
38 * over size.
39 * - Reasonably fast.
40 * - Correctly handle lock prefixed instructions.
41 * - Complete instruction set - eventually.
42 * - Refactorable into a recompiler, maybe.
43 * - Replace EMInterpret*.
44 *
45 * Using the existing disassembler has been considered; however, this is thought
46 * to conflict with speed as the disassembler chews things a bit too much while
47 * leaving us with a somewhat complicated state to interpret afterwards.
48 *
49 *
50 * The current code is very much work in progress. You've been warned!
51 *
52 *
53 * @section sec_iem_fpu_instr FPU Instructions
54 *
55 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
56 * same or equivalent instructions on the host FPU. To make life easy, we also
57 * let the FPU prioritize the unmasked exceptions for us. This, however, only
58 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
59 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
60 * can trigger spurious FPU exceptions.
61 *
62 * The guest FPU state is not loaded into the host CPU and kept there till we
63 * leave IEM because the calling conventions have declared an all year open
64 * season on much of the FPU state. For instance an innocent looking call to
65 * memcpy might end up using a whole bunch of XMM or MM registers if the
66 * particular implementation finds it worthwhile.
67 *
68 *
69 * @section sec_iem_logging Logging
70 *
71 * The IEM code uses the \"IEM\" log group for the main logging. The different
72 * logging levels/flags are generally used for the following purposes:
73 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
74 * - Flow (LogFlow) : Basic enter/exit IEM state info.
75 * - Level 2 (Log2) : ?
76 * - Level 3 (Log3) : More detailed enter/exit IEM state info.
77 * - Level 4 (Log4) : Decoding mnemonics w/ EIP.
78 * - Level 5 (Log5) : Decoding details.
79 * - Level 6 (Log6) : Enables/disables the lockstep comparison with REM.
80 * - Level 7 (Log7) : iret++ execution logging.
81 * - Level 8 (Log8) : Memory writes.
82 * - Level 9 (Log9) : Memory reads.
83 * - Level 10 (Log10): TLBs.
84 * - Level 11 (Log11): Unmasked FPU exceptions.
85 *
86 * The SVM (AMD-V) and VMX (VT-x) code has the following assignments:
87 * - Level 1 (Log) : Errors and other major events.
88 * - Flow (LogFlow) : Misc flow stuff (cleanup?)
89 * - Level 2 (Log2) : VM exits.
90 */
91
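/*
 * For illustration, the level assignments above map onto the IPRT logging
 * macros roughly as follows (a minimal sketch; the format strings and
 * variable names are made up):
 *
 * @code
 *    Log(("iemRaiseXcptOrInt: u8Vector=%#x\n", u8Vector));            // level 1: exceptions and other major events
 *    LogFlow(("IEMExecOne: rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));  // basic enter/exit state info
 *    Log4(("decode - %04x:%08RX64 %s\n", uCs, uRip, pszMnemonic));    // decoded mnemonics w/ EIP
 *    Log8(("IEM WR %RGp LB %#zx\n", GCPhys, cbWrite));                // memory writes
 *    Log9(("IEM RD %RGp LB %#zx\n", GCPhys, cbRead));                 // memory reads
 *    Log10(("IEMTlbInvalidateAll\n"));                                // TLB events
 * @endcode
 */
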
92/* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
93#ifdef _MSC_VER
94# pragma warning(disable:4505)
95#endif
96
97
98/*********************************************************************************************************************************
99* Header Files *
100*********************************************************************************************************************************/
101#define LOG_GROUP LOG_GROUP_IEM
102#define VMCPU_INCL_CPUM_GST_CTX
103#include <VBox/vmm/iem.h>
104#include <VBox/vmm/cpum.h>
105#include <VBox/vmm/apic.h>
106#include <VBox/vmm/pdm.h>
107#include <VBox/vmm/pgm.h>
108#include <VBox/vmm/iom.h>
109#include <VBox/vmm/em.h>
110#include <VBox/vmm/hm.h>
111#include <VBox/vmm/nem.h>
112#include <VBox/vmm/gim.h>
113#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
114# include <VBox/vmm/em.h>
115# include <VBox/vmm/hm_svm.h>
116#endif
117#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
118# include <VBox/vmm/hmvmxinline.h>
119#endif
120#include <VBox/vmm/tm.h>
121#include <VBox/vmm/dbgf.h>
122#include <VBox/vmm/dbgftrace.h>
123#include "IEMInternal.h"
124#include <VBox/vmm/vmcc.h>
125#include <VBox/log.h>
126#include <VBox/err.h>
127#include <VBox/param.h>
128#include <VBox/dis.h>
129#include <iprt/asm-math.h>
130#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
131# include <iprt/asm-amd64-x86.h>
132#elif defined(RT_ARCH_ARM64) || defined(RT_ARCH_ARM32)
133# include <iprt/asm-arm.h>
134#endif
135#include <iprt/assert.h>
136#include <iprt/string.h>
137#include <iprt/x86.h>
138
139#include "IEMInline.h"
140
141
142/*********************************************************************************************************************************
143* Structures and Typedefs *
144*********************************************************************************************************************************/
145/**
146 * CPU exception classes.
147 */
148typedef enum IEMXCPTCLASS
149{
150 IEMXCPTCLASS_BENIGN,
151 IEMXCPTCLASS_CONTRIBUTORY,
152 IEMXCPTCLASS_PAGE_FAULT,
153 IEMXCPTCLASS_DOUBLE_FAULT
154} IEMXCPTCLASS;
155
156
157/*********************************************************************************************************************************
158* Global Variables *
159*********************************************************************************************************************************/
160#if defined(IEM_LOG_MEMORY_WRITES)
161/** What IEM just wrote. */
162uint8_t g_abIemWrote[256];
163/** How much IEM just wrote. */
164size_t g_cbIemWrote;
165#endif
166
167
168/*********************************************************************************************************************************
169* Internal Functions *
170*********************************************************************************************************************************/
171static VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,
172 uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT;
173
174
175/**
176 * Calculates IEM_F_PENDING_BRK_XXX (IEM_F_PENDING_BRK_MASK) flags, slow code
177 * path.
178 *
179 * @returns IEM_F_PENDING_BRK_XXX or zero.
180 * @param pVCpu The cross context virtual CPU structure of the
181 * calling thread.
182 *
183 * @note Don't call directly, use iemCalcExecDbgFlags instead.
184 */
185uint32_t iemCalcExecDbgFlagsSlow(PVMCPUCC pVCpu)
186{
187 uint32_t fExec = 0;
188
189 /*
190 * Process guest breakpoints.
191 */
192#define PROCESS_ONE_BP(a_fDr7, a_iBp) do { \
193 if (a_fDr7 & X86_DR7_L_G(a_iBp)) \
194 { \
195 switch (X86_DR7_GET_RW(a_fDr7, a_iBp)) \
196 { \
197 case X86_DR7_RW_EO: \
198 fExec |= IEM_F_PENDING_BRK_INSTR; \
199 break; \
200 case X86_DR7_RW_WO: \
201 case X86_DR7_RW_RW: \
202 fExec |= IEM_F_PENDING_BRK_DATA; \
203 break; \
204 case X86_DR7_RW_IO: \
205 fExec |= IEM_F_PENDING_BRK_X86_IO; \
206 break; \
207 } \
208 } \
209 } while (0)
210
211 uint32_t const fGstDr7 = (uint32_t)pVCpu->cpum.GstCtx.dr[7];
212 if (fGstDr7 & X86_DR7_ENABLED_MASK)
213 {
214 PROCESS_ONE_BP(fGstDr7, 0);
215 PROCESS_ONE_BP(fGstDr7, 1);
216 PROCESS_ONE_BP(fGstDr7, 2);
217 PROCESS_ONE_BP(fGstDr7, 3);
218 }
219
220 /*
221 * Process hypervisor breakpoints.
222 */
223 uint32_t const fHyperDr7 = DBGFBpGetDR7(pVCpu->CTX_SUFF(pVM));
224 if (fHyperDr7 & X86_DR7_ENABLED_MASK)
225 {
226 PROCESS_ONE_BP(fHyperDr7, 0);
227 PROCESS_ONE_BP(fHyperDr7, 1);
228 PROCESS_ONE_BP(fHyperDr7, 2);
229 PROCESS_ONE_BP(fHyperDr7, 3);
230 }
231
232 return fExec;
233}
234
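/*
 * The iemCalcExecDbgFlags wrapper mentioned in the @note above only needs to
 * take this slow path when guest or hypervisor breakpoints are actually armed.
 * A minimal sketch of such a fast path (hypothetical; the real inline version
 * lives in the IEM headers and may check additional conditions):
 *
 * @code
 *    DECLINLINE(uint32_t) iemCalcExecDbgFlagsSketch(PVMCPUCC pVCpu)
 *    {
 *        if (   !(pVCpu->cpum.GstCtx.dr[7] & X86_DR7_ENABLED_MASK)
 *            && !(DBGFBpGetDR7(pVCpu->CTX_SUFF(pVM)) & X86_DR7_ENABLED_MASK))
 *            return 0;                          // no armed breakpoints, nothing pending
 *        return iemCalcExecDbgFlagsSlow(pVCpu); // decode the DR7 R/W fields the slow way
 *    }
 * @endcode
 */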
235
236/**
237 * Initializes the decoder state.
238 *
239 * iemReInitDecoder is mostly a copy of this function.
240 *
241 * @param pVCpu The cross context virtual CPU structure of the
242 * calling thread.
243 * @param fExecOpts Optional execution flags:
244 * - IEM_F_BYPASS_HANDLERS
245 * - IEM_F_X86_DISREGARD_LOCK
246 */
247DECLINLINE(void) iemInitDecoder(PVMCPUCC pVCpu, uint32_t fExecOpts)
248{
249 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
250 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
251 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
252 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
253 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
254 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
255 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
256 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
257 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
258 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
259
260 /* Execution state: */
261 uint32_t fExec;
262 pVCpu->iem.s.fExec = fExec = iemCalcExecFlags(pVCpu) | fExecOpts;
263
264 /* Decoder state: */
265 pVCpu->iem.s.enmDefAddrMode = fExec & IEM_F_MODE_CPUMODE_MASK; /** @todo check if this is correct... */
266 pVCpu->iem.s.enmEffAddrMode = fExec & IEM_F_MODE_CPUMODE_MASK;
267 if ((fExec & IEM_F_MODE_CPUMODE_MASK) != IEMMODE_64BIT)
268 {
269 pVCpu->iem.s.enmDefOpSize = fExec & IEM_F_MODE_CPUMODE_MASK; /** @todo check if this is correct... */
270 pVCpu->iem.s.enmEffOpSize = fExec & IEM_F_MODE_CPUMODE_MASK;
271 }
272 else
273 {
274 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
275 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
276 }
277 pVCpu->iem.s.fPrefixes = 0;
278 pVCpu->iem.s.uRexReg = 0;
279 pVCpu->iem.s.uRexB = 0;
280 pVCpu->iem.s.uRexIndex = 0;
281 pVCpu->iem.s.idxPrefix = 0;
282 pVCpu->iem.s.uVex3rdReg = 0;
283 pVCpu->iem.s.uVexLength = 0;
284 pVCpu->iem.s.fEvexStuff = 0;
285 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
286#ifdef IEM_WITH_CODE_TLB
287 pVCpu->iem.s.pbInstrBuf = NULL;
288 pVCpu->iem.s.offInstrNextByte = 0;
289 pVCpu->iem.s.offCurInstrStart = 0;
290# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
291 pVCpu->iem.s.offOpcode = 0;
292# endif
293# ifdef VBOX_STRICT
294 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
295 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
296 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
297 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
298# endif
299#else
300 pVCpu->iem.s.offOpcode = 0;
301 pVCpu->iem.s.cbOpcode = 0;
302#endif
303 pVCpu->iem.s.offModRm = 0;
304 pVCpu->iem.s.cActiveMappings = 0;
305 pVCpu->iem.s.iNextMapping = 0;
306 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
307
308#ifdef DBGFTRACE_ENABLED
309 switch (IEM_GET_CPU_MODE(pVCpu))
310 {
311 case IEMMODE_64BIT:
312 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.rip);
313 break;
314 case IEMMODE_32BIT:
315 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
316 break;
317 case IEMMODE_16BIT:
318 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
319 break;
320 }
321#endif
322}
323
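/*
 * The fExecOpts parameter is simply an OR of the optional IEM_F_XXX execution
 * flags listed above.  Minimal usage sketch (hypothetical call sites):
 *
 * @code
 *    iemInitDecoder(pVCpu, 0);                         // normal decoding
 *    iemInitDecoder(pVCpu, IEM_F_BYPASS_HANDLERS);     // don't go through access handlers
 *    iemInitDecoder(pVCpu, IEM_F_X86_DISREGARD_LOCK);  // ignore the LOCK prefix restrictions
 * @endcode
 */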
324
325/**
326 * Reinitializes the decoder state for the 2nd+ loop of IEMExecLots.
327 *
328 * This is mostly a copy of iemInitDecoder.
329 *
330 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
331 */
332DECLINLINE(void) iemReInitDecoder(PVMCPUCC pVCpu)
333{
334 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
335 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
336 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
337 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
338 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
339 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
340 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
341 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
342 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
343
344 /* ASSUMES: Anyone changing CPU state affecting the fExec bits will update them! */
345 AssertMsg((pVCpu->iem.s.fExec & ~IEM_F_USER_OPTS) == iemCalcExecFlags(pVCpu),
346 ("fExec=%#x iemCalcExecModeFlags=%#x\n", pVCpu->iem.s.fExec, iemCalcExecFlags(pVCpu)));
347
348 IEMMODE const enmMode = IEM_GET_CPU_MODE(pVCpu);
349 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
350 pVCpu->iem.s.enmEffAddrMode = enmMode;
351 if (enmMode != IEMMODE_64BIT)
352 {
353 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
354 pVCpu->iem.s.enmEffOpSize = enmMode;
355 }
356 else
357 {
358 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
359 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
360 }
361 pVCpu->iem.s.fPrefixes = 0;
362 pVCpu->iem.s.uRexReg = 0;
363 pVCpu->iem.s.uRexB = 0;
364 pVCpu->iem.s.uRexIndex = 0;
365 pVCpu->iem.s.idxPrefix = 0;
366 pVCpu->iem.s.uVex3rdReg = 0;
367 pVCpu->iem.s.uVexLength = 0;
368 pVCpu->iem.s.fEvexStuff = 0;
369 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
370#ifdef IEM_WITH_CODE_TLB
371 if (pVCpu->iem.s.pbInstrBuf)
372 {
373 uint64_t off = (enmMode == IEMMODE_64BIT
374 ? pVCpu->cpum.GstCtx.rip
375 : pVCpu->cpum.GstCtx.eip + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base)
376 - pVCpu->iem.s.uInstrBufPc;
377 if (off < pVCpu->iem.s.cbInstrBufTotal)
378 {
379 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
380 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
381 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
382 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
383 else
384 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
385 }
386 else
387 {
388 pVCpu->iem.s.pbInstrBuf = NULL;
389 pVCpu->iem.s.offInstrNextByte = 0;
390 pVCpu->iem.s.offCurInstrStart = 0;
391 pVCpu->iem.s.cbInstrBuf = 0;
392 pVCpu->iem.s.cbInstrBufTotal = 0;
393 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
394 }
395 }
396 else
397 {
398 pVCpu->iem.s.offInstrNextByte = 0;
399 pVCpu->iem.s.offCurInstrStart = 0;
400 pVCpu->iem.s.cbInstrBuf = 0;
401 pVCpu->iem.s.cbInstrBufTotal = 0;
402# ifdef VBOX_STRICT
403 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
404# endif
405 }
406# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
407 pVCpu->iem.s.offOpcode = 0;
408# endif
409#else /* !IEM_WITH_CODE_TLB */
410 pVCpu->iem.s.cbOpcode = 0;
411 pVCpu->iem.s.offOpcode = 0;
412#endif /* !IEM_WITH_CODE_TLB */
413 pVCpu->iem.s.offModRm = 0;
414 Assert(pVCpu->iem.s.cActiveMappings == 0);
415 pVCpu->iem.s.iNextMapping = 0;
416 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
417 Assert(!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS));
418
419#ifdef DBGFTRACE_ENABLED
420 switch (enmMode)
421 {
422 case IEMMODE_64BIT:
423 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.rip);
424 break;
425 case IEMMODE_32BIT:
426 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
427 break;
428 case IEMMODE_16BIT:
429 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
430 break;
431 }
432#endif
433}
434
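/*
 * Worked example of the instruction buffer reuse check above (made-up
 * numbers): with uInstrBufPc = 0x7000 and cbInstrBufTotal = 0x1000 (one guest
 * page),
 *
 * @code
 *    rip = 0x7234  ->  off = 0x7234 - 0x7000 = 0x0234 <  0x1000  -> keep the mapping
 *    rip = 0x8010  ->  off = 0x8010 - 0x7000 = 0x1010 >= 0x1000  -> drop pbInstrBuf
 * @endcode
 *
 * In the second case the next opcode fetch goes through the code TLB again.
 */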
435
436
437/**
438 * Prefetches opcodes the first time, when starting execution.
439 *
440 * @returns Strict VBox status code.
441 * @param pVCpu The cross context virtual CPU structure of the
442 * calling thread.
443 * @param fExecOpts Optional execution flags:
444 * - IEM_F_BYPASS_HANDLERS
445 * - IEM_F_X86_DISREGARD_LOCK
446 */
447static VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPUCC pVCpu, uint32_t fExecOpts) RT_NOEXCEPT
448{
449 iemInitDecoder(pVCpu, fExecOpts);
450
451#ifndef IEM_WITH_CODE_TLB
452 /*
453 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
454 *
455 * First translate CS:rIP to a physical address.
456 *
457 * Note! The iemOpcodeFetchMoreBytes code depends on this here code to fetch
458 * all relevant bytes from the first page, as it ASSUMES it's only ever
459 * called for dealing with CS.LIM, page crossing and instructions that
460 * are too long.
461 */
462 uint32_t cbToTryRead;
463 RTGCPTR GCPtrPC;
464 if (IEM_IS_64BIT_CODE(pVCpu))
465 {
466 cbToTryRead = GUEST_PAGE_SIZE;
467 GCPtrPC = pVCpu->cpum.GstCtx.rip;
468 if (IEM_IS_CANONICAL(GCPtrPC))
469 cbToTryRead = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
470 else
471 return iemRaiseGeneralProtectionFault0(pVCpu);
472 }
473 else
474 {
475 uint32_t GCPtrPC32 = pVCpu->cpum.GstCtx.eip;
476 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu), ("%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
477 if (GCPtrPC32 <= pVCpu->cpum.GstCtx.cs.u32Limit)
478 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrPC32 + 1;
479 else
480 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
481 if (cbToTryRead) { /* likely */ }
482 else /* overflowed */
483 {
484 Assert(GCPtrPC32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
485 cbToTryRead = UINT32_MAX;
486 }
487 GCPtrPC = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrPC32;
488 Assert(GCPtrPC <= UINT32_MAX);
489 }
490
491 PGMPTWALK Walk;
492 int rc = PGMGstGetPage(pVCpu, GCPtrPC, &Walk);
493 if (RT_SUCCESS(rc))
494 Assert(Walk.fSucceeded); /* probable. */
495 else
496 {
497 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
498# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
499 if (Walk.fFailed & PGM_WALKFAIL_EPT)
500 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
501# endif
502 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, rc);
503 }
504 if ((Walk.fEffective & X86_PTE_US) || IEM_GET_CPL(pVCpu) != 3) { /* likely */ }
505 else
506 {
507 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
508# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
509 if (Walk.fFailed & PGM_WALKFAIL_EPT)
510 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
511# endif
512 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
513 }
514 if (!(Walk.fEffective & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
515 else
516 {
517 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
518# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
519 if (Walk.fFailed & PGM_WALKFAIL_EPT)
520 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
521# endif
522 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
523 }
524 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
525 /** @todo Check reserved bits and such stuff. PGM is better at doing
526 * that, so do it when implementing the guest virtual address
527 * TLB... */
528
529 /*
530 * Read the bytes at this address.
531 */
532 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
533 if (cbToTryRead > cbLeftOnPage)
534 cbToTryRead = cbLeftOnPage;
535 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
536 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
537
538 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
539 {
540 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
541 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
542 { /* likely */ }
543 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
544 {
545 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
546 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
547 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
548 }
549 else
550 {
551 Log((RT_SUCCESS(rcStrict)
552 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
553 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
554 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
555 return rcStrict;
556 }
557 }
558 else
559 {
560 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
561 if (RT_SUCCESS(rc))
562 { /* likely */ }
563 else
564 {
565 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
566 GCPtrPC, GCPhys, cbToTryRead, rc));
567 return rc;
568 }
569 }
570 pVCpu->iem.s.cbOpcode = cbToTryRead;
571#endif /* !IEM_WITH_CODE_TLB */
572 return VINF_SUCCESS;
573}
574
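/*
 * For the non-64-bit path above, the linear PC and the byte budget follow
 * directly from the hidden CS parts.  Worked example (made-up values): with
 * cs.u64Base = 0x00010000, cs.u32Limit = 0x0000ffff and eip = 0x0000f000,
 *
 * @code
 *    GCPtrPC     = 0x00010000 + 0xf000 = 0x0001f000
 *    cbToTryRead = 0xffff - 0xf000 + 1 = 0x1000      // bytes left up to the limit
 * @endcode
 *
 * which is then clipped further to what remains of the guest page and to
 * sizeof(abOpcode) before the read is issued.
 */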
575
576/**
577 * Invalidates the IEM TLBs.
578 *
579 * This is called internally as well as by PGM when moving GC mappings.
580 *
581 * @param pVCpu The cross context virtual CPU structure of the calling
582 * thread.
583 */
584VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPUCC pVCpu)
585{
586#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
587 Log10(("IEMTlbInvalidateAll\n"));
588# ifdef IEM_WITH_CODE_TLB
589 pVCpu->iem.s.cbInstrBufTotal = 0;
590 pVCpu->iem.s.CodeTlb.uTlbRevision += IEMTLB_REVISION_INCR;
591 if (pVCpu->iem.s.CodeTlb.uTlbRevision != 0)
592 { /* very likely */ }
593 else
594 {
595 pVCpu->iem.s.CodeTlb.uTlbRevision = IEMTLB_REVISION_INCR;
596 unsigned i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
597 while (i-- > 0)
598 pVCpu->iem.s.CodeTlb.aEntries[i].uTag = 0;
599 }
600# endif
601
602# ifdef IEM_WITH_DATA_TLB
603 pVCpu->iem.s.DataTlb.uTlbRevision += IEMTLB_REVISION_INCR;
604 if (pVCpu->iem.s.DataTlb.uTlbRevision != 0)
605 { /* very likely */ }
606 else
607 {
608 pVCpu->iem.s.DataTlb.uTlbRevision = IEMTLB_REVISION_INCR;
609 unsigned i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
610 while (i-- > 0)
611 pVCpu->iem.s.DataTlb.aEntries[i].uTag = 0;
612 }
613# endif
614#else
615 RT_NOREF(pVCpu);
616#endif
617}
618
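/*
 * The revision bump above invalidates lazily: an entry only counts as valid
 * when its tag was stamped with the current revision, so nothing needs to be
 * cleared until the revision counter would wrap.  Minimal sketch of the
 * matching side (hypothetical lookup; the real one lives in the TLB code):
 *
 * @code
 *    uint64_t const     uTag  = IEMTLB_CALC_TAG(&pVCpu->iem.s.DataTlb, GCPtrMem); // tag includes the current revision
 *    PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
 *    if (pTlbe->uTag == uTag)
 *    {
 *        // hit: the entry was filled under the current revision
 *    }
 *    else
 *    {
 *        // miss: different page, or an entry made stale by a revision bump -
 *        // refill it via PGMGstGetPage like iemOpcodeFetchBytesJmp does below.
 *    }
 * @endcode
 */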
619
620/**
621 * Invalidates a page in the TLBs.
622 *
623 * @param pVCpu The cross context virtual CPU structure of the calling
624 * thread.
625 * @param GCPtr The address of the page to invalidate
626 * @thread EMT(pVCpu)
627 */
628VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPUCC pVCpu, RTGCPTR GCPtr)
629{
630#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
631 Log10(("IEMTlbInvalidatePage: GCPtr=%RGv\n", GCPtr));
632 GCPtr = IEMTLB_CALC_TAG_NO_REV(GCPtr);
633 Assert(!(GCPtr >> (48 - X86_PAGE_SHIFT)));
634 uintptr_t const idx = IEMTLB_TAG_TO_INDEX(GCPtr);
635
636# ifdef IEM_WITH_CODE_TLB
637 if (pVCpu->iem.s.CodeTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.CodeTlb.uTlbRevision))
638 {
639 pVCpu->iem.s.CodeTlb.aEntries[idx].uTag = 0;
640 if (GCPtr == IEMTLB_CALC_TAG_NO_REV(pVCpu->iem.s.uInstrBufPc))
641 pVCpu->iem.s.cbInstrBufTotal = 0;
642 }
643# endif
644
645# ifdef IEM_WITH_DATA_TLB
646 if (pVCpu->iem.s.DataTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.DataTlb.uTlbRevision))
647 pVCpu->iem.s.DataTlb.aEntries[idx].uTag = 0;
648# endif
649#else
650 NOREF(pVCpu); NOREF(GCPtr);
651#endif
652}
653
654
655#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
656/**
657 * Invalidates both TLBs in a slow fashion following a rollover.
658 *
659 * Worker for IEMTlbInvalidateAllPhysical,
660 * IEMTlbInvalidateAllPhysicalAllCpus, iemOpcodeFetchBytesJmp, iemMemMap,
661 * iemMemMapJmp and others.
662 *
663 * @thread EMT(pVCpu)
664 */
665static void IEMTlbInvalidateAllPhysicalSlow(PVMCPUCC pVCpu)
666{
667 Log10(("IEMTlbInvalidateAllPhysicalSlow\n"));
668 ASMAtomicWriteU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
669 ASMAtomicWriteU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
670
671 unsigned i;
672# ifdef IEM_WITH_CODE_TLB
673 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
674 while (i-- > 0)
675 {
676 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
677 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~( IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
678 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
679 }
680# endif
681# ifdef IEM_WITH_DATA_TLB
682 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
683 while (i-- > 0)
684 {
685 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
686 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~( IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
687 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
688 }
689# endif
690
691}
692#endif
693
694
695/**
696 * Invalidates the host physical aspects of the IEM TLBs.
697 *
698 * This is called internally as well as by PGM when moving GC mappings.
699 *
700 * @param pVCpu The cross context virtual CPU structure of the calling
701 * thread.
702 * @note Currently not used.
703 */
704VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPUCC pVCpu)
705{
706#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
707 /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
708 Log10(("IEMTlbInvalidateAllPhysical\n"));
709
710# ifdef IEM_WITH_CODE_TLB
711 pVCpu->iem.s.cbInstrBufTotal = 0;
712# endif
713 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
714 if (RT_LIKELY(uTlbPhysRev > IEMTLB_PHYS_REV_INCR * 2))
715 {
716 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
717 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
718 }
719 else
720 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
721#else
722 NOREF(pVCpu);
723#endif
724}
725
726
727/**
728 * Invalidates the host physical aspects of the IEM TLBs.
729 *
730 * This is called internally as well as by PGM when moving GC mappings.
731 *
732 * @param pVM The cross context VM structure.
733 * @param idCpuCaller The ID of the calling EMT if available to the caller,
734 * otherwise NIL_VMCPUID.
735 *
736 * @remarks Caller holds the PGM lock.
737 */
738VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVMCC pVM, VMCPUID idCpuCaller)
739{
740#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
741 PVMCPUCC const pVCpuCaller = idCpuCaller >= pVM->cCpus ? VMMGetCpu(pVM) : VMMGetCpuById(pVM, idCpuCaller);
742 if (pVCpuCaller)
743 VMCPU_ASSERT_EMT(pVCpuCaller);
744 Log10(("IEMTlbInvalidateAllPhysicalAllCpus\n"));
745
746 VMCC_FOR_EACH_VMCPU(pVM)
747 {
748# ifdef IEM_WITH_CODE_TLB
749 if (pVCpuCaller == pVCpu)
750 pVCpu->iem.s.cbInstrBufTotal = 0;
751# endif
752
753 uint64_t const uTlbPhysRevPrev = ASMAtomicUoReadU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev);
754 uint64_t uTlbPhysRevNew = uTlbPhysRevPrev + IEMTLB_PHYS_REV_INCR;
755 if (RT_LIKELY(uTlbPhysRevNew > IEMTLB_PHYS_REV_INCR * 2))
756 { /* likely */}
757 else if (pVCpuCaller == pVCpu)
758 uTlbPhysRevNew = IEMTLB_PHYS_REV_INCR;
759 else
760 {
761 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
762 continue;
763 }
764 ASMAtomicCmpXchgU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev);
765 ASMAtomicCmpXchgU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev);
766 }
767 VMCC_FOR_EACH_VMCPU_END(pVM);
768
769#else
770 RT_NOREF(pVM, idCpuCaller);
771#endif
772}
773
774
775/**
776 * Flushes the prefetch buffer, light version.
777 */
778void iemOpcodeFlushLight(PVMCPUCC pVCpu, uint8_t cbInstr)
779{
780#ifndef IEM_WITH_CODE_TLB
781 pVCpu->iem.s.cbOpcode = cbInstr;
782#else
783 RT_NOREF(pVCpu, cbInstr);
784#endif
785}
786
787
788/**
789 * Flushes the prefetch buffer, heavy version.
790 */
791void iemOpcodeFlushHeavy(PVMCPUCC pVCpu, uint8_t cbInstr)
792{
793#ifndef IEM_WITH_CODE_TLB
794 pVCpu->iem.s.cbOpcode = cbInstr; /* Note! SVM and VT-x may set this to zero on exit, rather than the instruction length. */
795#elif 1
796 pVCpu->iem.s.pbInstrBuf = NULL;
797 RT_NOREF(cbInstr);
798#else
799 RT_NOREF(pVCpu, cbInstr);
800#endif
801}
802
803
804
805#ifdef IEM_WITH_CODE_TLB
806
807/**
808 * Tries to fetch @a cbDst opcode bytes, raises the appropriate exception on
809 * failure and jumps.
810 *
811 * We end up here for a number of reasons:
812 * - pbInstrBuf isn't yet initialized.
813 * - Advancing beyond the buffer boundary (e.g. cross page).
814 * - Advancing beyond the CS segment limit.
815 * - Fetching from non-mappable page (e.g. MMIO).
816 *
817 * @param pVCpu The cross context virtual CPU structure of the
818 * calling thread.
819 * @param pvDst Where to return the bytes.
820 * @param cbDst Number of bytes to read. A value of zero is
821 * allowed for initializing pbInstrBuf (the
822 * recompiler does this). In this case it is best
823 * to set pbInstrBuf to NULL prior to the call.
824 */
825void iemOpcodeFetchBytesJmp(PVMCPUCC pVCpu, size_t cbDst, void *pvDst) IEM_NOEXCEPT_MAY_LONGJMP
826{
827# ifdef IN_RING3
828 for (;;)
829 {
830 Assert(cbDst <= 8);
831 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
832
833 /*
834 * We might have a partial buffer match, deal with that first to make the
835 * rest simpler. This is the first part of the cross page/buffer case.
836 */
837 if (pVCpu->iem.s.pbInstrBuf != NULL)
838 {
839 if (offBuf < pVCpu->iem.s.cbInstrBuf)
840 {
841 Assert(offBuf + cbDst > pVCpu->iem.s.cbInstrBuf);
842 uint32_t const cbCopy = pVCpu->iem.s.cbInstrBuf - pVCpu->iem.s.offInstrNextByte;
843 memcpy(pvDst, &pVCpu->iem.s.pbInstrBuf[offBuf], cbCopy);
844
845 cbDst -= cbCopy;
846 pvDst = (uint8_t *)pvDst + cbCopy;
847 offBuf += cbCopy;
848 pVCpu->iem.s.offInstrNextByte += offBuf;
849 }
850 }
851
852 /*
853 * Check segment limit, figuring how much we're allowed to access at this point.
854 *
855 * We will fault immediately if RIP is past the segment limit / in non-canonical
856 * territory. If we do continue, there are one or more bytes to read before we
857 * end up in trouble and we need to do that first before faulting.
858 */
859 RTGCPTR GCPtrFirst;
860 uint32_t cbMaxRead;
861 if (IEM_IS_64BIT_CODE(pVCpu))
862 {
863 GCPtrFirst = pVCpu->cpum.GstCtx.rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
864 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
865 { /* likely */ }
866 else
867 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
868 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
869 }
870 else
871 {
872 GCPtrFirst = pVCpu->cpum.GstCtx.eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
873 /* Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu)); - this is allowed */
874 if (RT_LIKELY((uint32_t)GCPtrFirst <= pVCpu->cpum.GstCtx.cs.u32Limit))
875 { /* likely */ }
876 else /** @todo For CPUs older than the 386, we should not necessarily generate \#GP here but wrap around! */
877 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
878 cbMaxRead = pVCpu->cpum.GstCtx.cs.u32Limit - (uint32_t)GCPtrFirst + 1;
879 if (cbMaxRead != 0)
880 { /* likely */ }
881 else
882 {
883 /* Overflowed because address is 0 and limit is max. */
884 Assert(GCPtrFirst == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
885 cbMaxRead = X86_PAGE_SIZE;
886 }
887 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base;
888 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
889 if (cbMaxRead2 < cbMaxRead)
890 cbMaxRead = cbMaxRead2;
891 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
892 }
893
894 /*
895 * Get the TLB entry for this piece of code.
896 */
897 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.CodeTlb, GCPtrFirst);
898 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.CodeTlb, uTag);
899 if (pTlbe->uTag == uTag)
900 {
901 /* likely when executing lots of code, otherwise unlikely */
902# ifdef VBOX_WITH_STATISTICS
903 pVCpu->iem.s.CodeTlb.cTlbHits++;
904# endif
905 }
906 else
907 {
908 pVCpu->iem.s.CodeTlb.cTlbMisses++;
909 PGMPTWALK Walk;
910 int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &Walk);
911 if (RT_FAILURE(rc))
912 {
913#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
914 /** @todo Nested VMX: Need to handle EPT violation/misconfig here? */
915 Assert(!(Walk.fFailed & PGM_WALKFAIL_EPT));
916#endif
917 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
918 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, rc);
919 }
920
921 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
922 Assert(Walk.fSucceeded);
923 pTlbe->uTag = uTag;
924 pTlbe->fFlagsAndPhysRev = (~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A))
925 | (Walk.fEffective >> X86_PTE_PAE_BIT_NX);
926 pTlbe->GCPhys = Walk.GCPhys;
927 pTlbe->pbMappingR3 = NULL;
928 }
929
930 /*
931 * Check TLB page table level access flags.
932 */
933 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
934 {
935 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && IEM_GET_CPL(pVCpu) == 3)
936 {
937 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
938 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
939 }
940 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
941 {
942 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrFirst));
943 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
944 }
945 }
946
947 /*
948 * Look up the physical page info if necessary.
949 */
950 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
951 { /* not necessary */ }
952 else
953 {
954 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
955 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
956 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
957 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
958 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
959 { /* likely */ }
960 else
961 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
962 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
963 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_UNASSIGNED);
964 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
965 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
966 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
967 }
968
969# if defined(IN_RING3) || defined(IN_RING0) /** @todo fixme */
970 /*
971 * Try do a direct read using the pbMappingR3 pointer.
972 */
973 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
974 == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
975 {
976 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
977 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
978 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
979 {
980 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
981 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
982 }
983 else
984 {
985 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
986 if (cbInstr + (uint32_t)cbDst <= 15)
987 {
988 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
989 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
990 }
991 else
992 {
993 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0)\n",
994 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, cbDst));
995 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
996 }
997 }
998 if (cbDst <= cbMaxRead)
999 {
1000 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
1001 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1002 pVCpu->iem.s.GCPhysInstrBuf = pTlbe->GCPhys;
1003 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
1004 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
1005 return;
1006 }
1007 pVCpu->iem.s.pbInstrBuf = NULL;
1008
1009 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
1010 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
1011 }
1012# else
1013# error "refactor as needed"
1014 /*
1015 * There is no special read handling, so we can read a bit more and
1016 * put it in the prefetch buffer.
1017 */
1018 if ( cbDst < cbMaxRead
1019 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1020 {
1021 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
1022 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
1023 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1024 { /* likely */ }
1025 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1026 {
1027 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1028 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1029 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1030 AssertStmt(rcStrict == VINF_SUCCESS, IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict)));
1031 }
1032 else
1033 {
1034 Log((RT_SUCCESS(rcStrict)
1035 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1036 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1037 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1038 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1039 }
1040 }
1041# endif
1042 /*
1043 * Special read handling, so only read exactly what's needed.
1044 * This is a highly unlikely scenario.
1045 */
1046 else
1047 {
1048 pVCpu->iem.s.CodeTlb.cTlbSlowReadPath++;
1049
1050 /* Check instruction length. */
1051 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1052 if (RT_LIKELY(cbInstr + cbDst <= 15))
1053 { /* likely */ }
1054 else
1055 {
1056 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0) [slow]\n",
1057 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, cbDst));
1058 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1059 }
1060
1061 /* Do the reading. */
1062 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
1063 if (cbToRead > 0)
1064 {
1065 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
1066 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
1067 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1068 { /* likely */ }
1069 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1070 {
1071 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1072 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1073 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1074 AssertStmt(rcStrict == VINF_SUCCESS, IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict)));
1075 }
1076 else
1077 {
1078 Log((RT_SUCCESS(rcStrict)
1079 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1080 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1081 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1082 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1083 }
1084 }
1085
1086 /* Update the state and probably return. */
1087 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1088 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1089 pVCpu->iem.s.offInstrNextByte = offPg + cbInstr + cbToRead;
1090 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead + cbInstr) - cbToRead - cbInstr;
1091 pVCpu->iem.s.cbInstrBufTotal = X86_PAGE_SIZE;
1092 pVCpu->iem.s.GCPhysInstrBuf = pTlbe->GCPhys;
1093 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1094 pVCpu->iem.s.pbInstrBuf = NULL;
1095 if (cbToRead == cbDst)
1096 return;
1097 }
1098
1099 /*
1100 * More to read, loop.
1101 */
1102 cbDst -= cbMaxRead;
1103 pvDst = (uint8_t *)pvDst + cbMaxRead;
1104 }
1105# else /* !IN_RING3 */
1106 RT_NOREF(pvDst, cbDst);
1107 if (pvDst || cbDst)
1108 IEM_DO_LONGJMP(pVCpu, VERR_INTERNAL_ERROR);
1109# endif /* !IN_RING3 */
1110}
1111
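/*
 * Both branches above enforce the architectural 15 byte instruction length
 * limit.  Worked example (made-up sizes): with 12 bytes of the current
 * instruction already fetched (cbInstr = 12), a request for another 4 bytes
 * gives cbInstr + cbDst = 16 > 15, so \#GP(0) is raised instead of reading on.
 */
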
1112#else /* !IEM_WITH_CODE_TLB */
1113
1114/**
1115 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
1116 * exception if it fails.
1117 *
1118 * @returns Strict VBox status code.
1119 * @param pVCpu The cross context virtual CPU structure of the
1120 * calling thread.
1121 * @param cbMin The minimum number of bytes relative to offOpcode
1122 * that must be read.
1123 */
1124VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPUCC pVCpu, size_t cbMin) RT_NOEXCEPT
1125{
1126 /*
1127 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1128 *
1129 * First translate CS:rIP to a physical address.
1130 */
1131 uint8_t const cbOpcode = pVCpu->iem.s.cbOpcode;
1132 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
1133 uint8_t const cbLeft = cbOpcode - offOpcode;
1134 Assert(cbLeft < cbMin);
1135 Assert(cbOpcode <= sizeof(pVCpu->iem.s.abOpcode));
1136
1137 uint32_t cbToTryRead;
1138 RTGCPTR GCPtrNext;
1139 if (IEM_IS_64BIT_CODE(pVCpu))
1140 {
1141 GCPtrNext = pVCpu->cpum.GstCtx.rip + cbOpcode;
1142 if (!IEM_IS_CANONICAL(GCPtrNext))
1143 return iemRaiseGeneralProtectionFault0(pVCpu);
1144 cbToTryRead = GUEST_PAGE_SIZE - (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1145 }
1146 else
1147 {
1148 uint32_t GCPtrNext32 = pVCpu->cpum.GstCtx.eip;
1149 /* Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu)); - this is allowed */
1150 GCPtrNext32 += cbOpcode;
1151 if (GCPtrNext32 > pVCpu->cpum.GstCtx.cs.u32Limit)
1152 /** @todo For CPUs older than the 386, we should not generate \#GP here but wrap around! */
1153 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1154 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrNext32 + 1;
1155 if (!cbToTryRead) /* overflowed */
1156 {
1157 Assert(GCPtrNext32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1158 cbToTryRead = UINT32_MAX;
1159 /** @todo check out wrapping around the code segment. */
1160 }
1161 if (cbToTryRead < cbMin - cbLeft)
1162 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1163 GCPtrNext = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrNext32;
1164
1165 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1166 if (cbToTryRead > cbLeftOnPage)
1167 cbToTryRead = cbLeftOnPage;
1168 }
1169
1170 /* Restrict to opcode buffer space.
1171
1172 We're making ASSUMPTIONS here based on work done previously in
1173 iemInitDecoderAndPrefetchOpcodes, where bytes from the first page will
1174 be fetched in case of an instruction crossing two pages. */
1175 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - cbOpcode)
1176 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - cbOpcode;
1177 if (RT_LIKELY(cbToTryRead + cbLeft >= cbMin))
1178 { /* likely */ }
1179 else
1180 {
1181 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0)\n",
1182 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, offOpcode, cbMin));
1183 return iemRaiseGeneralProtectionFault0(pVCpu);
1184 }
1185
1186 PGMPTWALK Walk;
1187 int rc = PGMGstGetPage(pVCpu, GCPtrNext, &Walk);
1188 if (RT_FAILURE(rc))
1189 {
1190 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
1191#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1192 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1193 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
1194#endif
1195 return iemRaisePageFault(pVCpu, GCPtrNext, 1, IEM_ACCESS_INSTRUCTION, rc);
1196 }
1197 if (!(Walk.fEffective & X86_PTE_US) && IEM_GET_CPL(pVCpu) == 3)
1198 {
1199 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
1200#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1201 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1202 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
1203#endif
1204 return iemRaisePageFault(pVCpu, GCPtrNext, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1205 }
1206 if ((Walk.fEffective & X86_PTE_PAE_NX) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
1207 {
1208 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
1209#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1210 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1211 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
1212#endif
1213 return iemRaisePageFault(pVCpu, GCPtrNext, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1214 }
1215 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1216 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, cbOpcode));
1217 /** @todo Check reserved bits and such stuff. PGM is better at doing
1218 * that, so do it when implementing the guest virtual address
1219 * TLB... */
1220
1221 /*
1222 * Read the bytes at this address.
1223 *
1224 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
1225 * and since PATM should only patch the start of an instruction there
1226 * should be no need to check again here.
1227 */
1228 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
1229 {
1230 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[cbOpcode],
1231 cbToTryRead, PGMACCESSORIGIN_IEM);
1232 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1233 { /* likely */ }
1234 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1235 {
1236 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1237 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1238 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1239 }
1240 else
1241 {
1242 Log((RT_SUCCESS(rcStrict)
1243 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1244 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1245 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1246 return rcStrict;
1247 }
1248 }
1249 else
1250 {
1251 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[cbOpcode], GCPhys, cbToTryRead);
1252 if (RT_SUCCESS(rc))
1253 { /* likely */ }
1254 else
1255 {
1256 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
1257 return rc;
1258 }
1259 }
1260 pVCpu->iem.s.cbOpcode = cbOpcode + cbToTryRead;
1261 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
1262
1263 return VINF_SUCCESS;
1264}
1265
1266#endif /* !IEM_WITH_CODE_TLB */
1267#ifndef IEM_WITH_SETJMP
1268
1269/**
1270 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
1271 *
1272 * @returns Strict VBox status code.
1273 * @param pVCpu The cross context virtual CPU structure of the
1274 * calling thread.
1275 * @param pb Where to return the opcode byte.
1276 */
1277VBOXSTRICTRC iemOpcodeGetNextU8Slow(PVMCPUCC pVCpu, uint8_t *pb) RT_NOEXCEPT
1278{
1279 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1280 if (rcStrict == VINF_SUCCESS)
1281 {
1282 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1283 *pb = pVCpu->iem.s.abOpcode[offOpcode];
1284 pVCpu->iem.s.offOpcode = offOpcode + 1;
1285 }
1286 else
1287 *pb = 0;
1288 return rcStrict;
1289}
1290
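/*
 * The error handling around these non-setjmp getters looks like the following
 * minimal sketch (hypothetical call site; the real decoder goes through the
 * inline fast-path getters, e.g. the IEM_OPCODE_GET_NEXT_U8 macro, which fall
 * back to these Slow variants):
 *
 * @code
 *    uint8_t bOpcode;
 *    VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &bOpcode);
 *    if (rcStrict != VINF_SUCCESS)
 *        return rcStrict;                // propagate #PF/#GP or an informational status
 *    // ... decode bOpcode ...
 * @endcode
 */
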
1291#else /* IEM_WITH_SETJMP */
1292
1293/**
1294 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
1295 *
1296 * @returns The opcode byte.
1297 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1298 */
1299uint8_t iemOpcodeGetNextU8SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1300{
1301# ifdef IEM_WITH_CODE_TLB
1302 uint8_t u8;
1303 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
1304 return u8;
1305# else
1306 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1307 if (rcStrict == VINF_SUCCESS)
1308 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
1309 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1310# endif
1311}
1312
1313#endif /* IEM_WITH_SETJMP */
1314
1315#ifndef IEM_WITH_SETJMP
1316
1317/**
1318 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
1319 *
1320 * @returns Strict VBox status code.
1321 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1322 * @param pu16 Where to return the opcode word.
1323 */
1324VBOXSTRICTRC iemOpcodeGetNextS8SxU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
1325{
1326 uint8_t u8;
1327 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1328 if (rcStrict == VINF_SUCCESS)
1329 *pu16 = (int8_t)u8;
1330 return rcStrict;
1331}
1332
1333
1334/**
1335 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
1336 *
1337 * @returns Strict VBox status code.
1338 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1339 * @param pu32 Where to return the opcode dword.
1340 */
1341VBOXSTRICTRC iemOpcodeGetNextS8SxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1342{
1343 uint8_t u8;
1344 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1345 if (rcStrict == VINF_SUCCESS)
1346 *pu32 = (int8_t)u8;
1347 return rcStrict;
1348}
1349
1350
1351/**
1352 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
1353 *
1354 * @returns Strict VBox status code.
1355 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1356 * @param pu64 Where to return the opcode qword.
1357 */
1358VBOXSTRICTRC iemOpcodeGetNextS8SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1359{
1360 uint8_t u8;
1361 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1362 if (rcStrict == VINF_SUCCESS)
1363 *pu64 = (int8_t)u8;
1364 return rcStrict;
1365}
1366
1367#endif /* !IEM_WITH_SETJMP */
1368
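/*
 * The (int8_t) casts above perform the actual sign extension.  Worked
 * example: an opcode byte of 0xfe (-2) yields
 *
 * @code
 *    *pu16 = (int8_t)0xfe   ->   0xfffe
 *    *pu32 = (int8_t)0xfe   ->   0xfffffffe
 *    *pu64 = (int8_t)0xfe   ->   0xfffffffffffffffe
 * @endcode
 *
 * while a byte with the sign bit clear, e.g. 0x7f, is passed through unchanged.
 */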
1369
1370#ifndef IEM_WITH_SETJMP
1371
1372/**
1373 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
1374 *
1375 * @returns Strict VBox status code.
1376 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1377 * @param pu16 Where to return the opcode word.
1378 */
1379VBOXSTRICTRC iemOpcodeGetNextU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
1380{
1381 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1382 if (rcStrict == VINF_SUCCESS)
1383 {
1384 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1385# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1386 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1387# else
1388 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1389# endif
1390 pVCpu->iem.s.offOpcode = offOpcode + 2;
1391 }
1392 else
1393 *pu16 = 0;
1394 return rcStrict;
1395}
1396
1397#else /* IEM_WITH_SETJMP */
1398
1399/**
1400 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error
1401 *
1402 * @returns The opcode word.
1403 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1404 */
1405uint16_t iemOpcodeGetNextU16SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1406{
1407# ifdef IEM_WITH_CODE_TLB
1408 uint16_t u16;
1409 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
1410 return u16;
1411# else
1412 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1413 if (rcStrict == VINF_SUCCESS)
1414 {
1415 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1416 pVCpu->iem.s.offOpcode += 2;
1417# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1418 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1419# else
1420 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1421# endif
1422 }
1423 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1424# endif
1425}
1426
1427#endif /* IEM_WITH_SETJMP */
1428
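/*
 * The RT_MAKE_U16 composition above reflects the little endian opcode stream:
 * the byte at offOpcode becomes the low half.  Worked example: opcode bytes
 * 0x34 0x12 yield
 *
 * @code
 *    RT_MAKE_U16(0x34, 0x12) == 0x1234
 * @endcode
 *
 * and RT_MAKE_U32_FROM_U8 / RT_MAKE_U64_FROM_U8 in the wider getters below
 * likewise take the bytes in memory order, least significant first.
 */
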
1429#ifndef IEM_WITH_SETJMP
1430
1431/**
1432 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
1433 *
1434 * @returns Strict VBox status code.
1435 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1436 * @param pu32 Where to return the opcode double word.
1437 */
1438VBOXSTRICTRC iemOpcodeGetNextU16ZxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1439{
1440 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1441 if (rcStrict == VINF_SUCCESS)
1442 {
1443 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1444 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1445 pVCpu->iem.s.offOpcode = offOpcode + 2;
1446 }
1447 else
1448 *pu32 = 0;
1449 return rcStrict;
1450}
1451
1452
1453/**
1454 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
1455 *
1456 * @returns Strict VBox status code.
1457 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1458 * @param pu64 Where to return the opcode quad word.
1459 */
1460VBOXSTRICTRC iemOpcodeGetNextU16ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1461{
1462 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1463 if (rcStrict == VINF_SUCCESS)
1464 {
1465 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1466 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1467 pVCpu->iem.s.offOpcode = offOpcode + 2;
1468 }
1469 else
1470 *pu64 = 0;
1471 return rcStrict;
1472}
1473
1474#endif /* !IEM_WITH_SETJMP */
1475
1476#ifndef IEM_WITH_SETJMP
1477
1478/**
1479 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
1480 *
1481 * @returns Strict VBox status code.
1482 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1483 * @param pu32 Where to return the opcode dword.
1484 */
1485VBOXSTRICTRC iemOpcodeGetNextU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1486{
1487 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1488 if (rcStrict == VINF_SUCCESS)
1489 {
1490 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1491# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1492 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1493# else
1494 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1495 pVCpu->iem.s.abOpcode[offOpcode + 1],
1496 pVCpu->iem.s.abOpcode[offOpcode + 2],
1497 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1498# endif
1499 pVCpu->iem.s.offOpcode = offOpcode + 4;
1500 }
1501 else
1502 *pu32 = 0;
1503 return rcStrict;
1504}
1505
1506#else /* IEM_WITH_SETJMP */
1507
1508/**
1509 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
1510 *
1511 * @returns The opcode dword.
1512 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1513 */
1514uint32_t iemOpcodeGetNextU32SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1515{
1516# ifdef IEM_WITH_CODE_TLB
1517 uint32_t u32;
1518 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
1519 return u32;
1520# else
1521 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1522 if (rcStrict == VINF_SUCCESS)
1523 {
1524 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1525 pVCpu->iem.s.offOpcode = offOpcode + 4;
1526# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1527 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1528# else
1529 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1530 pVCpu->iem.s.abOpcode[offOpcode + 1],
1531 pVCpu->iem.s.abOpcode[offOpcode + 2],
1532 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1533# endif
1534 }
1535 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1536# endif
1537}
1538
1539#endif /* IEM_WITH_SETJMP */
1540
1541#ifndef IEM_WITH_SETJMP
1542
1543/**
1544 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
1545 *
1546 * @returns Strict VBox status code.
1547 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1548 * @param pu64 Where to return the zero-extended opcode dword.
1549 */
1550VBOXSTRICTRC iemOpcodeGetNextU32ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1551{
1552 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1553 if (rcStrict == VINF_SUCCESS)
1554 {
1555 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1556 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1557 pVCpu->iem.s.abOpcode[offOpcode + 1],
1558 pVCpu->iem.s.abOpcode[offOpcode + 2],
1559 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1560 pVCpu->iem.s.offOpcode = offOpcode + 4;
1561 }
1562 else
1563 *pu64 = 0;
1564 return rcStrict;
1565}
1566
1567
1568/**
1569 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
1570 *
1571 * @returns Strict VBox status code.
1572 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1573 * @param pu64 Where to return the opcode dword, sign extended to a quad word.
1574 */
1575VBOXSTRICTRC iemOpcodeGetNextS32SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1576{
1577 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1578 if (rcStrict == VINF_SUCCESS)
1579 {
1580 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1581 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1582 pVCpu->iem.s.abOpcode[offOpcode + 1],
1583 pVCpu->iem.s.abOpcode[offOpcode + 2],
1584 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1585 pVCpu->iem.s.offOpcode = offOpcode + 4;
1586 }
1587 else
1588 *pu64 = 0;
1589 return rcStrict;
1590}
1591
1592#endif /* !IEM_WITH_SETJMP */
1593
1594#ifndef IEM_WITH_SETJMP
1595
1596/**
1597 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
1598 *
1599 * @returns Strict VBox status code.
1600 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1601 * @param pu64 Where to return the opcode qword.
1602 */
1603VBOXSTRICTRC iemOpcodeGetNextU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1604{
1605 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
1606 if (rcStrict == VINF_SUCCESS)
1607 {
1608 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1609# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1610 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1611# else
1612 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1613 pVCpu->iem.s.abOpcode[offOpcode + 1],
1614 pVCpu->iem.s.abOpcode[offOpcode + 2],
1615 pVCpu->iem.s.abOpcode[offOpcode + 3],
1616 pVCpu->iem.s.abOpcode[offOpcode + 4],
1617 pVCpu->iem.s.abOpcode[offOpcode + 5],
1618 pVCpu->iem.s.abOpcode[offOpcode + 6],
1619 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1620# endif
1621 pVCpu->iem.s.offOpcode = offOpcode + 8;
1622 }
1623 else
1624 *pu64 = 0;
1625 return rcStrict;
1626}
1627
1628#else /* IEM_WITH_SETJMP */
1629
1630/**
1631 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
1632 *
1633 * @returns The opcode qword.
1634 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1635 */
1636uint64_t iemOpcodeGetNextU64SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1637{
1638# ifdef IEM_WITH_CODE_TLB
1639 uint64_t u64;
1640 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
1641 return u64;
1642# else
1643 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
1644 if (rcStrict == VINF_SUCCESS)
1645 {
1646 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1647 pVCpu->iem.s.offOpcode = offOpcode + 8;
1648# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1649 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1650# else
1651 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1652 pVCpu->iem.s.abOpcode[offOpcode + 1],
1653 pVCpu->iem.s.abOpcode[offOpcode + 2],
1654 pVCpu->iem.s.abOpcode[offOpcode + 3],
1655 pVCpu->iem.s.abOpcode[offOpcode + 4],
1656 pVCpu->iem.s.abOpcode[offOpcode + 5],
1657 pVCpu->iem.s.abOpcode[offOpcode + 6],
1658 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1659# endif
1660 }
1661 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1662# endif
1663}
1664
1665#endif /* IEM_WITH_SETJMP */
1666
1667
1668
1669/** @name Misc Worker Functions.
1670 * @{
1671 */
1672
1673/**
1674 * Gets the exception class for the specified exception vector.
1675 *
1676 * @returns The class of the specified exception.
1677 * @param uVector The exception vector.
1678 */
1679static IEMXCPTCLASS iemGetXcptClass(uint8_t uVector) RT_NOEXCEPT
1680{
1681 Assert(uVector <= X86_XCPT_LAST);
1682 switch (uVector)
1683 {
1684 case X86_XCPT_DE:
1685 case X86_XCPT_TS:
1686 case X86_XCPT_NP:
1687 case X86_XCPT_SS:
1688 case X86_XCPT_GP:
1689 case X86_XCPT_SX: /* AMD only */
1690 return IEMXCPTCLASS_CONTRIBUTORY;
1691
1692 case X86_XCPT_PF:
1693 case X86_XCPT_VE: /* Intel only */
1694 return IEMXCPTCLASS_PAGE_FAULT;
1695
1696 case X86_XCPT_DF:
1697 return IEMXCPTCLASS_DOUBLE_FAULT;
1698 }
1699 return IEMXCPTCLASS_BENIGN;
1700}
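
/*
 * Illustrative examples (not part of the build) of the classification above,
 * which drives the double/triple fault decisions in IEMEvaluateRecursiveXcpt
 * below:
 *
 *     IEMXCPTCLASS enmGp = iemGetXcptClass(X86_XCPT_GP); // IEMXCPTCLASS_CONTRIBUTORY
 *     IEMXCPTCLASS enmPf = iemGetXcptClass(X86_XCPT_PF); // IEMXCPTCLASS_PAGE_FAULT
 *     IEMXCPTCLASS enmDb = iemGetXcptClass(X86_XCPT_DB); // IEMXCPTCLASS_BENIGN
 *
 * A contributory exception during delivery of a contributory one, or any
 * contributory/page fault during delivery of a page fault, escalates to #DF;
 * a contributory or page fault raised while delivering #DF escalates to a
 * triple fault.
 */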
1701
1702
1703/**
1704 * Evaluates how to handle an exception caused during delivery of another event
1705 * (exception / interrupt).
1706 *
1707 * @returns How to handle the recursive exception.
1708 * @param pVCpu The cross context virtual CPU structure of the
1709 * calling thread.
1710 * @param fPrevFlags The flags of the previous event.
1711 * @param uPrevVector The vector of the previous event.
1712 * @param fCurFlags The flags of the current exception.
1713 * @param uCurVector The vector of the current exception.
1714 * @param pfXcptRaiseInfo Where to store additional information about the
1715 * exception condition. Optional.
1716 */
1717VMM_INT_DECL(IEMXCPTRAISE) IEMEvaluateRecursiveXcpt(PVMCPUCC pVCpu, uint32_t fPrevFlags, uint8_t uPrevVector, uint32_t fCurFlags,
1718 uint8_t uCurVector, PIEMXCPTRAISEINFO pfXcptRaiseInfo)
1719{
1720 /*
1721 * Only CPU exceptions can be raised while delivering other events; software interrupt
1722 * (INTn/INT3/INTO/ICEBP) generated exceptions cannot occur as the current (second) exception.
1723 */
1724 AssertReturn(fCurFlags & IEM_XCPT_FLAGS_T_CPU_XCPT, IEMXCPTRAISE_INVALID);
1725 Assert(pVCpu); RT_NOREF(pVCpu);
1726 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x\n", uPrevVector, uCurVector));
1727
1728 IEMXCPTRAISE enmRaise = IEMXCPTRAISE_CURRENT_XCPT;
1729 IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
1730 if (fPrevFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
1731 {
1732 IEMXCPTCLASS enmPrevXcptClass = iemGetXcptClass(uPrevVector);
1733 if (enmPrevXcptClass != IEMXCPTCLASS_BENIGN)
1734 {
1735 IEMXCPTCLASS enmCurXcptClass = iemGetXcptClass(uCurVector);
1736 if ( enmPrevXcptClass == IEMXCPTCLASS_PAGE_FAULT
1737 && ( enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT
1738 || enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY))
1739 {
1740 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
1741 fRaiseInfo = enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT ? IEMXCPTRAISEINFO_PF_PF
1742 : IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT;
1743 Log2(("IEMEvaluateRecursiveXcpt: Vectoring page fault. uPrevVector=%#x uCurVector=%#x uCr2=%#RX64\n", uPrevVector,
1744 uCurVector, pVCpu->cpum.GstCtx.cr2));
1745 }
1746 else if ( enmPrevXcptClass == IEMXCPTCLASS_CONTRIBUTORY
1747 && enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY)
1748 {
1749 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
1750 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x -> #DF\n", uPrevVector, uCurVector));
1751 }
1752 else if ( enmPrevXcptClass == IEMXCPTCLASS_DOUBLE_FAULT
1753 && ( enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY
1754 || enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT))
1755 {
1756 enmRaise = IEMXCPTRAISE_TRIPLE_FAULT;
1757 Log2(("IEMEvaluateRecursiveXcpt: #DF handler raised a %#x exception -> triple fault\n", uCurVector));
1758 }
1759 }
1760 else
1761 {
1762 if (uPrevVector == X86_XCPT_NMI)
1763 {
1764 fRaiseInfo = IEMXCPTRAISEINFO_NMI_XCPT;
1765 if (uCurVector == X86_XCPT_PF)
1766 {
1767 fRaiseInfo |= IEMXCPTRAISEINFO_NMI_PF;
1768 Log2(("IEMEvaluateRecursiveXcpt: NMI delivery caused a page fault\n"));
1769 }
1770 }
1771 else if ( uPrevVector == X86_XCPT_AC
1772 && uCurVector == X86_XCPT_AC)
1773 {
1774 enmRaise = IEMXCPTRAISE_CPU_HANG;
1775 fRaiseInfo = IEMXCPTRAISEINFO_AC_AC;
1776 Log2(("IEMEvaluateRecursiveXcpt: Recursive #AC - Bad guest\n"));
1777 }
1778 }
1779 }
1780 else if (fPrevFlags & IEM_XCPT_FLAGS_T_EXT_INT)
1781 {
1782 fRaiseInfo = IEMXCPTRAISEINFO_EXT_INT_XCPT;
1783 if (uCurVector == X86_XCPT_PF)
1784 fRaiseInfo |= IEMXCPTRAISEINFO_EXT_INT_PF;
1785 }
1786 else
1787 {
1788 Assert(fPrevFlags & IEM_XCPT_FLAGS_T_SOFT_INT);
1789 fRaiseInfo = IEMXCPTRAISEINFO_SOFT_INT_XCPT;
1790 }
1791
1792 if (pfXcptRaiseInfo)
1793 *pfXcptRaiseInfo = fRaiseInfo;
1794 return enmRaise;
1795}
1796
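/*
 * Usage sketch (not part of the build), assuming the X86_XCPT_* and
 * IEM_XCPT_FLAGS_* constants: a #GP raised while delivering a #PF is a
 * page-fault + contributory combination and must be promoted to #DF.
 *
 *     IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
 *     IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu,
 *                                                      IEM_XCPT_FLAGS_T_CPU_XCPT, X86_XCPT_PF,
 *                                                      IEM_XCPT_FLAGS_T_CPU_XCPT, X86_XCPT_GP,
 *                                                      &fRaiseInfo);
 *     Assert(enmRaise   == IEMXCPTRAISE_DOUBLE_FAULT);
 *     Assert(fRaiseInfo == IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT);
 */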
1797
1798/**
1799 * Enters the CPU shutdown state initiated by a triple fault or other
1800 * unrecoverable conditions.
1801 *
1802 * @returns Strict VBox status code.
1803 * @param pVCpu The cross context virtual CPU structure of the
1804 * calling thread.
1805 */
1806static VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPUCC pVCpu) RT_NOEXCEPT
1807{
1808 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
1809 IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(pVCpu, VMX_EXIT_TRIPLE_FAULT, 0 /* u64ExitQual */);
1810
1811 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
1812 {
1813 Log2(("shutdown: Guest intercept -> #VMEXIT\n"));
1814 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
1815 }
1816
1817 RT_NOREF(pVCpu);
1818 return VINF_EM_TRIPLE_FAULT;
1819}
1820
1821
1822/**
1823 * Validates a new SS segment.
1824 *
1825 * @returns VBox strict status code.
1826 * @param pVCpu The cross context virtual CPU structure of the
1827 * calling thread.
1828 * @param NewSS The new SS selector.
1829 * @param uCpl The CPL to load the stack for.
1830 * @param pDesc Where to return the descriptor.
1831 */
1832static VBOXSTRICTRC iemMiscValidateNewSS(PVMCPUCC pVCpu, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc) RT_NOEXCEPT
1833{
1834 /* Null selectors are not allowed (we're not called for dispatching
1835 interrupts with SS=0 in long mode). */
1836 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
1837 {
1838 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
1839 return iemRaiseTaskSwitchFault0(pVCpu);
1840 }
1841
1842 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
1843 if ((NewSS & X86_SEL_RPL) != uCpl)
1844 {
1845 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl));
1846 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1847 }
1848
1849 /*
1850 * Read the descriptor.
1851 */
1852 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
1853 if (rcStrict != VINF_SUCCESS)
1854 return rcStrict;
1855
1856 /*
1857 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
1858 */
1859 if (!pDesc->Legacy.Gen.u1DescType)
1860 {
1861 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
1862 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1863 }
1864
1865 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
1866 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
1867 {
1868 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
1869 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1870 }
1871 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
1872 {
1873 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
1874 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1875 }
1876
1877 /* Is it there? */
1878 /** @todo testcase: Is this checked before the canonical / limit check below? */
1879 if (!pDesc->Legacy.Gen.u1Present)
1880 {
1881 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
1882 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
1883 }
1884
1885 return VINF_SUCCESS;
1886}
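
/*
 * Usage sketch (not part of the build): a typical caller validates the new
 * SS before committing it when switching to a different privilege level.
 * NewSS and uNewCpl are placeholders here.
 *
 *     IEMSELDESC DescSS;
 *     VBOXSTRICTRC rcStrict = iemMiscValidateNewSS(pVCpu, NewSS, uNewCpl, &DescSS);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;    // #TS or #NP has already been raised
 *     // DescSS now describes a present, writable data segment with DPL == uNewCpl.
 */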
1887
1888/** @} */
1889
1890
1891/** @name Raising Exceptions.
1892 *
1893 * @{
1894 */
1895
1896
1897/**
1898 * Loads the specified stack far pointer from the TSS.
1899 *
1900 * @returns VBox strict status code.
1901 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1902 * @param uCpl The CPL to load the stack for.
1903 * @param pSelSS Where to return the new stack segment.
1904 * @param puEsp Where to return the new stack pointer.
1905 */
1906static VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPUCC pVCpu, uint8_t uCpl, PRTSEL pSelSS, uint32_t *puEsp) RT_NOEXCEPT
1907{
1908 VBOXSTRICTRC rcStrict;
1909 Assert(uCpl < 4);
1910
1911 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
1912 switch (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type)
1913 {
1914 /*
1915 * 16-bit TSS (X86TSS16).
1916 */
1917 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
1918 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
1919 {
1920 uint32_t off = uCpl * 4 + 2;
1921 if (off + 4 <= pVCpu->cpum.GstCtx.tr.u32Limit)
1922 {
1923 /** @todo check actual access pattern here. */
1924 uint32_t u32Tmp = 0; /* gcc may otherwise warn about uninitialized use */
1925 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
1926 if (rcStrict == VINF_SUCCESS)
1927 {
1928 *puEsp = RT_LOWORD(u32Tmp);
1929 *pSelSS = RT_HIWORD(u32Tmp);
1930 return VINF_SUCCESS;
1931 }
1932 }
1933 else
1934 {
1935 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
1936 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
1937 }
1938 break;
1939 }
1940
1941 /*
1942 * 32-bit TSS (X86TSS32).
1943 */
1944 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
1945 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
1946 {
1947 uint32_t off = uCpl * 8 + 4;
1948 if (off + 7 <= pVCpu->cpum.GstCtx.tr.u32Limit)
1949 {
1950 /** @todo check actual access pattern here. */
1951 uint64_t u64Tmp;
1952 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
1953 if (rcStrict == VINF_SUCCESS)
1954 {
1955 *puEsp = u64Tmp & UINT32_MAX;
1956 *pSelSS = (RTSEL)(u64Tmp >> 32);
1957 return VINF_SUCCESS;
1958 }
1959 }
1960 else
1961 {
1962 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
1963 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
1964 }
1965 break;
1966 }
1967
1968 default:
1969 AssertFailed();
1970 rcStrict = VERR_IEM_IPE_4;
1971 break;
1972 }
1973
1974 *puEsp = 0; /* make gcc happy */
1975 *pSelSS = 0; /* make gcc happy */
1976 return rcStrict;
1977}
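
/*
 * Worked example (not normative) of the offset math above: in a 32-bit TSS
 * the ring-N SS:ESP pair lives at offset 4 + N * 8, so for uCpl == 1 the
 * function reads 8 bytes at TSS base + 12 (low dword = ESP, high word = SS).
 * In a 16-bit TSS the pair is 4 bytes at offset 2 + N * 4, i.e. base + 6 for
 * uCpl == 1.  Caller-side sketch:
 *
 *     RTSEL    SelSS = 0;
 *     uint32_t uEsp  = 0;
 *     VBOXSTRICTRC rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, 1 /*uCpl*/, &SelSS, &uEsp);
 */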
1978
1979
1980/**
1981 * Loads the specified stack pointer from the 64-bit TSS.
1982 *
1983 * @returns VBox strict status code.
1984 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1985 * @param uCpl The CPL to load the stack for.
1986 * @param uIst The interrupt stack table index, 0 if to use uCpl.
1987 * @param puRsp Where to return the new stack pointer.
1988 */
1989static VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPUCC pVCpu, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp) RT_NOEXCEPT
1990{
1991 Assert(uCpl < 4);
1992 Assert(uIst < 8);
1993 *puRsp = 0; /* make gcc happy */
1994
1995 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
1996 AssertReturn(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
1997
1998 uint32_t off;
1999 if (uIst)
2000 off = (uIst - 1) * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, ist1);
2001 else
2002 off = uCpl * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, rsp0);
2003 if (off + sizeof(uint64_t) > pVCpu->cpum.GstCtx.tr.u32Limit)
2004 {
2005 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pVCpu->cpum.GstCtx.tr.u32Limit));
2006 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
2007 }
2008
2009 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
2010}
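
/*
 * Usage sketch (not part of the build): a 64-bit interrupt gate with a
 * non-zero IST index selects one of the seven IST slots regardless of CPL;
 * with uIst == 0 the rspN field for the target CPL is used instead.  The
 * values below are illustrative.
 *
 *     uint64_t uNewRsp = 0;
 *     VBOXSTRICTRC rcStrict = iemRaiseLoadStackFromTss64(pVCpu, 0 /*uCpl*/, 3 /*uIst*/, &uNewRsp);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;    // #TS if the TSS limit doesn't cover the slot
 */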
2011
2012
2013/**
2014 * Adjust the CPU state according to the exception being raised.
2015 *
2016 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2017 * @param u8Vector The exception that has been raised.
2018 */
2019DECLINLINE(void) iemRaiseXcptAdjustState(PVMCPUCC pVCpu, uint8_t u8Vector)
2020{
2021 switch (u8Vector)
2022 {
2023 case X86_XCPT_DB:
2024 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);
2025 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
2026 break;
2027 /** @todo Read the AMD and Intel exception reference... */
2028 }
2029}
2030
2031
2032/**
2033 * Implements exceptions and interrupts for real mode.
2034 *
2035 * @returns VBox strict status code.
2036 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2037 * @param cbInstr The number of bytes to offset rIP by in the return
2038 * address.
2039 * @param u8Vector The interrupt / exception vector number.
2040 * @param fFlags The flags.
2041 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2042 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2043 */
2044static VBOXSTRICTRC
2045iemRaiseXcptOrIntInRealMode(PVMCPUCC pVCpu,
2046 uint8_t cbInstr,
2047 uint8_t u8Vector,
2048 uint32_t fFlags,
2049 uint16_t uErr,
2050 uint64_t uCr2) RT_NOEXCEPT
2051{
2052 NOREF(uErr); NOREF(uCr2);
2053 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2054
2055 /*
2056 * Read the IDT entry.
2057 */
2058 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
2059 {
2060 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
2061 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2062 }
2063 RTFAR16 Idte;
2064 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(4) * u8Vector);
2065 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2066 {
2067 Log(("iemRaiseXcptOrIntInRealMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
2068 return rcStrict;
2069 }
2070
2071 /*
2072 * Push the stack frame.
2073 */
2074 uint16_t *pu16Frame;
2075 uint64_t uNewRsp;
2076 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, 3, (void **)&pu16Frame, &uNewRsp);
2077 if (rcStrict != VINF_SUCCESS)
2078 return rcStrict;
2079
2080 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
2081#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
2082 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
2083 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
2084 fEfl |= UINT16_C(0xf000);
2085#endif
2086 pu16Frame[2] = (uint16_t)fEfl;
2087 pu16Frame[1] = (uint16_t)pVCpu->cpum.GstCtx.cs.Sel;
2088 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
2089 rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu16Frame, uNewRsp);
2090 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2091 return rcStrict;
2092
2093 /*
2094 * Load the vector address into cs:ip and make exception specific state
2095 * adjustments.
2096 */
2097 pVCpu->cpum.GstCtx.cs.Sel = Idte.sel;
2098 pVCpu->cpum.GstCtx.cs.ValidSel = Idte.sel;
2099 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
2100 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)Idte.sel << 4;
2101 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
2102 pVCpu->cpum.GstCtx.rip = Idte.off;
2103 fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC);
2104 IEMMISC_SET_EFL(pVCpu, fEfl);
2105
2106 /** @todo do we actually do this in real mode? */
2107 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
2108 iemRaiseXcptAdjustState(pVCpu, u8Vector);
2109
2110 /* The IEM_F_MODE_XXX and IEM_F_X86_CPL_MASK don't really change here,
2111 so best leave them alone in case we're in a weird kind of real mode... */
2112
2113 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
2114}
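
/*
 * Minimal sketch (not normative) of the real-mode frame built above: three
 * words are pushed (FLAGS, CS, IP) and CS:IP is loaded from the 4-byte IVT
 * entry at vector * 4.  With SP initially at 0x0100 and an "INT 21h":
 *
 *     //   SS:[0x00FA] = IP of the following instruction (return IP)
 *     //   SS:[0x00FC] = return CS
 *     //   SS:[0x00FE] = FLAGS (IF/TF/AC are then cleared in the live EFLAGS)
 *     //   CS:IP       = IVT[0x21].sel : IVT[0x21].off
 *
 * The stack addresses are illustrative only.
 */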
2115
2116
2117/**
2118 * Loads a NULL data selector into a segment register when coming from V8086 mode.
2119 *
2120 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2121 * @param pSReg Pointer to the segment register.
2122 */
2123DECLINLINE(void) iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPUCC pVCpu, PCPUMSELREG pSReg)
2124{
2125 pSReg->Sel = 0;
2126 pSReg->ValidSel = 0;
2127 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2128 {
2129 /* VT-x (Intel 3960x) doesn't change the base and limit; it clears and sets the following attributes. */
2130 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
2131 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
2132 }
2133 else
2134 {
2135 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2136 /** @todo check this on AMD-V */
2137 pSReg->u64Base = 0;
2138 pSReg->u32Limit = 0;
2139 }
2140}
2141
2142
2143/**
2144 * Loads a segment selector during a task switch in V8086 mode.
2145 *
2146 * @param pSReg Pointer to the segment register.
2147 * @param uSel The selector value to load.
2148 */
2149DECLINLINE(void) iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
2150{
2151 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
2152 pSReg->Sel = uSel;
2153 pSReg->ValidSel = uSel;
2154 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2155 pSReg->u64Base = uSel << 4;
2156 pSReg->u32Limit = 0xffff;
2157 pSReg->Attr.u = 0xf3;
2158}
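
/*
 * Worked example (not normative): in V8086 mode the hidden parts are derived
 * directly from the selector value, so
 *
 *     iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.es, 0xb800);
 *
 * yields es.u64Base == 0xb8000, es.u32Limit == 0xffff and es.Attr.u == 0xf3
 * (present, DPL 3, accessed read/write data).
 */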
2159
2160
2161/**
2162 * Loads a segment selector during a task switch in protected mode.
2163 *
2164 * In this task switch scenario, we would throw \#TS exceptions rather than
2165 * \#GPs.
2166 *
2167 * @returns VBox strict status code.
2168 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2169 * @param pSReg Pointer to the segment register.
2170 * @param uSel The new selector value.
2171 *
2172 * @remarks This does _not_ handle CS or SS.
2173 * @remarks This expects IEM_GET_CPL(pVCpu) to return an up to date value.
2174 */
2175static VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPUCC pVCpu, PCPUMSELREG pSReg, uint16_t uSel) RT_NOEXCEPT
2176{
2177 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2178
2179 /* Null data selector. */
2180 if (!(uSel & X86_SEL_MASK_OFF_RPL))
2181 {
2182 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
2183 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
2184 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2185 return VINF_SUCCESS;
2186 }
2187
2188 /* Fetch the descriptor. */
2189 IEMSELDESC Desc;
2190 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
2191 if (rcStrict != VINF_SUCCESS)
2192 {
2193 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
2194 VBOXSTRICTRC_VAL(rcStrict)));
2195 return rcStrict;
2196 }
2197
2198 /* Must be a data segment or readable code segment. */
2199 if ( !Desc.Legacy.Gen.u1DescType
2200 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
2201 {
2202 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
2203 Desc.Legacy.Gen.u4Type));
2204 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2205 }
2206
2207 /* Check privileges for data segments and non-conforming code segments. */
2208 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2209 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2210 {
2211 /* The RPL and the new CPL must be less than or equal to the DPL. */
2212 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
2213 || (IEM_GET_CPL(pVCpu) > Desc.Legacy.Gen.u2Dpl))
2214 {
2215 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
2216 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
2217 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2218 }
2219 }
2220
2221 /* Is it there? */
2222 if (!Desc.Legacy.Gen.u1Present)
2223 {
2224 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
2225 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2226 }
2227
2228 /* The base and limit. */
2229 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
2230 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
2231
2232 /*
2233 * Ok, everything checked out fine. Now set the accessed bit before
2234 * committing the result into the registers.
2235 */
2236 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2237 {
2238 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
2239 if (rcStrict != VINF_SUCCESS)
2240 return rcStrict;
2241 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2242 }
2243
2244 /* Commit */
2245 pSReg->Sel = uSel;
2246 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
2247 pSReg->u32Limit = cbLimit;
2248 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
2249 pSReg->ValidSel = uSel;
2250 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2251 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2252 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
2253
2254 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
2255 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2256 return VINF_SUCCESS;
2257}
2258
2259
2260/**
2261 * Performs a task switch.
2262 *
2263 * If the task switch is the result of a JMP, CALL or IRET instruction, the
2264 * caller is responsible for performing the necessary checks (like DPL, TSS
2265 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
2266 * reference for JMP, CALL, IRET.
2267 *
2268 * If the task switch is due to a software interrupt or hardware exception,
2269 * the caller is responsible for validating the TSS selector and descriptor. See
2270 * Intel Instruction reference for INT n.
2271 *
2272 * @returns VBox strict status code.
2273 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2274 * @param enmTaskSwitch The cause of the task switch.
2275 * @param uNextEip The EIP effective after the task switch.
2276 * @param fFlags The flags, see IEM_XCPT_FLAGS_XXX.
2277 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2278 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2279 * @param SelTSS The TSS selector of the new task.
2280 * @param pNewDescTSS Pointer to the new TSS descriptor.
2281 */
2282VBOXSTRICTRC
2283iemTaskSwitch(PVMCPUCC pVCpu,
2284 IEMTASKSWITCH enmTaskSwitch,
2285 uint32_t uNextEip,
2286 uint32_t fFlags,
2287 uint16_t uErr,
2288 uint64_t uCr2,
2289 RTSEL SelTSS,
2290 PIEMSELDESC pNewDescTSS) RT_NOEXCEPT
2291{
2292 Assert(!IEM_IS_REAL_MODE(pVCpu));
2293 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2294 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2295
2296 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
2297 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2298 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2299 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2300 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2301
2302 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2303 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2304
2305 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTSS,
2306 fIsNewTSS386, pVCpu->cpum.GstCtx.eip, uNextEip));
2307
2308 /* Update CR2 in case it's a page-fault. */
2309 /** @todo This should probably be done much earlier in IEM/PGM. See
2310 * @bugref{5653#c49}. */
2311 if (fFlags & IEM_XCPT_FLAGS_CR2)
2312 pVCpu->cpum.GstCtx.cr2 = uCr2;
2313
2314 /*
2315 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
2316 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
2317 */
2318 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
2319 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
2320 if (uNewTSSLimit < uNewTSSLimitMin)
2321 {
2322 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
2323 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
2324 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
2325 }
2326
2327 /*
2328 * Task switches in VMX non-root mode always cause a task-switch VM-exit.
2329 * The new TSS must have been read and validated (DPL, limits etc.) before a
2330 * task-switch VM-exit commences.
2331 *
2332 * See Intel spec. 25.4.2 "Treatment of Task Switches".
2333 */
2334 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
2335 {
2336 Log(("iemTaskSwitch: Guest intercept (source=%u, sel=%#x) -> VM-exit.\n", enmTaskSwitch, SelTSS));
2337 IEM_VMX_VMEXIT_TASK_SWITCH_RET(pVCpu, enmTaskSwitch, SelTSS, uNextEip - pVCpu->cpum.GstCtx.eip);
2338 }
2339
2340 /*
2341 * The SVM nested-guest intercept for task-switch takes priority over all exceptions
2342 * after validating the incoming (new) TSS, see AMD spec. 15.14.1 "Task Switch Intercept".
2343 */
2344 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TASK_SWITCH))
2345 {
2346 uint32_t const uExitInfo1 = SelTSS;
2347 uint32_t uExitInfo2 = uErr;
2348 switch (enmTaskSwitch)
2349 {
2350 case IEMTASKSWITCH_JUMP: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_JUMP; break;
2351 case IEMTASKSWITCH_IRET: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_IRET; break;
2352 default: break;
2353 }
2354 if (fFlags & IEM_XCPT_FLAGS_ERR)
2355 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_HAS_ERROR_CODE;
2356 if (pVCpu->cpum.GstCtx.eflags.Bits.u1RF)
2357 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_EFLAGS_RF;
2358
2359 Log(("iemTaskSwitch: Guest intercept -> #VMEXIT. uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uExitInfo1, uExitInfo2));
2360 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_TASK_SWITCH, uExitInfo1, uExitInfo2);
2361 RT_NOREF2(uExitInfo1, uExitInfo2);
2362 }
2363
2364 /*
2365 * Check the current TSS limit. The last written byte to the current TSS during the
2366 * task switch will be 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
2367 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2368 *
2369 * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
2370 * end up with smaller than "legal" TSS limits.
2371 */
2372 uint32_t const uCurTSSLimit = pVCpu->cpum.GstCtx.tr.u32Limit;
2373 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
2374 if (uCurTSSLimit < uCurTSSLimitMin)
2375 {
2376 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
2377 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
2378 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
2379 }
2380
2381 /*
2382 * Verify that the new TSS can be accessed and map it. Map only the required contents
2383 * and not the entire TSS.
2384 */
2385 void *pvNewTSS;
2386 uint32_t const cbNewTSS = uNewTSSLimitMin + 1;
2387 RTGCPTR const GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
2388 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
2389 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
2390 * not perform correct translation if this happens. See Intel spec. 7.2.1
2391 * "Task-State Segment". */
2392 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW, 0);
2393 if (rcStrict != VINF_SUCCESS)
2394 {
2395 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
2396 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
2397 return rcStrict;
2398 }
2399
2400 /*
2401 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
2402 */
2403 uint32_t fEFlags = pVCpu->cpum.GstCtx.eflags.u;
2404 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
2405 || enmTaskSwitch == IEMTASKSWITCH_IRET)
2406 {
2407 PX86DESC pDescCurTSS;
2408 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
2409 pVCpu->cpum.GstCtx.gdtr.pGdt + (pVCpu->cpum.GstCtx.tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0);
2410 if (rcStrict != VINF_SUCCESS)
2411 {
2412 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2413 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2414 return rcStrict;
2415 }
2416
2417 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2418 rcStrict = iemMemCommitAndUnmap(pVCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
2419 if (rcStrict != VINF_SUCCESS)
2420 {
2421 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2422 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2423 return rcStrict;
2424 }
2425
2426 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
2427 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
2428 {
2429 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2430 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2431 fEFlags &= ~X86_EFL_NT;
2432 }
2433 }
2434
2435 /*
2436 * Save the CPU state into the current TSS.
2437 */
2438 RTGCPTR const GCPtrCurTSS = pVCpu->cpum.GstCtx.tr.u64Base;
2439 if (GCPtrNewTSS == GCPtrCurTSS)
2440 {
2441 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
2442 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
2443 pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.u, pVCpu->cpum.GstCtx.eax,
2444 pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel,
2445 pVCpu->cpum.GstCtx.ldtr.Sel));
2446 }
2447 if (fIsNewTSS386)
2448 {
2449 /*
2450 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
2451 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2452 */
2453 void *pvCurTSS32;
2454 uint32_t const offCurTSS = RT_UOFFSETOF(X86TSS32, eip);
2455 uint32_t const cbCurTSS = RT_UOFFSETOF(X86TSS32, selLdt) - RT_UOFFSETOF(X86TSS32, eip);
2456 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
2457 rcStrict = iemMemMap(pVCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW, 0);
2458 if (rcStrict != VINF_SUCCESS)
2459 {
2460 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
2461 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
2462 return rcStrict;
2463 }
2464
2465 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS, offCurTSS + cbCurTSS). */
2466 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
2467 pCurTSS32->eip = uNextEip;
2468 pCurTSS32->eflags = fEFlags;
2469 pCurTSS32->eax = pVCpu->cpum.GstCtx.eax;
2470 pCurTSS32->ecx = pVCpu->cpum.GstCtx.ecx;
2471 pCurTSS32->edx = pVCpu->cpum.GstCtx.edx;
2472 pCurTSS32->ebx = pVCpu->cpum.GstCtx.ebx;
2473 pCurTSS32->esp = pVCpu->cpum.GstCtx.esp;
2474 pCurTSS32->ebp = pVCpu->cpum.GstCtx.ebp;
2475 pCurTSS32->esi = pVCpu->cpum.GstCtx.esi;
2476 pCurTSS32->edi = pVCpu->cpum.GstCtx.edi;
2477 pCurTSS32->es = pVCpu->cpum.GstCtx.es.Sel;
2478 pCurTSS32->cs = pVCpu->cpum.GstCtx.cs.Sel;
2479 pCurTSS32->ss = pVCpu->cpum.GstCtx.ss.Sel;
2480 pCurTSS32->ds = pVCpu->cpum.GstCtx.ds.Sel;
2481 pCurTSS32->fs = pVCpu->cpum.GstCtx.fs.Sel;
2482 pCurTSS32->gs = pVCpu->cpum.GstCtx.gs.Sel;
2483
2484 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
2485 if (rcStrict != VINF_SUCCESS)
2486 {
2487 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2488 VBOXSTRICTRC_VAL(rcStrict)));
2489 return rcStrict;
2490 }
2491 }
2492 else
2493 {
2494 /*
2495 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
2496 */
2497 void *pvCurTSS16;
2498 uint32_t const offCurTSS = RT_UOFFSETOF(X86TSS16, ip);
2499 uint32_t const cbCurTSS = RT_UOFFSETOF(X86TSS16, selLdt) - RT_UOFFSETOF(X86TSS16, ip);
2500 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
2501 rcStrict = iemMemMap(pVCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW, 0);
2502 if (rcStrict != VINF_SUCCESS)
2503 {
2504 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
2505 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
2506 return rcStrict;
2507 }
2508
2509 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS, offCurTSS + cbCurTSS). */
2510 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
2511 pCurTSS16->ip = uNextEip;
2512 pCurTSS16->flags = (uint16_t)fEFlags;
2513 pCurTSS16->ax = pVCpu->cpum.GstCtx.ax;
2514 pCurTSS16->cx = pVCpu->cpum.GstCtx.cx;
2515 pCurTSS16->dx = pVCpu->cpum.GstCtx.dx;
2516 pCurTSS16->bx = pVCpu->cpum.GstCtx.bx;
2517 pCurTSS16->sp = pVCpu->cpum.GstCtx.sp;
2518 pCurTSS16->bp = pVCpu->cpum.GstCtx.bp;
2519 pCurTSS16->si = pVCpu->cpum.GstCtx.si;
2520 pCurTSS16->di = pVCpu->cpum.GstCtx.di;
2521 pCurTSS16->es = pVCpu->cpum.GstCtx.es.Sel;
2522 pCurTSS16->cs = pVCpu->cpum.GstCtx.cs.Sel;
2523 pCurTSS16->ss = pVCpu->cpum.GstCtx.ss.Sel;
2524 pCurTSS16->ds = pVCpu->cpum.GstCtx.ds.Sel;
2525
2526 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
2527 if (rcStrict != VINF_SUCCESS)
2528 {
2529 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2530 VBOXSTRICTRC_VAL(rcStrict)));
2531 return rcStrict;
2532 }
2533 }
2534
2535 /*
2536 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
2537 */
2538 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2539 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2540 {
2541 /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */
2542 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
2543 pNewTSS->selPrev = pVCpu->cpum.GstCtx.tr.Sel;
2544 }
2545
2546 /*
2547 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky,
2548 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
2549 */
2550 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
2551 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
2552 bool fNewDebugTrap;
2553 if (fIsNewTSS386)
2554 {
2555 PCX86TSS32 pNewTSS32 = (PCX86TSS32)pvNewTSS;
2556 uNewCr3 = (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
2557 uNewEip = pNewTSS32->eip;
2558 uNewEflags = pNewTSS32->eflags;
2559 uNewEax = pNewTSS32->eax;
2560 uNewEcx = pNewTSS32->ecx;
2561 uNewEdx = pNewTSS32->edx;
2562 uNewEbx = pNewTSS32->ebx;
2563 uNewEsp = pNewTSS32->esp;
2564 uNewEbp = pNewTSS32->ebp;
2565 uNewEsi = pNewTSS32->esi;
2566 uNewEdi = pNewTSS32->edi;
2567 uNewES = pNewTSS32->es;
2568 uNewCS = pNewTSS32->cs;
2569 uNewSS = pNewTSS32->ss;
2570 uNewDS = pNewTSS32->ds;
2571 uNewFS = pNewTSS32->fs;
2572 uNewGS = pNewTSS32->gs;
2573 uNewLdt = pNewTSS32->selLdt;
2574 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
2575 }
2576 else
2577 {
2578 PCX86TSS16 pNewTSS16 = (PCX86TSS16)pvNewTSS;
2579 uNewCr3 = 0;
2580 uNewEip = pNewTSS16->ip;
2581 uNewEflags = pNewTSS16->flags;
2582 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax;
2583 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx;
2584 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx;
2585 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx;
2586 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp;
2587 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp;
2588 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si;
2589 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di;
2590 uNewES = pNewTSS16->es;
2591 uNewCS = pNewTSS16->cs;
2592 uNewSS = pNewTSS16->ss;
2593 uNewDS = pNewTSS16->ds;
2594 uNewFS = 0;
2595 uNewGS = 0;
2596 uNewLdt = pNewTSS16->selLdt;
2597 fNewDebugTrap = false;
2598 }
2599
2600 if (GCPtrNewTSS == GCPtrCurTSS)
2601 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
2602 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
2603
2604 /*
2605 * We're done accessing the new TSS.
2606 */
2607 rcStrict = iemMemCommitAndUnmap(pVCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
2608 if (rcStrict != VINF_SUCCESS)
2609 {
2610 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
2611 return rcStrict;
2612 }
2613
2614 /*
2615 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
2616 */
2617 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
2618 {
2619 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
2620 pVCpu->cpum.GstCtx.gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0);
2621 if (rcStrict != VINF_SUCCESS)
2622 {
2623 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2624 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2625 return rcStrict;
2626 }
2627
2628 /* Check that the descriptor indicates the new TSS is available (not busy). */
2629 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2630 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
2631 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
2632
2633 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2634 rcStrict = iemMemCommitAndUnmap(pVCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
2635 if (rcStrict != VINF_SUCCESS)
2636 {
2637 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2638 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2639 return rcStrict;
2640 }
2641 }
2642
2643 /*
2644 * From this point on, we're technically in the new task. Exceptions raised from here on are
2645 * reported after the task switch completes but before any instruction of the new task executes.
2646 */
2647 pVCpu->cpum.GstCtx.tr.Sel = SelTSS;
2648 pVCpu->cpum.GstCtx.tr.ValidSel = SelTSS;
2649 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
2650 pVCpu->cpum.GstCtx.tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
2651 pVCpu->cpum.GstCtx.tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
2652 pVCpu->cpum.GstCtx.tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy);
2653 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
2654
2655 /* Set the busy bit in TR. */
2656 pVCpu->cpum.GstCtx.tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2657
2658 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
2659 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2660 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2661 {
2662 uNewEflags |= X86_EFL_NT;
2663 }
2664
2665 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
2666 pVCpu->cpum.GstCtx.cr0 |= X86_CR0_TS;
2667 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
2668
2669 pVCpu->cpum.GstCtx.eip = uNewEip;
2670 pVCpu->cpum.GstCtx.eax = uNewEax;
2671 pVCpu->cpum.GstCtx.ecx = uNewEcx;
2672 pVCpu->cpum.GstCtx.edx = uNewEdx;
2673 pVCpu->cpum.GstCtx.ebx = uNewEbx;
2674 pVCpu->cpum.GstCtx.esp = uNewEsp;
2675 pVCpu->cpum.GstCtx.ebp = uNewEbp;
2676 pVCpu->cpum.GstCtx.esi = uNewEsi;
2677 pVCpu->cpum.GstCtx.edi = uNewEdi;
2678
2679 uNewEflags &= X86_EFL_LIVE_MASK;
2680 uNewEflags |= X86_EFL_RA1_MASK;
2681 IEMMISC_SET_EFL(pVCpu, uNewEflags);
2682
2683 /*
2684 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
2685 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
2686 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
2687 */
2688 pVCpu->cpum.GstCtx.es.Sel = uNewES;
2689 pVCpu->cpum.GstCtx.es.Attr.u &= ~X86DESCATTR_P;
2690
2691 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
2692 pVCpu->cpum.GstCtx.cs.Attr.u &= ~X86DESCATTR_P;
2693
2694 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
2695 pVCpu->cpum.GstCtx.ss.Attr.u &= ~X86DESCATTR_P;
2696
2697 pVCpu->cpum.GstCtx.ds.Sel = uNewDS;
2698 pVCpu->cpum.GstCtx.ds.Attr.u &= ~X86DESCATTR_P;
2699
2700 pVCpu->cpum.GstCtx.fs.Sel = uNewFS;
2701 pVCpu->cpum.GstCtx.fs.Attr.u &= ~X86DESCATTR_P;
2702
2703 pVCpu->cpum.GstCtx.gs.Sel = uNewGS;
2704 pVCpu->cpum.GstCtx.gs.Attr.u &= ~X86DESCATTR_P;
2705 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2706
2707 pVCpu->cpum.GstCtx.ldtr.Sel = uNewLdt;
2708 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
2709 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_P;
2710 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
2711
2712 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2713 {
2714 pVCpu->cpum.GstCtx.es.Attr.u |= X86DESCATTR_UNUSABLE;
2715 pVCpu->cpum.GstCtx.cs.Attr.u |= X86DESCATTR_UNUSABLE;
2716 pVCpu->cpum.GstCtx.ss.Attr.u |= X86DESCATTR_UNUSABLE;
2717 pVCpu->cpum.GstCtx.ds.Attr.u |= X86DESCATTR_UNUSABLE;
2718 pVCpu->cpum.GstCtx.fs.Attr.u |= X86DESCATTR_UNUSABLE;
2719 pVCpu->cpum.GstCtx.gs.Attr.u |= X86DESCATTR_UNUSABLE;
2720 pVCpu->cpum.GstCtx.ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
2721 }
2722
2723 /*
2724 * Switch CR3 for the new task.
2725 */
2726 if ( fIsNewTSS386
2727 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG))
2728 {
2729 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
2730 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
2731 AssertRCSuccessReturn(rc, rc);
2732
2733 /* Inform PGM. */
2734 /** @todo Should we raise \#GP(0) here when PAE PDPEs are invalid? */
2735 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PGE));
2736 AssertRCReturn(rc, rc);
2737 /* ignore informational status codes */
2738
2739 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
2740 }
2741
2742 /*
2743 * Switch LDTR for the new task.
2744 */
2745 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
2746 iemHlpLoadNullDataSelectorProt(pVCpu, &pVCpu->cpum.GstCtx.ldtr, uNewLdt);
2747 else
2748 {
2749 Assert(!pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
2750
2751 IEMSELDESC DescNewLdt;
2752 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
2753 if (rcStrict != VINF_SUCCESS)
2754 {
2755 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
2756 uNewLdt, pVCpu->cpum.GstCtx.gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
2757 return rcStrict;
2758 }
2759 if ( !DescNewLdt.Legacy.Gen.u1Present
2760 || DescNewLdt.Legacy.Gen.u1DescType
2761 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
2762 {
2763 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
2764 uNewLdt, DescNewLdt.Legacy.u));
2765 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
2766 }
2767
2768 pVCpu->cpum.GstCtx.ldtr.ValidSel = uNewLdt;
2769 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
2770 pVCpu->cpum.GstCtx.ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
2771 pVCpu->cpum.GstCtx.ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
2772 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
2773 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2774 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
2775 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
2776 }
2777
2778 IEMSELDESC DescSS;
2779 if (IEM_IS_V86_MODE(pVCpu))
2780 {
2781 IEM_SET_CPL(pVCpu, 3);
2782 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.es, uNewES);
2783 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.cs, uNewCS);
2784 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ss, uNewSS);
2785 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ds, uNewDS);
2786 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.fs, uNewFS);
2787 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.gs, uNewGS);
2788
2789 /* Quick fix: fake DescSS. */ /** @todo fix the code further down? */
2790 DescSS.Legacy.u = 0;
2791 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u32Limit;
2792 DescSS.Legacy.Gen.u4LimitHigh = pVCpu->cpum.GstCtx.ss.u32Limit >> 16;
2793 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u64Base;
2794 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 16);
2795 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 24);
2796 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
2797 DescSS.Legacy.Gen.u2Dpl = 3;
2798 }
2799 else
2800 {
2801 uint8_t const uNewCpl = (uNewCS & X86_SEL_RPL);
2802
2803 /*
2804 * Load the stack segment for the new task.
2805 */
2806 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
2807 {
2808 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
2809 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2810 }
2811
2812 /* Fetch the descriptor. */
2813 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
2814 if (rcStrict != VINF_SUCCESS)
2815 {
2816 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
2817 VBOXSTRICTRC_VAL(rcStrict)));
2818 return rcStrict;
2819 }
2820
2821 /* SS must be a data segment and writable. */
2822 if ( !DescSS.Legacy.Gen.u1DescType
2823 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
2824 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
2825 {
2826 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
2827 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
2828 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2829 }
2830
2831 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
2832 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
2833 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
2834 {
2835 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
2836 uNewCpl));
2837 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2838 }
2839
2840 /* Is it there? */
2841 if (!DescSS.Legacy.Gen.u1Present)
2842 {
2843 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
2844 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2845 }
2846
2847 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
2848 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
2849
2850 /* Set the accessed bit before committing the result into SS. */
2851 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2852 {
2853 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
2854 if (rcStrict != VINF_SUCCESS)
2855 return rcStrict;
2856 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2857 }
2858
2859 /* Commit SS. */
2860 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
2861 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSS;
2862 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
2863 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimit;
2864 pVCpu->cpum.GstCtx.ss.u64Base = u64Base;
2865 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
2866 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
2867
2868 /* CPL has changed, update IEM before loading rest of segments. */
2869 IEM_SET_CPL(pVCpu, uNewCpl);
2870
2871 /*
2872 * Load the data segments for the new task.
2873 */
2874 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.es, uNewES);
2875 if (rcStrict != VINF_SUCCESS)
2876 return rcStrict;
2877 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.ds, uNewDS);
2878 if (rcStrict != VINF_SUCCESS)
2879 return rcStrict;
2880 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.fs, uNewFS);
2881 if (rcStrict != VINF_SUCCESS)
2882 return rcStrict;
2883 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.gs, uNewGS);
2884 if (rcStrict != VINF_SUCCESS)
2885 return rcStrict;
2886
2887 /*
2888 * Load the code segment for the new task.
2889 */
2890 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
2891 {
2892 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
2893 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2894 }
2895
2896 /* Fetch the descriptor. */
2897 IEMSELDESC DescCS;
2898 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
2899 if (rcStrict != VINF_SUCCESS)
2900 {
2901 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
2902 return rcStrict;
2903 }
2904
2905 /* CS must be a code segment. */
2906 if ( !DescCS.Legacy.Gen.u1DescType
2907 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
2908 {
2909 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
2910 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
2911 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2912 }
2913
2914 /* For conforming CS, DPL must be less than or equal to the RPL. */
2915 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2916 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
2917 {
2918 Log(("iemTaskSwitch: conforming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
2919 DescCS.Legacy.Gen.u2Dpl));
2920 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2921 }
2922
2923 /* For non-conforming CS, DPL must match RPL. */
2924 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2925 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
2926 {
2927 Log(("iemTaskSwitch: non-conforming CS DPL RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
2928 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
2929 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2930 }
2931
2932 /* Is it there? */
2933 if (!DescCS.Legacy.Gen.u1Present)
2934 {
2935 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
2936 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2937 }
2938
2939 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
2940 u64Base = X86DESC_BASE(&DescCS.Legacy);
2941
2942 /* Set the accessed bit before committing the result into CS. */
2943 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2944 {
2945 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
2946 if (rcStrict != VINF_SUCCESS)
2947 return rcStrict;
2948 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2949 }
2950
2951 /* Commit CS. */
2952 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
2953 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCS;
2954 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
2955 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
2956 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
2957 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
2958 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
2959 }
2960
2961 /* Make sure the CPU mode is correct. */
2962 uint32_t const fExecNew = iemCalcExecFlags(pVCpu) | (pVCpu->iem.s.fExec & IEM_F_USER_OPTS);
2963 if (fExecNew != pVCpu->iem.s.fExec)
2964 Log(("iemTaskSwitch: fExec %#x -> %#x (xor %#x)\n", pVCpu->iem.s.fExec, fExecNew, pVCpu->iem.s.fExec ^ fExecNew));
2965 pVCpu->iem.s.fExec = fExecNew;
2966
2967 /** @todo Debug trap. */
2968 if (fIsNewTSS386 && fNewDebugTrap)
2969 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
2970
2971 /*
2972 * Construct the error code masks based on what caused this task switch.
2973 * See Intel Instruction reference for INT.
2974 */
2975 uint16_t uExt;
2976 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
2977 && ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
2978 || (fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)))
2979 uExt = 1;
2980 else
2981 uExt = 0;
2982
2983 /*
2984 * Push any error code on to the new stack.
2985 */
2986 if (fFlags & IEM_XCPT_FLAGS_ERR)
2987 {
2988 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
2989 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
2990 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
2991
2992 /* Check that there is sufficient space on the stack. */
2993 /** @todo Factor out segment limit checking for normal/expand down segments
2994 * into a separate function. */
2995 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
2996 {
2997 if ( pVCpu->cpum.GstCtx.esp - 1 > cbLimitSS
2998 || pVCpu->cpum.GstCtx.esp < cbStackFrame)
2999 {
3000 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
3001 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
3002 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
3003 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
3004 }
3005 }
3006 else
3007 {
3008 if ( pVCpu->cpum.GstCtx.esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
3009 || pVCpu->cpum.GstCtx.esp - cbStackFrame < cbLimitSS + UINT32_C(1))
3010 {
3011 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
3012 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
3013 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
3014 }
3015 }
3016
3017
3018 if (fIsNewTSS386)
3019 rcStrict = iemMemStackPushU32(pVCpu, uErr);
3020 else
3021 rcStrict = iemMemStackPushU16(pVCpu, uErr);
3022 if (rcStrict != VINF_SUCCESS)
3023 {
3024 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
3025 fIsNewTSS386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
3026 return rcStrict;
3027 }
3028 }
3029
3030 /* Check the new EIP against the new CS limit. */
3031 if (pVCpu->cpum.GstCtx.eip > pVCpu->cpum.GstCtx.cs.u32Limit)
3032 {
3033 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
3034 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.cs.u32Limit));
3035 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
3036 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
3037 }
3038
3039 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip,
3040 pVCpu->cpum.GstCtx.ss.Sel));
3041 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3042}
3043
3044
3045/**
3046 * Implements exceptions and interrupts for protected mode.
3047 *
3048 * @returns VBox strict status code.
3049 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3050 * @param cbInstr The number of bytes to offset rIP by in the return
3051 * address.
3052 * @param u8Vector The interrupt / exception vector number.
3053 * @param fFlags The flags.
3054 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3055 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3056 */
3057static VBOXSTRICTRC
3058iemRaiseXcptOrIntInProtMode(PVMCPUCC pVCpu,
3059 uint8_t cbInstr,
3060 uint8_t u8Vector,
3061 uint32_t fFlags,
3062 uint16_t uErr,
3063 uint64_t uCr2) RT_NOEXCEPT
3064{
3065 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3066
3067 /*
3068 * Read the IDT entry.
3069 */
3070 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
3071 {
3072 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3073 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3074 }
3075 X86DESC Idte;
3076 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
3077 pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(8) * u8Vector);
3078 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3079 {
3080 Log(("iemRaiseXcptOrIntInProtMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3081 return rcStrict;
3082 }
3083 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
3084 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3085 Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
3086
3087 /*
3088 * Check the descriptor type, DPL and such.
3089 * ASSUMES this is done in the same order as described for call-gate calls.
3090 */
3091 if (Idte.Gate.u1DescType)
3092 {
3093 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3094 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3095 }
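 /* Decode the gate type. Interrupt gates additionally clear IF on dispatch
    (trap gates do not), and task gates hand things over to iemTaskSwitch
    further down; TF, NT, RF and VM are cleared for interrupt and trap gates. */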
3096 bool fTaskGate = false;
3097 uint8_t f32BitGate = true;
3098 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
3099 switch (Idte.Gate.u4Type)
3100 {
3101 case X86_SEL_TYPE_SYS_UNDEFINED:
3102 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
3103 case X86_SEL_TYPE_SYS_LDT:
3104 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3105 case X86_SEL_TYPE_SYS_286_CALL_GATE:
3106 case X86_SEL_TYPE_SYS_UNDEFINED2:
3107 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
3108 case X86_SEL_TYPE_SYS_UNDEFINED3:
3109 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3110 case X86_SEL_TYPE_SYS_386_CALL_GATE:
3111 case X86_SEL_TYPE_SYS_UNDEFINED4:
3112 {
3113 /** @todo check what actually happens when the type is wrong...
3114 * esp. call gates. */
3115 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3116 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3117 }
3118
3119 case X86_SEL_TYPE_SYS_286_INT_GATE:
3120 f32BitGate = false;
3121 RT_FALL_THRU();
3122 case X86_SEL_TYPE_SYS_386_INT_GATE:
3123 fEflToClear |= X86_EFL_IF;
3124 break;
3125
3126 case X86_SEL_TYPE_SYS_TASK_GATE:
3127 fTaskGate = true;
3128#ifndef IEM_IMPLEMENTS_TASKSWITCH
3129 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
3130#endif
3131 break;
3132
3133 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
3134 f32BitGate = false;
 RT_FALL_THRU();
3135 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
3136 break;
3137
3138 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3139 }
3140
3141 /* Check DPL against CPL if applicable. */
3142 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
3143 {
3144 if (IEM_GET_CPL(pVCpu) > Idte.Gate.u2Dpl)
3145 {
3146 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, IEM_GET_CPL(pVCpu), Idte.Gate.u2Dpl));
3147 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3148 }
3149 }
3150
3151 /* Is it there? */
3152 if (!Idte.Gate.u1Present)
3153 {
3154 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
3155 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3156 }
3157
3158 /* Is it a task-gate? */
3159 if (fTaskGate)
3160 {
3161 /*
3162 * Construct the error code masks based on what caused this task switch.
3163 * See Intel Instruction reference for INT.
3164 */
3165 uint16_t const uExt = ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3166 && !(fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)) ? 0 : 1;
3167 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
3168 RTSEL SelTSS = Idte.Gate.u16Sel;
3169
3170 /*
3171 * Fetch the TSS descriptor in the GDT.
3172 */
3173 IEMSELDESC DescTSS;
3174 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
3175 if (rcStrict != VINF_SUCCESS)
3176 {
3177 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
3178 VBOXSTRICTRC_VAL(rcStrict)));
3179 return rcStrict;
3180 }
3181
3182 /* The TSS descriptor must be a system segment and be available (not busy). */
3183 if ( DescTSS.Legacy.Gen.u1DescType
3184 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
3185 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
3186 {
3187 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
3188 u8Vector, SelTSS, DescTSS.Legacy.au64));
3189 return iemRaiseGeneralProtectionFault(pVCpu, (SelTSS & uSelMask) | uExt);
3190 }
3191
3192 /* The TSS must be present. */
3193 if (!DescTSS.Legacy.Gen.u1Present)
3194 {
3195 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
3196 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTSS & uSelMask) | uExt);
3197 }
3198
3199 /* Do the actual task switch. */
3200 return iemTaskSwitch(pVCpu, IEMTASKSWITCH_INT_XCPT,
3201 (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip,
3202 fFlags, uErr, uCr2, SelTSS, &DescTSS);
3203 }
3204
3205 /* A null CS is bad. */
3206 RTSEL NewCS = Idte.Gate.u16Sel;
3207 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3208 {
3209 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3210 return iemRaiseGeneralProtectionFault0(pVCpu);
3211 }
3212
3213 /* Fetch the descriptor for the new CS. */
3214 IEMSELDESC DescCS;
3215 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
3216 if (rcStrict != VINF_SUCCESS)
3217 {
3218 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3219 return rcStrict;
3220 }
3221
3222 /* Must be a code segment. */
3223 if (!DescCS.Legacy.Gen.u1DescType)
3224 {
3225 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3226 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3227 }
3228 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3229 {
3230 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3231 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3232 }
3233
3234 /* Don't allow lowering the privilege level. */
3235 /** @todo Does the lowering of privileges apply to software interrupts
3236 * only? This has a bearing on the more-privileged or
3237 * same-privilege stack behavior further down. A testcase would
3238 * be nice. */
3239 if (DescCS.Legacy.Gen.u2Dpl > IEM_GET_CPL(pVCpu))
3240 {
3241 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3242 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
3243 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3244 }
3245
3246 /* Make sure the selector is present. */
3247 if (!DescCS.Legacy.Gen.u1Present)
3248 {
3249 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3250 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
3251 }
3252
3253 /* Check the new EIP against the new CS limit. */
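 /* 286 gates only carry a 16-bit offset; 386 gates supply the full 32 bits. */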
3254 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
3255 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
3256 ? Idte.Gate.u16OffsetLow
3257 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
3258 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
3259 if (uNewEip > cbLimitCS)
3260 {
3261 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
3262 u8Vector, uNewEip, cbLimitCS, NewCS));
3263 return iemRaiseGeneralProtectionFault(pVCpu, 0);
3264 }
3265 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
3266
3267 /* Calc the flag image to push. */
3268 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3269 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3270 fEfl &= ~X86_EFL_RF;
3271 else
3272 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3273
3274 /* From V8086 mode only go to CPL 0. */
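 /* Conforming code segments keep the current CPL; otherwise the handler runs at CS.DPL. */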
3275 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3276 ? IEM_GET_CPL(pVCpu) : DescCS.Legacy.Gen.u2Dpl;
3277 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
3278 {
3279 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
3280 return iemRaiseGeneralProtectionFault(pVCpu, 0);
3281 }
3282
3283 /*
3284 * If the privilege level changes, we need to get a new stack from the TSS.
3285 * This in turns means validating the new SS and ESP...
3286 */
3287 if (uNewCpl != IEM_GET_CPL(pVCpu))
3288 {
3289 RTSEL NewSS;
3290 uint32_t uNewEsp;
3291 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, uNewCpl, &NewSS, &uNewEsp);
3292 if (rcStrict != VINF_SUCCESS)
3293 return rcStrict;
3294
3295 IEMSELDESC DescSS;
3296 rcStrict = iemMiscValidateNewSS(pVCpu, NewSS, uNewCpl, &DescSS);
3297 if (rcStrict != VINF_SUCCESS)
3298 return rcStrict;
3299 /* If the new SS is 16-bit, we are only going to use SP, not ESP. */
3300 if (!DescSS.Legacy.Gen.u1DefBig)
3301 {
3302 Log(("iemRaiseXcptOrIntInProtMode: Forcing ESP=%#x to 16 bits\n", uNewEsp));
3303 uNewEsp = (uint16_t)uNewEsp;
3304 }
3305
3306 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
3307
3308 /* Check that there is sufficient space for the stack frame. */
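 /* Frame sizes: IP/EIP, CS, (E)FLAGS, (E)SP and SS are pushed (5 entries, plus an
    optional error code); when interrupting V86 code, GS, FS, DS and ES are pushed
    as well (9 entries). Entries are 2 bytes wide for a 286 gate, 4 for a 386 gate. */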
3309 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
3310 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
3311 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
3312 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
3313
3314 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3315 {
3316 if ( uNewEsp - 1 > cbLimitSS
3317 || uNewEsp < cbStackFrame)
3318 {
3319 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
3320 u8Vector, NewSS, uNewEsp, cbStackFrame));
3321 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
3322 }
3323 }
3324 else
3325 {
3326 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT16_MAX)
3327 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
3328 {
3329 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
3330 u8Vector, NewSS, uNewEsp, cbStackFrame));
3331 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
3332 }
3333 }
3334
3335 /*
3336 * Start making changes.
3337 */
3338
3339 /* Set the new CPL so that stack accesses use it. */
3340 uint8_t const uOldCpl = IEM_GET_CPL(pVCpu);
3341 IEM_SET_CPL(pVCpu, uNewCpl);
3342
3343 /* Create the stack frame. */
3344 RTPTRUNION uStackFrame;
3345 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
3346 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy),
3347 IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */
3348 if (rcStrict != VINF_SUCCESS)
3349 return rcStrict;
3350 void * const pvStackFrame = uStackFrame.pv;
3351 if (f32BitGate)
3352 {
3353 if (fFlags & IEM_XCPT_FLAGS_ERR)
3354 *uStackFrame.pu32++ = uErr;
3355 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3356 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
3357 uStackFrame.pu32[2] = fEfl;
3358 uStackFrame.pu32[3] = pVCpu->cpum.GstCtx.esp;
3359 uStackFrame.pu32[4] = pVCpu->cpum.GstCtx.ss.Sel;
3360 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
3361 if (fEfl & X86_EFL_VM)
3362 {
3363 uStackFrame.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel;
3364 uStackFrame.pu32[5] = pVCpu->cpum.GstCtx.es.Sel;
3365 uStackFrame.pu32[6] = pVCpu->cpum.GstCtx.ds.Sel;
3366 uStackFrame.pu32[7] = pVCpu->cpum.GstCtx.fs.Sel;
3367 uStackFrame.pu32[8] = pVCpu->cpum.GstCtx.gs.Sel;
3368 }
3369 }
3370 else
3371 {
3372 if (fFlags & IEM_XCPT_FLAGS_ERR)
3373 *uStackFrame.pu16++ = uErr;
3374 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
3375 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
3376 uStackFrame.pu16[2] = fEfl;
3377 uStackFrame.pu16[3] = pVCpu->cpum.GstCtx.sp;
3378 uStackFrame.pu16[4] = pVCpu->cpum.GstCtx.ss.Sel;
3379 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.sp));
3380 if (fEfl & X86_EFL_VM)
3381 {
3382 uStackFrame.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
3383 uStackFrame.pu16[5] = pVCpu->cpum.GstCtx.es.Sel;
3384 uStackFrame.pu16[6] = pVCpu->cpum.GstCtx.ds.Sel;
3385 uStackFrame.pu16[7] = pVCpu->cpum.GstCtx.fs.Sel;
3386 uStackFrame.pu16[8] = pVCpu->cpum.GstCtx.gs.Sel;
3387 }
3388 }
3389 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
3390 if (rcStrict != VINF_SUCCESS)
3391 return rcStrict;
3392
3393 /* Mark the selectors 'accessed' (hope this is the correct time). */
3394 /** @todo testcase: exactly _when_ are the accessed bits set - before or
3395 * after pushing the stack frame? (Write protect the gdt + stack to
3396 * find out.) */
3397 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3398 {
3399 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3400 if (rcStrict != VINF_SUCCESS)
3401 return rcStrict;
3402 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3403 }
3404
3405 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3406 {
3407 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
3408 if (rcStrict != VINF_SUCCESS)
3409 return rcStrict;
3410 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3411 }
3412
3413 /*
3414 * Start committing the register changes (joins with the DPL=CPL branch).
3415 */
3416 pVCpu->cpum.GstCtx.ss.Sel = NewSS;
3417 pVCpu->cpum.GstCtx.ss.ValidSel = NewSS;
3418 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3419 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimitSS;
3420 pVCpu->cpum.GstCtx.ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
3421 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3422 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
3423 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
3424 * SP is loaded).
3425 * Need to check the other combinations too:
3426 * - 16-bit TSS, 32-bit handler
3427 * - 32-bit TSS, 16-bit handler */
3428 if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
3429 pVCpu->cpum.GstCtx.sp = (uint16_t)(uNewEsp - cbStackFrame);
3430 else
3431 pVCpu->cpum.GstCtx.rsp = uNewEsp - cbStackFrame;
3432
3433 if (fEfl & X86_EFL_VM)
3434 {
3435 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.gs);
3436 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.fs);
3437 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.es);
3438 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.ds);
3439 }
3440 }
3441 /*
3442 * Same privilege, no stack change and smaller stack frame.
3443 */
3444 else
3445 {
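 /* No stack switch, so only IP/EIP, CS and (E)FLAGS are pushed here
    (plus the error code, if any); SS:ESP stays put. */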
3446 uint64_t uNewRsp;
3447 RTPTRUNION uStackFrame;
3448 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
3449 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, f32BitGate ? 3 : 1, &uStackFrame.pv, &uNewRsp);
3450 if (rcStrict != VINF_SUCCESS)
3451 return rcStrict;
3452 void * const pvStackFrame = uStackFrame.pv;
3453
3454 if (f32BitGate)
3455 {
3456 if (fFlags & IEM_XCPT_FLAGS_ERR)
3457 *uStackFrame.pu32++ = uErr;
3458 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3459 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | IEM_GET_CPL(pVCpu);
3460 uStackFrame.pu32[2] = fEfl;
3461 }
3462 else
3463 {
3464 if (fFlags & IEM_XCPT_FLAGS_ERR)
3465 *uStackFrame.pu16++ = uErr;
3466 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3467 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | IEM_GET_CPL(pVCpu);
3468 uStackFrame.pu16[2] = fEfl;
3469 }
3470 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
3471 if (rcStrict != VINF_SUCCESS)
3472 return rcStrict;
3473
3474 /* Mark the CS selector as 'accessed'. */
3475 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3476 {
3477 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3478 if (rcStrict != VINF_SUCCESS)
3479 return rcStrict;
3480 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3481 }
3482
3483 /*
3484 * Start committing the register changes (joins with the other branch).
3485 */
3486 pVCpu->cpum.GstCtx.rsp = uNewRsp;
3487 }
3488
3489 /* ... register committing continues. */
3490 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3491 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3492 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3493 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCS;
3494 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3495 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3496
3497 pVCpu->cpum.GstCtx.rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
3498 fEfl &= ~fEflToClear;
3499 IEMMISC_SET_EFL(pVCpu, fEfl);
3500
3501 if (fFlags & IEM_XCPT_FLAGS_CR2)
3502 pVCpu->cpum.GstCtx.cr2 = uCr2;
3503
3504 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3505 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3506
3507 /* Make sure the execution flags are correct. */
3508 uint32_t const fExecNew = iemCalcExecFlags(pVCpu) | (pVCpu->iem.s.fExec & IEM_F_USER_OPTS);
3509 if (fExecNew != pVCpu->iem.s.fExec)
3510 Log(("iemRaiseXcptOrIntInProtMode: fExec %#x -> %#x (xor %#x)\n",
3511 pVCpu->iem.s.fExec, fExecNew, pVCpu->iem.s.fExec ^ fExecNew));
3512 pVCpu->iem.s.fExec = fExecNew;
3513 Assert(IEM_GET_CPL(pVCpu) == uNewCpl);
3514
3515 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3516}
3517
3518
3519/**
3520 * Implements exceptions and interrupts for long mode.
3521 *
3522 * @returns VBox strict status code.
3523 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3524 * @param cbInstr The number of bytes to offset rIP by in the return
3525 * address.
3526 * @param u8Vector The interrupt / exception vector number.
3527 * @param fFlags The flags.
3528 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3529 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3530 */
3531static VBOXSTRICTRC
3532iemRaiseXcptOrIntInLongMode(PVMCPUCC pVCpu,
3533 uint8_t cbInstr,
3534 uint8_t u8Vector,
3535 uint32_t fFlags,
3536 uint16_t uErr,
3537 uint64_t uCr2) RT_NOEXCEPT
3538{
3539 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3540
3541 /*
3542 * Read the IDT entry.
3543 */
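 /* Long mode IDT entries are 16 bytes wide, hence the shift by 4 and the two 8-byte fetches below. */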
3544 uint16_t offIdt = (uint16_t)u8Vector << 4;
3545 if (pVCpu->cpum.GstCtx.idtr.cbIdt < offIdt + 7)
3546 {
3547 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3548 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3549 }
3550 X86DESC64 Idte;
3551#ifdef _MSC_VER /* Shut up silly compiler warning. */
3552 Idte.au64[0] = 0;
3553 Idte.au64[1] = 0;
3554#endif
3555 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt);
3556 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3557 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt + 8);
3558 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3559 {
3560 Log(("iemRaiseXcptOrIntInLongMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3561 return rcStrict;
3562 }
3563 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
3564 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3565 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
3566
3567 /*
3568 * Check the descriptor type, DPL and such.
3569 * ASSUMES this is done in the same order as described for call-gate calls.
3570 */
3571 if (Idte.Gate.u1DescType)
3572 {
3573 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3574 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3575 }
3576 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
3577 switch (Idte.Gate.u4Type)
3578 {
3579 case AMD64_SEL_TYPE_SYS_INT_GATE:
3580 fEflToClear |= X86_EFL_IF;
3581 break;
3582 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
3583 break;
3584
3585 default:
3586 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3587 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3588 }
3589
3590 /* Check DPL against CPL if applicable. */
3591 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
3592 {
3593 if (IEM_GET_CPL(pVCpu) > Idte.Gate.u2Dpl)
3594 {
3595 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, IEM_GET_CPL(pVCpu), Idte.Gate.u2Dpl));
3596 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3597 }
3598 }
3599
3600 /* Is it there? */
3601 if (!Idte.Gate.u1Present)
3602 {
3603 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
3604 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3605 }
3606
3607 /* A null CS is bad. */
3608 RTSEL NewCS = Idte.Gate.u16Sel;
3609 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3610 {
3611 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3612 return iemRaiseGeneralProtectionFault0(pVCpu);
3613 }
3614
3615 /* Fetch the descriptor for the new CS. */
3616 IEMSELDESC DescCS;
3617 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
3618 if (rcStrict != VINF_SUCCESS)
3619 {
3620 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3621 return rcStrict;
3622 }
3623
3624 /* Must be a 64-bit code segment. */
3625 if (!DescCS.Long.Gen.u1DescType)
3626 {
3627 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3628 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3629 }
3630 if ( !DescCS.Long.Gen.u1Long
3631 || DescCS.Long.Gen.u1DefBig
3632 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
3633 {
3634 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
3635 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
3636 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3637 }
3638
3639 /* Don't allow lowering the privilege level. For non-conforming CS
3640 selectors, the CS.DPL sets the privilege level the trap/interrupt
3641 handler runs at. For conforming CS selectors, the CPL remains
3642 unchanged, but the CS.DPL must be <= CPL. */
3643 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
3644 * when CPU in Ring-0. Result \#GP? */
3645 if (DescCS.Legacy.Gen.u2Dpl > IEM_GET_CPL(pVCpu))
3646 {
3647 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3648 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
3649 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3650 }
3651
3652
3653 /* Make sure the selector is present. */
3654 if (!DescCS.Legacy.Gen.u1Present)
3655 {
3656 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3657 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
3658 }
3659
3660 /* Check that the new RIP is canonical. */
3661 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
3662 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
3663 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
3664 if (!IEM_IS_CANONICAL(uNewRip))
3665 {
3666 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
3667 return iemRaiseGeneralProtectionFault0(pVCpu);
3668 }
3669
3670 /*
3671 * If the privilege level changes or if the IST isn't zero, we need to get
3672 * a new stack from the TSS.
3673 */
3674 uint64_t uNewRsp;
3675 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3676 ? IEM_GET_CPL(pVCpu) : DescCS.Legacy.Gen.u2Dpl;
3677 if ( uNewCpl != IEM_GET_CPL(pVCpu)
3678 || Idte.Gate.u3IST != 0)
3679 {
3680 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
3681 if (rcStrict != VINF_SUCCESS)
3682 return rcStrict;
3683 }
3684 else
3685 uNewRsp = pVCpu->cpum.GstCtx.rsp;
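 /* The CPU aligns the stack to a 16-byte boundary before pushing the frame in 64-bit mode. */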
3686 uNewRsp &= ~(uint64_t)0xf;
3687
3688 /*
3689 * Calc the flag image to push.
3690 */
3691 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3692 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3693 fEfl &= ~X86_EFL_RF;
3694 else
3695 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3696
3697 /*
3698 * Start making changes.
3699 */
3700 /* Set the new CPL so that stack accesses use it. */
3701 uint8_t const uOldCpl = IEM_GET_CPL(pVCpu);
3702 IEM_SET_CPL(pVCpu, uNewCpl);
3703/** @todo Setting CPL this early seems wrong as it would affect any errors we
3704 * raise while accessing the stack and (?) GDT/LDT... */
3705
3706 /* Create the stack frame. */
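 /* Layout, low to high: [error code,] RIP, CS, RFLAGS, RSP, SS - five qwords,
    or six when an error code is pushed. */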
3707 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
3708 RTPTRUNION uStackFrame;
3709 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
3710 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */
3711 if (rcStrict != VINF_SUCCESS)
3712 return rcStrict;
3713 void * const pvStackFrame = uStackFrame.pv;
3714
3715 if (fFlags & IEM_XCPT_FLAGS_ERR)
3716 *uStackFrame.pu64++ = uErr;
3717 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.rip + cbInstr : pVCpu->cpum.GstCtx.rip;
3718 uStackFrame.pu64[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
3719 uStackFrame.pu64[2] = fEfl;
3720 uStackFrame.pu64[3] = pVCpu->cpum.GstCtx.rsp;
3721 uStackFrame.pu64[4] = pVCpu->cpum.GstCtx.ss.Sel;
3722 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
3723 if (rcStrict != VINF_SUCCESS)
3724 return rcStrict;
3725
3726 /* Mark the CS selector 'accessed' (hope this is the correct time). */
3727 /** @todo testcase: exactly _when_ are the accessed bits set - before or
3728 * after pushing the stack frame? (Write protect the gdt + stack to
3729 * find out.) */
3730 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3731 {
3732 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3733 if (rcStrict != VINF_SUCCESS)
3734 return rcStrict;
3735 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3736 }
3737
3738 /*
3739 * Start committing the register changes.
3740 */
3741 /** @todo research/testcase: Figure out what VT-x and AMD-V load into the
3742 * hidden registers when interrupting 32-bit or 16-bit code! */
3743 if (uNewCpl != uOldCpl)
3744 {
3745 pVCpu->cpum.GstCtx.ss.Sel = 0 | uNewCpl;
3746 pVCpu->cpum.GstCtx.ss.ValidSel = 0 | uNewCpl;
3747 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3748 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
3749 pVCpu->cpum.GstCtx.ss.u64Base = 0;
3750 pVCpu->cpum.GstCtx.ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
3751 }
3752 pVCpu->cpum.GstCtx.rsp = uNewRsp - cbStackFrame;
3753 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3754 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3755 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3756 pVCpu->cpum.GstCtx.cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
3757 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3758 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3759 pVCpu->cpum.GstCtx.rip = uNewRip;
3760
3761 fEfl &= ~fEflToClear;
3762 IEMMISC_SET_EFL(pVCpu, fEfl);
3763
3764 if (fFlags & IEM_XCPT_FLAGS_CR2)
3765 pVCpu->cpum.GstCtx.cr2 = uCr2;
3766
3767 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3768 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3769
3770 iemRecalcExecModeAndCplFlags(pVCpu);
3771
3772 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3773}
3774
3775
3776/**
3777 * Implements exceptions and interrupts.
3778 *
3779 * All exceptions and interrupts go through this function!
3780 *
3781 * @returns VBox strict status code.
3782 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3783 * @param cbInstr The number of bytes to offset rIP by in the return
3784 * address.
3785 * @param u8Vector The interrupt / exception vector number.
3786 * @param fFlags The flags.
3787 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3788 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3789 */
3790VBOXSTRICTRC
3791iemRaiseXcptOrInt(PVMCPUCC pVCpu,
3792 uint8_t cbInstr,
3793 uint8_t u8Vector,
3794 uint32_t fFlags,
3795 uint16_t uErr,
3796 uint64_t uCr2) RT_NOEXCEPT
3797{
3798 /*
3799 * Get all the state that we might need here.
3800 */
3801 IEM_CTX_IMPORT_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3802 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3803
3804#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
3805 /*
3806 * Flush prefetch buffer
3807 */
3808 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
3809#endif
3810
3811 /*
3812 * Perform the V8086 IOPL check and upgrade the fault without nesting.
3813 */
3814 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1VM
3815 && pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL != 3
3816 && (fFlags & ( IEM_XCPT_FLAGS_T_SOFT_INT
3817 | IEM_XCPT_FLAGS_BP_INSTR
3818 | IEM_XCPT_FLAGS_ICEBP_INSTR
3819 | IEM_XCPT_FLAGS_OF_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
3820 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) )
3821 {
3822 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
3823 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
3824 u8Vector = X86_XCPT_GP;
3825 uErr = 0;
3826 }
3827#ifdef DBGFTRACE_ENABLED
3828 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
3829 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
3830 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp);
3831#endif
3832
3833 /*
3834 * Evaluate whether NMI blocking should be in effect.
3835 * Normally, NMI blocking is in effect whenever we inject an NMI.
3836 */
3837 bool fBlockNmi = u8Vector == X86_XCPT_NMI
3838 && (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT);
3839
3840#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3841 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3842 {
3843 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEvent(pVCpu, u8Vector, fFlags, uErr, uCr2, cbInstr);
3844 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
3845 return rcStrict0;
3846
3847 /* If virtual-NMI blocking is in effect for the nested-guest, guest NMIs are not blocked. */
3848 if (pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking)
3849 {
3850 Assert(CPUMIsGuestVmxPinCtlsSet(&pVCpu->cpum.GstCtx, VMX_PIN_CTLS_VIRT_NMI));
3851 fBlockNmi = false;
3852 }
3853 }
3854#endif
3855
3856#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
3857 if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
3858 {
3859 /*
3860 * If the event is being injected as part of VMRUN, it isn't subject to event
3861 * intercepts in the nested-guest. However, secondary exceptions that occur
3862 * during injection of any event -are- subject to exception intercepts.
3863 *
3864 * See AMD spec. 15.20 "Event Injection".
3865 */
3866 if (!pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents)
3867 pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents = true;
3868 else
3869 {
3870 /*
3871 * Check and handle if the event being raised is intercepted.
3872 */
3873 VBOXSTRICTRC rcStrict0 = iemHandleSvmEventIntercept(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
3874 if (rcStrict0 != VINF_SVM_INTERCEPT_NOT_ACTIVE)
3875 return rcStrict0;
3876 }
3877 }
3878#endif
3879
3880 /*
3881 * Set NMI blocking if necessary.
3882 */
3883 if (fBlockNmi)
3884 CPUMSetInterruptInhibitingByNmi(&pVCpu->cpum.GstCtx);
3885
3886 /*
3887 * Do recursion accounting.
3888 */
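 /* Remember the previous exception state; it is used to classify nested faults
    below and is restored again when unwinding at the end of the function. */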
3889 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
3890 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
3891 if (pVCpu->iem.s.cXcptRecursions == 0)
3892 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
3893 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2));
3894 else
3895 {
3896 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
3897 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt,
3898 pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
3899
3900 if (pVCpu->iem.s.cXcptRecursions >= 4)
3901 {
3902#ifdef DEBUG_bird
3903 AssertFailed();
3904#endif
3905 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
3906 }
3907
3908 /*
3909 * Evaluate the sequence of recurring events.
3910 */
3911 IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevXcpt, uPrevXcpt, fFlags, u8Vector,
3912 NULL /* pXcptRaiseInfo */);
3913 if (enmRaise == IEMXCPTRAISE_CURRENT_XCPT)
3914 { /* likely */ }
3915 else if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
3916 {
3917 Log2(("iemRaiseXcptOrInt: Raising double fault. uPrevXcpt=%#x\n", uPrevXcpt));
3918 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
3919 u8Vector = X86_XCPT_DF;
3920 uErr = 0;
3921#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3922 /* VMX nested-guest #DF intercept needs to be checked here. */
3923 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3924 {
3925 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEventDoubleFault(pVCpu);
3926 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
3927 return rcStrict0;
3928 }
3929#endif
3930 /* SVM nested-guest #DF intercepts need to be checked now. See AMD spec. 15.12 "Exception Intercepts". */
3931 if (IEM_SVM_IS_XCPT_INTERCEPT_SET(pVCpu, X86_XCPT_DF))
3932 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
3933 }
3934 else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)
3935 {
3936 Log2(("iemRaiseXcptOrInt: Raising triple fault. uPrevXcpt=%#x\n", uPrevXcpt));
3937 return iemInitiateCpuShutdown(pVCpu);
3938 }
3939 else if (enmRaise == IEMXCPTRAISE_CPU_HANG)
3940 {
3941 /* If a nested-guest enters an endless CPU loop condition, we'll emulate it; otherwise guru. */
3942 Log2(("iemRaiseXcptOrInt: CPU hang condition detected\n"));
3943 if ( !CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu))
3944 && !CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
3945 return VERR_EM_GUEST_CPU_HANG;
3946 }
3947 else
3948 {
3949 AssertMsgFailed(("Unexpected condition! enmRaise=%#x uPrevXcpt=%#x fPrevXcpt=%#x, u8Vector=%#x fFlags=%#x\n",
3950 enmRaise, uPrevXcpt, fPrevXcpt, u8Vector, fFlags));
3951 return VERR_IEM_IPE_9;
3952 }
3953
3954 /*
3955 * The 'EXT' bit is set when an exception occurs during delivery of an external
3956 * event (such as an interrupt or an earlier exception)[1]. A privileged software
3957 * exception (INT1) also sets the EXT bit[2]. For exceptions generated by software
3958 * interrupts and the INTO and INT3 instructions, the 'EXT' bit will not be set[3].
3959 *
3960 * [1] - Intel spec. 6.13 "Error Code"
3961 * [2] - Intel spec. 26.5.1.1 "Details of Vectored-Event Injection".
3962 * [3] - Intel Instruction reference for INT n.
3963 */
3964 if ( (fPrevXcpt & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR))
3965 && (fFlags & IEM_XCPT_FLAGS_ERR)
3966 && u8Vector != X86_XCPT_PF
3967 && u8Vector != X86_XCPT_DF)
3968 {
3969 uErr |= X86_TRAP_ERR_EXTERNAL;
3970 }
3971 }
3972
3973 pVCpu->iem.s.cXcptRecursions++;
3974 pVCpu->iem.s.uCurXcpt = u8Vector;
3975 pVCpu->iem.s.fCurXcpt = fFlags;
3976 pVCpu->iem.s.uCurXcptErr = uErr;
3977 pVCpu->iem.s.uCurXcptCr2 = uCr2;
3978
3979 /*
3980 * Extensive logging.
3981 */
3982#if defined(LOG_ENABLED) && defined(IN_RING3)
3983 if (LogIs3Enabled())
3984 {
3985 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR_MASK);
3986 PVM pVM = pVCpu->CTX_SUFF(pVM);
3987 char szRegs[4096];
3988 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
3989 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
3990 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
3991 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
3992 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
3993 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
3994 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
3995 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
3996 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
3997 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
3998 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
3999 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4000 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4001 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4002 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4003 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4004 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4005 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4006 " efer=%016VR{efer}\n"
4007 " pat=%016VR{pat}\n"
4008 " sf_mask=%016VR{sf_mask}\n"
4009 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4010 " lstar=%016VR{lstar}\n"
4011 " star=%016VR{star} cstar=%016VR{cstar}\n"
4012 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4013 );
4014
4015 char szInstr[256];
4016 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4017 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4018 szInstr, sizeof(szInstr), NULL);
4019 Log3(("%s%s\n", szRegs, szInstr));
4020 }
4021#endif /* LOG_ENABLED */
4022
4023 /*
4024 * Stats.
4025 */
4026 if (!(fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT))
4027 STAM_REL_STATS({ pVCpu->iem.s.aStatInts[u8Vector] += 1; });
4028 else if (u8Vector <= X86_XCPT_LAST)
4029 {
4030 STAM_REL_COUNTER_INC(&pVCpu->iem.s.aStatXcpts[u8Vector]);
4031 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_XCPT, u8Vector),
4032 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base, ASMReadTSC());
4033 }
4034
4035 /*
4036 * A #PF implies an INVLPG for the CR2 value (see 4.10.1.1 in Intel SDM Vol 3)
4037 * to ensure that a stale TLB or paging cache entry will only cause one
4038 * spurious #PF.
4039 */
4040 if ( u8Vector == X86_XCPT_PF
4041 && (fFlags & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_CR2)) == (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_CR2))
4042 IEMTlbInvalidatePage(pVCpu, uCr2);
4043
4044 /*
4045 * Call the mode specific worker function.
4046 */
4047 VBOXSTRICTRC rcStrict;
4048 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
4049 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4050 else if (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LMA)
4051 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4052 else
4053 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4054
4055 /* Flush the prefetch buffer. */
4056#ifdef IEM_WITH_CODE_TLB
4057 pVCpu->iem.s.pbInstrBuf = NULL;
4058#else
4059 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4060#endif
4061
4062 /*
4063 * Unwind.
4064 */
4065 pVCpu->iem.s.cXcptRecursions--;
4066 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
4067 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
4068 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u depth=%d\n",
4069 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel,
4070 pVCpu->cpum.GstCtx.esp, IEM_GET_CPL(pVCpu), pVCpu->iem.s.cXcptRecursions + 1));
4071 return rcStrict;
4072}
4073
4074#ifdef IEM_WITH_SETJMP
4075/**
4076 * See iemRaiseXcptOrInt. Will not return.
4077 */
4078DECL_NO_RETURN(void)
4079iemRaiseXcptOrIntJmp(PVMCPUCC pVCpu,
4080 uint8_t cbInstr,
4081 uint8_t u8Vector,
4082 uint32_t fFlags,
4083 uint16_t uErr,
4084 uint64_t uCr2) IEM_NOEXCEPT_MAY_LONGJMP
4085{
4086 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4087 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
4088}
4089#endif
4090
4091
4092/** \#DE - 00. */
4093VBOXSTRICTRC iemRaiseDivideError(PVMCPUCC pVCpu) RT_NOEXCEPT
4094{
4095 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4096}
4097
4098
4099/** \#DB - 01.
4100 * @note This automatically clears DR7.GD. */
4101VBOXSTRICTRC iemRaiseDebugException(PVMCPUCC pVCpu) RT_NOEXCEPT
4102{
4103 /* This always clears RF (via IEM_XCPT_FLAGS_DRx_INSTR_BP). */
4104 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
4105 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_DRx_INSTR_BP, 0, 0);
4106}
4107
4108
4109/** \#BR - 05. */
4110VBOXSTRICTRC iemRaiseBoundRangeExceeded(PVMCPUCC pVCpu) RT_NOEXCEPT
4111{
4112 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_BR, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4113}
4114
4115
4116/** \#UD - 06. */
4117VBOXSTRICTRC iemRaiseUndefinedOpcode(PVMCPUCC pVCpu) RT_NOEXCEPT
4118{
4119 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4120}
4121
4122
4123/** \#NM - 07. */
4124VBOXSTRICTRC iemRaiseDeviceNotAvailable(PVMCPUCC pVCpu) RT_NOEXCEPT
4125{
4126 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4127}
4128
4129
4130/** \#TS(err) - 0a. */
4131VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4132{
4133 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4134}
4135
4136
4137/** \#TS(tr) - 0a. */
4138VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPUCC pVCpu) RT_NOEXCEPT
4139{
4140 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4141 pVCpu->cpum.GstCtx.tr.Sel, 0);
4142}
4143
4144
4145/** \#TS(0) - 0a. */
4146VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPUCC pVCpu) RT_NOEXCEPT
4147{
4148 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4149 0, 0);
4150}
4151
4152
4153/** \#TS(err) - 0a. */
4154VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4155{
4156 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4157 uSel & X86_SEL_MASK_OFF_RPL, 0);
4158}
4159
4160
4161/** \#NP(err) - 0b. */
4162VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4163{
4164 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4165}
4166
4167
4168/** \#NP(sel) - 0b. */
4169VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4170{
4171 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4172 uSel & ~X86_SEL_RPL, 0);
4173}
4174
4175
4176/** \#SS(seg) - 0c. */
4177VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4178{
4179 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4180 uSel & ~X86_SEL_RPL, 0);
4181}
4182
4183
4184/** \#SS(err) - 0c. */
4185VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4186{
4187 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4188}
4189
4190
4191/** \#GP(n) - 0d. */
4192VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4193{
4194 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4195}
4196
4197
4198/** \#GP(0) - 0d. */
4199VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPUCC pVCpu) RT_NOEXCEPT
4200{
4201 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4202}
4203
4204#ifdef IEM_WITH_SETJMP
4205/** \#GP(0) - 0d. */
4206DECL_NO_RETURN(void) iemRaiseGeneralProtectionFault0Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4207{
4208 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4209}
4210#endif
4211
4212
4213/** \#GP(sel) - 0d. */
4214VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
4215{
4216 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4217 Sel & ~X86_SEL_RPL, 0);
4218}
4219
4220
4221/** \#GP(0) - 0d. */
4222VBOXSTRICTRC iemRaiseNotCanonical(PVMCPUCC pVCpu) RT_NOEXCEPT
4223{
4224 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4225}
4226
4227
4228/** \#GP(sel) - 0d. */
4229VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4230{
4231 NOREF(iSegReg); NOREF(fAccess);
4232 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4233 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4234}
4235
4236#ifdef IEM_WITH_SETJMP
4237/** \#GP(sel) - 0d, longjmp. */
4238DECL_NO_RETURN(void) iemRaiseSelectorBoundsJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP
4239{
4240 NOREF(iSegReg); NOREF(fAccess);
4241 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4242 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4243}
4244#endif
4245
4246/** \#GP(sel) - 0d. */
4247VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
4248{
4249 NOREF(Sel);
4250 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4251}
4252
4253#ifdef IEM_WITH_SETJMP
4254/** \#GP(sel) - 0d, longjmp. */
4255DECL_NO_RETURN(void) iemRaiseSelectorBoundsBySelectorJmp(PVMCPUCC pVCpu, RTSEL Sel) IEM_NOEXCEPT_MAY_LONGJMP
4256{
4257 NOREF(Sel);
4258 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4259}
4260#endif
4261
4262
4263/** \#GP(sel) - 0d. */
4264VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4265{
4266 NOREF(iSegReg); NOREF(fAccess);
4267 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4268}
4269
4270#ifdef IEM_WITH_SETJMP
4271/** \#GP(sel) - 0d, longjmp. */
4272DECL_NO_RETURN(void) iemRaiseSelectorInvalidAccessJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP
4273{
4274 NOREF(iSegReg); NOREF(fAccess);
4275 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4276}
4277#endif
4278
4279
4280/** \#PF(n) - 0e. */
4281VBOXSTRICTRC iemRaisePageFault(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess, uint32_t fAccess, int rc) RT_NOEXCEPT
4282{
4283 uint16_t uErr;
4284 switch (rc)
4285 {
4286 case VERR_PAGE_NOT_PRESENT:
4287 case VERR_PAGE_TABLE_NOT_PRESENT:
4288 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
4289 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
4290 uErr = 0;
4291 break;
4292
4293 default:
4294 AssertMsgFailed(("%Rrc\n", rc));
4295 RT_FALL_THRU();
4296 case VERR_ACCESS_DENIED:
4297 uErr = X86_TRAP_PF_P;
4298 break;
4299
4300 /** @todo reserved */
4301 }
4302
4303 if (IEM_GET_CPL(pVCpu) == 3)
4304 uErr |= X86_TRAP_PF_US;
4305
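 /* Only flag instruction fetches when no-execute paging is enabled (CR4.PAE + EFER.NXE);
    see the Intel SDM on page-fault error codes for the exact conditions for the I/D bit. */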
4306 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
4307 && ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
4308 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) ) )
4309 uErr |= X86_TRAP_PF_ID;
4310
4311#if 0 /* This is so much non-sense, really. Why was it done like that? */
4312 /* Note! RW access callers reporting a WRITE protection fault, will clear
4313 the READ flag before calling. So, read-modify-write accesses (RW)
4314 can safely be reported as READ faults. */
4315 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
4316 uErr |= X86_TRAP_PF_RW;
4317#else
4318 if (fAccess & IEM_ACCESS_TYPE_WRITE)
4319 {
4320 /// @todo r=bird: bs3-cpu-basic-2 wants X86_TRAP_PF_RW for xchg and cmpxchg
4321 /// (regardless of outcome of the comparison in the latter case).
4322 //if (!(fAccess & IEM_ACCESS_TYPE_READ))
4323 uErr |= X86_TRAP_PF_RW;
4324 }
4325#endif
4326
4327 /* For FXSAVE and FRSTOR the #PF is typically reported at the max address
4328 of the memory operand rather than at the start of it. (Not sure what
4329 happens if it crosses a page boundary.) The current heuristic for
4330 this is to report the #PF for the last byte if the access is more than
4331 64 bytes. This is probably not correct, but we can work that out later,
4332 main objective now is to get FXSAVE to work like for real hardware and
4333 make bs3-cpu-basic2 work. */
4334 if (cbAccess <= 64)
4335 { /* likely*/ }
4336 else
4337 GCPtrWhere += cbAccess - 1;
4338
4339 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
4340 uErr, GCPtrWhere);
4341}
4342
4343#ifdef IEM_WITH_SETJMP
4344/** \#PF(n) - 0e, longjmp. */
4345DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess,
4346 uint32_t fAccess, int rc) IEM_NOEXCEPT_MAY_LONGJMP
4347{
4348 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, cbAccess, fAccess, rc)));
4349}
4350#endif
4351
4352
4353/** \#MF(0) - 10. */
4354VBOXSTRICTRC iemRaiseMathFault(PVMCPUCC pVCpu) RT_NOEXCEPT
4355{
4356 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_NE)
4357 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4358
4359 /* Convert a #MF into a FERR -> IRQ 13. See @bugref{6117}. */
4360 PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13 /* u8Irq */, 1 /* u8Level */, 0 /* uTagSrc */);
4361 return iemRegUpdateRipAndFinishClearingRF(pVCpu);
4362}
4363
4364
4365/** \#AC(0) - 11. */
4366VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPUCC pVCpu) RT_NOEXCEPT
4367{
4368 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4369}
4370
4371#ifdef IEM_WITH_SETJMP
4372/** \#AC(0) - 11, longjmp. */
4373DECL_NO_RETURN(void) iemRaiseAlignmentCheckExceptionJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4374{
4375 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaiseAlignmentCheckException(pVCpu)));
4376}
4377#endif
4378
4379
4380/** \#XF(0)/\#XM(0) - 19. */
4381VBOXSTRICTRC iemRaiseSimdFpException(PVMCPUCC pVCpu) RT_NOEXCEPT
4382{
4383 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_XF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4384}
4385
4386
4387/** Accessed via IEMOP_RAISE_DIVIDE_ERROR. */
4388IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
4389{
4390 NOREF(cbInstr);
4391 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4392}
4393
4394
4395/** Accessed via IEMOP_RAISE_INVALID_LOCK_PREFIX. */
4396IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
4397{
4398 NOREF(cbInstr);
4399 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4400}
4401
4402
4403/** Accessed via IEMOP_RAISE_INVALID_OPCODE. */
4404IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
4405{
4406 NOREF(cbInstr);
4407 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4408}
4409
4410
4411/** @} */
4412
4413/** @name Common opcode decoders.
4414 * @{
4415 */
4416//#include <iprt/mem.h>
4417
4418/**
4419 * Used to add extra details about a stub case.
4420 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4421 */
4422void iemOpStubMsg2(PVMCPUCC pVCpu) RT_NOEXCEPT
4423{
4424#if defined(LOG_ENABLED) && defined(IN_RING3)
4425 PVM pVM = pVCpu->CTX_SUFF(pVM);
4426 char szRegs[4096];
4427 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
4428 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
4429 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
4430 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
4431 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
4432 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
4433 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
4434 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
4435 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
4436 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
4437 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
4438 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4439 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4440 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4441 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4442 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4443 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4444 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4445 " efer=%016VR{efer}\n"
4446 " pat=%016VR{pat}\n"
4447 " sf_mask=%016VR{sf_mask}\n"
4448 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4449 " lstar=%016VR{lstar}\n"
4450 " star=%016VR{star} cstar=%016VR{cstar}\n"
4451 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4452 );
4453
4454 char szInstr[256];
4455 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4456 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4457 szInstr, sizeof(szInstr), NULL);
4458
4459 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
4460#else
4461 RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip);
4462#endif
4463}
4464
4465/** @} */
4466
4467
4468
4469/** @name Register Access.
4470 * @{
4471 */
4472
4473/**
4474 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
4475 *
4476 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4477 * segment limit.
4478 *
4479 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4480 * @param cbInstr Instruction size.
4481 * @param offNextInstr The offset of the next instruction.
4482 * @param enmEffOpSize Effective operand size.
4483 */
4484VBOXSTRICTRC iemRegRipRelativeJumpS8AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int8_t offNextInstr,
4485 IEMMODE enmEffOpSize) RT_NOEXCEPT
4486{
4487 switch (enmEffOpSize)
4488 {
4489 case IEMMODE_16BIT:
4490 {
4491 uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + (int16_t)offNextInstr;
4492 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
4493 || IEM_IS_64BIT_CODE(pVCpu) /* no CS limit checks in 64-bit mode */))
4494 pVCpu->cpum.GstCtx.rip = uNewIp;
4495 else
4496 return iemRaiseGeneralProtectionFault0(pVCpu);
4497 break;
4498 }
4499
4500 case IEMMODE_32BIT:
4501 {
4502 Assert(!IEM_IS_64BIT_CODE(pVCpu));
4503 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
4504
4505 uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + (int32_t)offNextInstr;
4506 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
4507 pVCpu->cpum.GstCtx.rip = uNewEip;
4508 else
4509 return iemRaiseGeneralProtectionFault0(pVCpu);
4510 break;
4511 }
4512
4513 case IEMMODE_64BIT:
4514 {
4515 Assert(IEM_IS_64BIT_CODE(pVCpu));
4516
4517 uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
4518 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
4519 pVCpu->cpum.GstCtx.rip = uNewRip;
4520 else
4521 return iemRaiseGeneralProtectionFault0(pVCpu);
4522 break;
4523 }
4524
4525 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4526 }
4527
4528#ifndef IEM_WITH_CODE_TLB
4529 /* Flush the prefetch buffer. */
4530 pVCpu->iem.s.cbOpcode = cbInstr;
4531#endif
4532
4533 /*
4534 * Clear RF and finish the instruction (maybe raise #DB).
4535 */
4536 return iemRegFinishClearingRF(pVCpu);
4537}
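
/*
 * Illustrative sketch (not part of the build): how the 16-bit case above forms
 * the target.  The 8-bit displacement is sign-extended and added to IP plus the
 * instruction length; the sum wraps at 64 KiB because the result is assigned to
 * a 16-bit variable, and the CS limit check is skipped in 64-bit mode.  The
 * standalone helper below and its name are assumptions made for the example.
 *
 * @code
 *  #include <stdint.h>
 *  #include <stdbool.h>
 *
 *  static bool CalcRel8Target16(uint16_t uIp, uint8_t cbInstr, int8_t offNext,
 *                               uint32_t cbCsLimit, bool f64BitMode, uint16_t *puNewIp)
 *  {
 *      uint16_t const uNewIp = (uint16_t)(uIp + cbInstr + (int16_t)offNext); // wraps mod 64 KiB
 *      if (uNewIp <= cbCsLimit || f64BitMode)
 *      {
 *          *puNewIp = uNewIp;      // the caller would then clear RF and finish the instruction
 *          return true;
 *      }
 *      return false;               // the caller raises #GP(0)
 *  }
 * @endcode
 */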
4538
4539
4540/**
4541 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
4542 *
4543 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4544 * segment limit.
4545 *
4546 * @returns Strict VBox status code.
4547 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4548 * @param cbInstr Instruction size.
4549 * @param offNextInstr The offset of the next instruction.
4550 */
4551VBOXSTRICTRC iemRegRipRelativeJumpS16AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int16_t offNextInstr) RT_NOEXCEPT
4552{
4553 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
4554
4555 uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + offNextInstr;
4556 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
4557 || IEM_IS_64BIT_CODE(pVCpu) /* no limit checking in 64-bit mode */))
4558 pVCpu->cpum.GstCtx.rip = uNewIp;
4559 else
4560 return iemRaiseGeneralProtectionFault0(pVCpu);
4561
4562#ifndef IEM_WITH_CODE_TLB
4563 /* Flush the prefetch buffer. */
4564 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4565#endif
4566
4567 /*
4568 * Clear RF and finish the instruction (maybe raise #DB).
4569 */
4570 return iemRegFinishClearingRF(pVCpu);
4571}
4572
4573
4574/**
4575 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
4576 *
4577 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4578 * segment limit.
4579 *
4580 * @returns Strict VBox status code.
4581 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4582 * @param cbInstr Instruction size.
4583 * @param offNextInstr The offset of the next instruction.
4584 * @param enmEffOpSize Effective operand size.
4585 */
4586VBOXSTRICTRC iemRegRipRelativeJumpS32AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int32_t offNextInstr,
4587 IEMMODE enmEffOpSize) RT_NOEXCEPT
4588{
4589 if (enmEffOpSize == IEMMODE_32BIT)
4590 {
4591 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX); Assert(!IEM_IS_64BIT_CODE(pVCpu));
4592
4593 uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + offNextInstr;
4594 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
4595 pVCpu->cpum.GstCtx.rip = uNewEip;
4596 else
4597 return iemRaiseGeneralProtectionFault0(pVCpu);
4598 }
4599 else
4600 {
4601 Assert(enmEffOpSize == IEMMODE_64BIT);
4602
4603 uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
4604 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
4605 pVCpu->cpum.GstCtx.rip = uNewRip;
4606 else
4607 return iemRaiseGeneralProtectionFault0(pVCpu);
4608 }
4609
4610#ifndef IEM_WITH_CODE_TLB
4611 /* Flush the prefetch buffer. */
4612 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4613#endif
4614
4615 /*
4616 * Clear RF and finish the instruction (maybe raise #DB).
4617 */
4618 return iemRegFinishClearingRF(pVCpu);
4619}
4620
4621
4622/**
4623 * Performs a near jump to the specified address.
4624 *
4625 * May raise a \#GP(0) if the new IP is outside the code segment limit.
4626 *
4627 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4628 * @param uNewIp The new IP value.
4629 */
4630VBOXSTRICTRC iemRegRipJumpU16AndFinishClearningRF(PVMCPUCC pVCpu, uint16_t uNewIp) RT_NOEXCEPT
4631{
4632 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
4633 || IEM_IS_64BIT_CODE(pVCpu) /* no limit checks in 64-bit mode */))
4634 pVCpu->cpum.GstCtx.rip = uNewIp;
4635 else
4636 return iemRaiseGeneralProtectionFault0(pVCpu);
4637 /** @todo Test 16-bit jump in 64-bit mode. */
4638
4639#ifndef IEM_WITH_CODE_TLB
4640 /* Flush the prefetch buffer. */
4641 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4642#endif
4643
4644 /*
4645 * Clear RF and finish the instruction (maybe raise #DB).
4646 */
4647 return iemRegFinishClearingRF(pVCpu);
4648}
4649
4650
4651/**
4652 * Performs a near jump to the specified address.
4653 *
4654 * May raise a \#GP(0) if the new RIP is outside the code segment limit.
4655 *
4656 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4657 * @param uNewEip The new EIP value.
4658 */
4659VBOXSTRICTRC iemRegRipJumpU32AndFinishClearningRF(PVMCPUCC pVCpu, uint32_t uNewEip) RT_NOEXCEPT
4660{
4661 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
4662 Assert(!IEM_IS_64BIT_CODE(pVCpu));
4663
4664 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
4665 pVCpu->cpum.GstCtx.rip = uNewEip;
4666 else
4667 return iemRaiseGeneralProtectionFault0(pVCpu);
4668
4669#ifndef IEM_WITH_CODE_TLB
4670 /* Flush the prefetch buffer. */
4671 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4672#endif
4673
4674 /*
4675 * Clear RF and finish the instruction (maybe raise #DB).
4676 */
4677 return iemRegFinishClearingRF(pVCpu);
4678}
4679
4680
4681/**
4682 * Performs a near jump to the specified address.
4683 *
4684 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4685 * segment limit.
4686 *
4687 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4688 * @param uNewRip The new RIP value.
4689 */
4690VBOXSTRICTRC iemRegRipJumpU64AndFinishClearningRF(PVMCPUCC pVCpu, uint64_t uNewRip) RT_NOEXCEPT
4691{
4692 Assert(IEM_IS_64BIT_CODE(pVCpu));
4693
4694 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
4695 pVCpu->cpum.GstCtx.rip = uNewRip;
4696 else
4697 return iemRaiseGeneralProtectionFault0(pVCpu);
4698
4699#ifndef IEM_WITH_CODE_TLB
4700 /* Flush the prefetch buffer. */
4701 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4702#endif
4703
4704 /*
4705 * Clear RF and finish the instruction (maybe raise #DB).
4706 */
4707 return iemRegFinishClearingRF(pVCpu);
4708}
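
/*
 * Illustrative sketch (not part of the build): the canonical check the 64-bit
 * jumps above depend on.  With 48 implemented linear-address bits, bits 63:47
 * must be a sign extension of bit 47; equivalently, adding 2^47 must yield a
 * value below 2^48.  The 48-bit width and the helper name are assumptions for
 * the example (the real check is IEM_IS_CANONICAL()).
 *
 * @code
 *  #include <stdint.h>
 *  #include <stdbool.h>
 *
 *  static bool IsCanonical48(uint64_t uAddr)
 *  {
 *      return uAddr + UINT64_C(0x0000800000000000) < UINT64_C(0x0001000000000000);
 *  }
 * @endcode
 *
 * E.g. 0x00007fffffffffff and 0xffff800000000000 pass, 0x0000800000000000 does not.
 */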
4709
4710/** @} */
4711
4712
4713/** @name FPU access and helpers.
4714 *
4715 * @{
4716 */
4717
4718/**
4719 * Updates the x87.DS and FPUDP registers.
4720 *
4721 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4722 * @param pFpuCtx The FPU context.
4723 * @param iEffSeg The effective segment register.
4724 * @param GCPtrEff The effective address relative to @a iEffSeg.
4725 */
4726DECLINLINE(void) iemFpuUpdateDP(PVMCPUCC pVCpu, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
4727{
4728 RTSEL sel;
4729 switch (iEffSeg)
4730 {
4731 case X86_SREG_DS: sel = pVCpu->cpum.GstCtx.ds.Sel; break;
4732 case X86_SREG_SS: sel = pVCpu->cpum.GstCtx.ss.Sel; break;
4733 case X86_SREG_CS: sel = pVCpu->cpum.GstCtx.cs.Sel; break;
4734 case X86_SREG_ES: sel = pVCpu->cpum.GstCtx.es.Sel; break;
4735 case X86_SREG_FS: sel = pVCpu->cpum.GstCtx.fs.Sel; break;
4736 case X86_SREG_GS: sel = pVCpu->cpum.GstCtx.gs.Sel; break;
4737 default:
4738 AssertMsgFailed(("%d\n", iEffSeg));
4739 sel = pVCpu->cpum.GstCtx.ds.Sel;
4740 }
4741 /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
4742 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
4743 {
4744 pFpuCtx->DS = 0;
4745 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
4746 }
4747 else if (!IEM_IS_LONG_MODE(pVCpu)) /** @todo this is weird. explain. */
4748 {
4749 pFpuCtx->DS = sel;
4750 pFpuCtx->FPUDP = GCPtrEff;
4751 }
4752 else
4753 *(uint64_t *)&pFpuCtx->FPUDP = GCPtrEff;
4754}
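
/*
 * Illustrative sketch (not part of the build): in the real/V86-mode branch
 * above the selector is folded into FPUDP the same way real mode forms a
 * linear address, i.e. (selector << 4) + offset, while DS is stored as zero.
 * The helper and the example numbers are made up.
 *
 * @code
 *  #include <stdint.h>
 *
 *  static uint32_t RealModeFpuDp(uint16_t uSel, uint32_t offEff)
 *  {
 *      return offEff + ((uint32_t)uSel << 4);  // e.g. 0x1234:0x0010 -> 0x12350
 *  }
 * @endcode
 */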
4755
4756
4757/**
4758 * Rotates the stack registers in the push direction.
4759 *
4760 * @param pFpuCtx The FPU context.
4761 * @remarks This is a complete waste of time, but fxsave stores the registers in
4762 * stack order.
4763 */
4764DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
4765{
4766 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
4767 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
4768 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
4769 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
4770 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
4771 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
4772 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
4773 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
4774 pFpuCtx->aRegs[0].r80 = r80Tmp;
4775}
4776
4777
4778/**
4779 * Rotates the stack registers in the pop direction.
4780 *
4781 * @param pFpuCtx The FPU context.
4782 * @remarks This is a complete waste of time, but fxsave stores the registers in
4783 * stack order.
4784 */
4785DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
4786{
4787 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
4788 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
4789 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
4790 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
4791 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
4792 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
4793 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
4794 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
4795 pFpuCtx->aRegs[7].r80 = r80Tmp;
4796}
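
/*
 * Illustrative sketch (not part of the build): as the two rotate helpers above
 * suggest, aRegs[] is kept in ST() order (aRegs[0] is ST(0)), matching the
 * FXSAVE image, while the FTW tag bits are indexed by physical register
 * number.  The physical register backing ST(i) is (TOP + i) & 7, which is why
 * every push or pop has to rotate the array by one slot.  The helper below is
 * an assumption for the example.
 *
 * @code
 *  static unsigned PhysRegFromSt(unsigned iTop, unsigned iSt)
 *  {
 *      return (iTop + iSt) & 7;    // e.g. TOP=6: ST(1) lives in physical R7
 *  }
 * @endcode
 */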
4797
4798
4799/**
4800 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
4801 * exception prevents it.
4802 *
4803 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4804 * @param pResult The FPU operation result to push.
4805 * @param pFpuCtx The FPU context.
4806 */
4807static void iemFpuMaybePushResult(PVMCPU pVCpu, PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx) RT_NOEXCEPT
4808{
4809 /* Update FSW and bail if there are pending exceptions afterwards. */
4810 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
4811 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
4812 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4813 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4814 {
4815 if ((fFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4816 Log11(("iemFpuMaybePushResult: %04x:%08RX64: FSW %#x -> %#x\n",
4817 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fFsw));
4818 pFpuCtx->FSW = fFsw;
4819 return;
4820 }
4821
4822 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
4823 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
4824 {
4825 /* All is fine, push the actual value. */
4826 pFpuCtx->FTW |= RT_BIT(iNewTop);
4827 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
4828 }
4829 else if (pFpuCtx->FCW & X86_FCW_IM)
4830 {
4831 /* Masked stack overflow, push QNaN. */
4832 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
4833 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
4834 }
4835 else
4836 {
4837 /* Raise stack overflow, don't push anything. */
4838 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
4839 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
4840 Log11(("iemFpuMaybePushResult: %04x:%08RX64: stack overflow (FSW=%#x)\n",
4841 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
4842 return;
4843 }
4844
4845 fFsw &= ~X86_FSW_TOP_MASK;
4846 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
4847 pFpuCtx->FSW = fFsw;
4848
4849 iemFpuRotateStackPush(pFpuCtx);
4850 RT_NOREF(pVCpu);
4851}
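
/*
 * Illustrative sketch (not part of the build): the "(TOP + 7) & 7" used above
 * is a modulo-8 decrement, i.e. a push moves TOP down by one register; the
 * value then lands in what becomes ST(0) after the rotation.  A tiny
 * standalone check (names made up):
 *
 * @code
 *  #include <assert.h>
 *
 *  static unsigned FpuTopAfterPush(unsigned iTop)
 *  {
 *      return (iTop + 7) & 7;      // 0 -> 7, 5 -> 4, ...
 *  }
 *
 *  static void FpuTopSelfTest(void)
 *  {
 *      assert(FpuTopAfterPush(0) == 7);
 *      assert(FpuTopAfterPush(5) == 4);
 *  }
 * @endcode
 */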
4852
4853
4854/**
4855 * Stores a result in a FPU register and updates the FSW and FTW.
4856 *
4857 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4858 * @param pFpuCtx The FPU context.
4859 * @param pResult The result to store.
4860 * @param iStReg Which FPU register to store it in.
4861 */
4862static void iemFpuStoreResultOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg) RT_NOEXCEPT
4863{
4864 Assert(iStReg < 8);
4865 uint16_t fNewFsw = pFpuCtx->FSW;
4866 uint16_t const iReg = (X86_FSW_TOP_GET(fNewFsw) + iStReg) & X86_FSW_TOP_SMASK;
4867 fNewFsw &= ~X86_FSW_C_MASK;
4868 fNewFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
4869 if ((fNewFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4870 Log11(("iemFpuStoreResultOnly: %04x:%08RX64: FSW %#x -> %#x\n",
4871 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fNewFsw));
4872 pFpuCtx->FSW = fNewFsw;
4873 pFpuCtx->FTW |= RT_BIT(iReg);
4874 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
4875 RT_NOREF(pVCpu);
4876}
4877
4878
4879/**
4880 * Only updates the FPU status word (FSW) with the result of the current
4881 * instruction.
4882 *
4883 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4884 * @param pFpuCtx The FPU context.
4885 * @param u16FSW The FSW output of the current instruction.
4886 */
4887static void iemFpuUpdateFSWOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint16_t u16FSW) RT_NOEXCEPT
4888{
4889 uint16_t fNewFsw = pFpuCtx->FSW;
4890 fNewFsw &= ~X86_FSW_C_MASK;
4891 fNewFsw |= u16FSW & ~X86_FSW_TOP_MASK;
4892 if ((fNewFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4893 Log11(("iemFpuUpdateFSWOnly: %04x:%08RX64: FSW %#x -> %#x\n",
4894 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fNewFsw));
4895 pFpuCtx->FSW = fNewFsw;
4896 RT_NOREF(pVCpu);
4897}
4898
4899
4900/**
4901 * Pops one item off the FPU stack if no pending exception prevents it.
4902 *
4903 * @param pFpuCtx The FPU context.
4904 */
4905static void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx) RT_NOEXCEPT
4906{
4907 /* Check pending exceptions. */
4908 uint16_t uFSW = pFpuCtx->FSW;
4909 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4910 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4911 return;
4912
4913 /* TOP--. */
4914 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
4915 uFSW &= ~X86_FSW_TOP_MASK;
4916 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
4917 pFpuCtx->FSW = uFSW;
4918
4919 /* Mark the previous ST0 as empty. */
4920 iOldTop >>= X86_FSW_TOP_SHIFT;
4921 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
4922
4923 /* Rotate the registers. */
4924 iemFpuRotateStackPop(pFpuCtx);
4925}
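
/*
 * Illustrative sketch (not part of the build): the "+ (9 << X86_FSW_TOP_SHIFT)"
 * above adds 9 in TOP-field units, and since the field is only 3 bits wide
 * that is +1 modulo 8 - TOP moves past the register being popped.  Equivalent
 * arithmetic done directly on the field, with the usual TOP position
 * (bits 13:11) spelled out as local constants:
 *
 * @code
 *  #include <stdint.h>
 *
 *  #define MY_TOP_SHIFT 11
 *  #define MY_TOP_MASK  (UINT16_C(7) << MY_TOP_SHIFT)
 *
 *  static uint16_t FswTopPlusOne(uint16_t fFsw)
 *  {
 *      uint16_t const iOldTopField = fFsw & MY_TOP_MASK;
 *      fFsw &= ~MY_TOP_MASK;
 *      fFsw |= (uint16_t)(iOldTopField + (UINT16_C(9) << MY_TOP_SHIFT)) & MY_TOP_MASK;
 *      return fFsw;                // TOP=7 wraps to TOP=0, TOP=3 becomes 4, ...
 *  }
 * @endcode
 */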
4926
4927
4928/**
4929 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
4930 *
4931 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4932 * @param pResult The FPU operation result to push.
4933 * @param uFpuOpcode The FPU opcode value.
4934 */
4935void iemFpuPushResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint16_t uFpuOpcode) RT_NOEXCEPT
4936{
4937 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4938 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
4939 iemFpuMaybePushResult(pVCpu, pResult, pFpuCtx);
4940}
4941
4942
4943/**
4944 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
4945 * and sets FPUDP and FPUDS.
4946 *
4947 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4948 * @param pResult The FPU operation result to push.
4949 * @param iEffSeg The effective segment register.
4950 * @param GCPtrEff The effective address relative to @a iEffSeg.
4951 * @param uFpuOpcode The FPU opcode value.
4952 */
4953void iemFpuPushResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff,
4954 uint16_t uFpuOpcode) RT_NOEXCEPT
4955{
4956 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4957 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
4958 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
4959 iemFpuMaybePushResult(pVCpu, pResult, pFpuCtx);
4960}
4961
4962
4963/**
4964 * Replace ST0 with the first value and push the second onto the FPU stack,
4965 * unless a pending exception prevents it.
4966 *
4967 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4968 * @param pResult The FPU operation result to store and push.
4969 * @param uFpuOpcode The FPU opcode value.
4970 */
4971void iemFpuPushResultTwo(PVMCPUCC pVCpu, PIEMFPURESULTTWO pResult, uint16_t uFpuOpcode) RT_NOEXCEPT
4972{
4973 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4974 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
4975
4976 /* Update FSW and bail if there are pending exceptions afterwards. */
4977 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
4978 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
4979 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4980 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4981 {
4982 if ((fFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4983 Log11(("iemFpuPushResultTwo: %04x:%08RX64: FSW %#x -> %#x\n",
4984 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fFsw));
4985 pFpuCtx->FSW = fFsw;
4986 return;
4987 }
4988
4989 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
4990 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
4991 {
4992 /* All is fine, push the actual value. */
4993 pFpuCtx->FTW |= RT_BIT(iNewTop);
4994 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
4995 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
4996 }
4997 else if (pFpuCtx->FCW & X86_FCW_IM)
4998 {
4999 /* Masked stack overflow, push QNaN. */
5000 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
5001 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
5002 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5003 }
5004 else
5005 {
5006 /* Raise stack overflow, don't push anything. */
5007 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
5008 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
5009 Log11(("iemFpuPushResultTwo: %04x:%08RX64: stack overflow (FSW=%#x)\n",
5010 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5011 return;
5012 }
5013
5014 fFsw &= ~X86_FSW_TOP_MASK;
5015 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
5016 pFpuCtx->FSW = fFsw;
5017
5018 iemFpuRotateStackPush(pFpuCtx);
5019}
5020
5021
5022/**
5023 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
5024 * FOP.
5025 *
5026 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5027 * @param pResult The result to store.
5028 * @param iStReg Which FPU register to store it in.
5029 * @param uFpuOpcode The FPU opcode value.
5030 */
5031void iemFpuStoreResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5032{
5033 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5034 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5035 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5036}
5037
5038
5039/**
5040 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
5041 * FOP, and then pops the stack.
5042 *
5043 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5044 * @param pResult The result to store.
5045 * @param iStReg Which FPU register to store it in.
5046 * @param uFpuOpcode The FPU opcode value.
5047 */
5048void iemFpuStoreResultThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5049{
5050 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5051 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5052 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5053 iemFpuMaybePopOne(pFpuCtx);
5054}
5055
5056
5057/**
5058 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
5059 * FPUDP, and FPUDS.
5060 *
5061 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5062 * @param pResult The result to store.
5063 * @param iStReg Which FPU register to store it in.
5064 * @param iEffSeg The effective memory operand selector register.
5065 * @param GCPtrEff The effective memory operand offset.
5066 * @param uFpuOpcode The FPU opcode value.
5067 */
5068void iemFpuStoreResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
5069 uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5070{
5071 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5072 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5073 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5074 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5075}
5076
5077
5078/**
5079 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
5080 * FPUDP, and FPUDS, and then pops the stack.
5081 *
5082 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5083 * @param pResult The result to store.
5084 * @param iStReg Which FPU register to store it in.
5085 * @param iEffSeg The effective memory operand selector register.
5086 * @param GCPtrEff The effective memory operand offset.
5087 * @param uFpuOpcode The FPU opcode value.
5088 */
5089void iemFpuStoreResultWithMemOpThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult,
5090 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5091{
5092 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5093 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5094 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5095 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5096 iemFpuMaybePopOne(pFpuCtx);
5097}
5098
5099
5100/**
5101 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
5102 *
5103 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5104 * @param uFpuOpcode The FPU opcode value.
5105 */
5106void iemFpuUpdateOpcodeAndIp(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5107{
5108 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5109 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5110}
5111
5112
5113/**
5114 * Updates the FSW, FOP, FPUIP, and FPUCS.
5115 *
5116 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5117 * @param u16FSW The FSW from the current instruction.
5118 * @param uFpuOpcode The FPU opcode value.
5119 */
5120void iemFpuUpdateFSW(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT
5121{
5122 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5123 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5124 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5125}
5126
5127
5128/**
5129 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
5130 *
5131 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5132 * @param u16FSW The FSW from the current instruction.
5133 * @param uFpuOpcode The FPU opcode value.
5134 */
5135void iemFpuUpdateFSWThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT
5136{
5137 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5138 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5139 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5140 iemFpuMaybePopOne(pFpuCtx);
5141}
5142
5143
5144/**
5145 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
5146 *
5147 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5148 * @param u16FSW The FSW from the current instruction.
5149 * @param iEffSeg The effective memory operand selector register.
5150 * @param GCPtrEff The effective memory operand offset.
5151 * @param uFpuOpcode The FPU opcode value.
5152 */
5153void iemFpuUpdateFSWWithMemOp(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5154{
5155 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5156 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5157 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5158 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5159}
5160
5161
5162/**
5163 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
5164 *
5165 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5166 * @param u16FSW The FSW from the current instruction.
5167 * @param uFpuOpcode The FPU opcode value.
5168 */
5169void iemFpuUpdateFSWThenPopPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT
5170{
5171 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5172 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5173 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5174 iemFpuMaybePopOne(pFpuCtx);
5175 iemFpuMaybePopOne(pFpuCtx);
5176}
5177
5178
5179/**
5180 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
5181 *
5182 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5183 * @param u16FSW The FSW from the current instruction.
5184 * @param iEffSeg The effective memory operand selector register.
5185 * @param GCPtrEff The effective memory operand offset.
5186 * @param uFpuOpcode The FPU opcode value.
5187 */
5188void iemFpuUpdateFSWWithMemOpThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff,
5189 uint16_t uFpuOpcode) RT_NOEXCEPT
5190{
5191 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5192 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5193 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5194 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5195 iemFpuMaybePopOne(pFpuCtx);
5196}
5197
5198
5199/**
5200 * Worker routine for raising an FPU stack underflow exception.
5201 *
5202 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5203 * @param pFpuCtx The FPU context.
5204 * @param iStReg The stack register being accessed.
5205 */
5206static void iemFpuStackUnderflowOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint8_t iStReg) RT_NOEXCEPT
5207{
5208 Assert(iStReg < 8 || iStReg == UINT8_MAX);
5209 if (pFpuCtx->FCW & X86_FCW_IM)
5210 {
5211 /* Masked underflow. */
5212 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5213 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5214 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5215 if (iStReg != UINT8_MAX)
5216 {
5217 pFpuCtx->FTW |= RT_BIT(iReg);
5218 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
5219 }
5220 }
5221 else
5222 {
5223 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5224 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5225 Log11(("iemFpuStackUnderflowOnly: %04x:%08RX64: underflow (FSW=%#x)\n",
5226 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5227 }
5228 RT_NOREF(pVCpu);
5229}
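
/*
 * Illustrative sketch (not part of the build): when FCW.IM is set the
 * underflow/overflow workers above quietly store a QNaN instead of trapping;
 * the value used is the x87 "real indefinite" encoding (sign=1,
 * exponent=0x7fff, mantissa=0xc000000000000000).  The struct layout below is a
 * made-up stand-in for the real 80-bit register type.
 *
 * @code
 *  #include <stdint.h>
 *
 *  typedef struct { uint64_t u64Mantissa; uint16_t u16SignExp; } MYF80;
 *
 *  static MYF80 FpuIndefiniteQNan(void)
 *  {
 *      MYF80 r;
 *      r.u16SignExp  = UINT16_C(0xffff);               // sign=1, exponent=0x7fff
 *      r.u64Mantissa = UINT64_C(0xc000000000000000);   // integer bit + QNaN bit
 *      return r;
 *  }
 * @endcode
 */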
5230
5231
5232/**
5233 * Raises a FPU stack underflow exception.
5234 *
5235 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5236 * @param iStReg The destination register that should be loaded
5237 * with QNaN if \#IS is not masked. Specify
5238 * UINT8_MAX if none (like for fcom).
5239 * @param uFpuOpcode The FPU opcode value.
5240 */
5241void iemFpuStackUnderflow(PVMCPUCC pVCpu, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5242{
5243 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5244 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5245 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5246}
5247
5248
5249void iemFpuStackUnderflowWithMemOp(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5250{
5251 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5252 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5253 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5254 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5255}
5256
5257
5258void iemFpuStackUnderflowThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5259{
5260 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5261 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5262 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5263 iemFpuMaybePopOne(pFpuCtx);
5264}
5265
5266
5267void iemFpuStackUnderflowWithMemOpThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff,
5268 uint16_t uFpuOpcode) RT_NOEXCEPT
5269{
5270 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5271 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5272 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5273 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5274 iemFpuMaybePopOne(pFpuCtx);
5275}
5276
5277
5278void iemFpuStackUnderflowThenPopPop(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5279{
5280 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5281 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5282 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, UINT8_MAX);
5283 iemFpuMaybePopOne(pFpuCtx);
5284 iemFpuMaybePopOne(pFpuCtx);
5285}
5286
5287
5288void iemFpuStackPushUnderflow(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5289{
5290 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5291 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5292
5293 if (pFpuCtx->FCW & X86_FCW_IM)
5294 {
5295 /* Masked overflow - Push QNaN. */
5296 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5297 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5298 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5299 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5300 pFpuCtx->FTW |= RT_BIT(iNewTop);
5301 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5302 iemFpuRotateStackPush(pFpuCtx);
5303 }
5304 else
5305 {
5306 /* Exception pending - don't change TOP or the register stack. */
5307 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5308 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5309 Log11(("iemFpuStackPushUnderflow: %04x:%08RX64: underflow (FSW=%#x)\n",
5310 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5311 }
5312}
5313
5314
5315void iemFpuStackPushUnderflowTwo(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5316{
5317 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5318 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5319
5320 if (pFpuCtx->FCW & X86_FCW_IM)
5321 {
5322 /* Masked overflow - Push QNaN. */
5323 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5324 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5325 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5326 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5327 pFpuCtx->FTW |= RT_BIT(iNewTop);
5328 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
5329 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5330 iemFpuRotateStackPush(pFpuCtx);
5331 }
5332 else
5333 {
5334 /* Exception pending - don't change TOP or the register stack. */
5335 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5336 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5337 Log11(("iemFpuStackPushUnderflowTwo: %04x:%08RX64: underflow (FSW=%#x)\n",
5338 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5339 }
5340}
5341
5342
5343/**
5344 * Worker routine for raising an FPU stack overflow exception on a push.
5345 *
5346 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5347 * @param pFpuCtx The FPU context.
5348 */
5349static void iemFpuStackPushOverflowOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx) RT_NOEXCEPT
5350{
5351 if (pFpuCtx->FCW & X86_FCW_IM)
5352 {
5353 /* Masked overflow. */
5354 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5355 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5356 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
5357 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5358 pFpuCtx->FTW |= RT_BIT(iNewTop);
5359 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5360 iemFpuRotateStackPush(pFpuCtx);
5361 }
5362 else
5363 {
5364 /* Exception pending - don't change TOP or the register stack. */
5365 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5366 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5367 Log11(("iemFpuStackPushOverflowOnly: %04x:%08RX64: overflow (FSW=%#x)\n",
5368 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5369 }
5370 RT_NOREF(pVCpu);
5371}
5372
5373
5374/**
5375 * Raises a FPU stack overflow exception on a push.
5376 *
5377 * @param pVCpu The cross context virtual CPU structure of the calling thread.
 * @param uFpuOpcode The FPU opcode value.
5378 */
5379void iemFpuStackPushOverflow(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5380{
5381 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5382 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5383 iemFpuStackPushOverflowOnly(pVCpu, pFpuCtx);
5384}
5385
5386
5387/**
5388 * Raises a FPU stack overflow exception on a push with a memory operand.
5389 *
5390 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5391 * @param iEffSeg The effective memory operand selector register.
5392 * @param GCPtrEff The effective memory operand offset.
 * @param uFpuOpcode The FPU opcode value.
5393 */
5394void iemFpuStackPushOverflowWithMemOp(PVMCPUCC pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5395{
5396 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5397 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5398 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5399 iemFpuStackPushOverflowOnly(pVCpu, pFpuCtx);
5400}
5401
5402/** @} */
5403
5404
5405/** @name SSE+AVX SIMD access and helpers.
5406 *
5407 * @{
5408 */
5409/**
5410 * Stores a result in a SIMD XMM register, updates the MXCSR.
5411 *
5412 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5413 * @param pResult The result to store.
5414 * @param iXmmReg Which SIMD XMM register to store the result in.
5415 */
5416void iemSseStoreResult(PVMCPUCC pVCpu, PCIEMSSERESULT pResult, uint8_t iXmmReg) RT_NOEXCEPT
5417{
5418 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5419 pFpuCtx->MXCSR |= pResult->MXCSR & X86_MXCSR_XCPT_FLAGS;
5420
5421 /* The result is only updated if there is no unmasked exception pending. */
5422 if (( ~((pFpuCtx->MXCSR & X86_MXCSR_XCPT_MASK) >> X86_MXCSR_XCPT_MASK_SHIFT)
5423 & (pFpuCtx->MXCSR & X86_MXCSR_XCPT_FLAGS)) == 0)
5424 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXmmReg] = pResult->uResult;
5425}
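
/*
 * Illustrative sketch (not part of the build): MXCSR keeps the six exception
 * flags in bits 0..5 and the corresponding mask bits in bits 7..12, so
 * shifting the mask field down by 7 lines it up with the flags; the register
 * update above is suppressed exactly when "~masks & flags" is non-zero.
 * Standalone form with the bit layout written out as local constants:
 *
 * @code
 *  #include <stdint.h>
 *  #include <stdbool.h>
 *
 *  #define MY_MXCSR_XCPT_FLAGS      UINT32_C(0x003f)  // IE DE ZE OE UE PE
 *  #define MY_MXCSR_XCPT_MASK       UINT32_C(0x1f80)  // IM DM ZM OM UM PM
 *  #define MY_MXCSR_XCPT_MASK_SHIFT 7
 *
 *  static bool SseHasUnmaskedXcpt(uint32_t fMxcsr)
 *  {
 *      return (  ~((fMxcsr & MY_MXCSR_XCPT_MASK) >> MY_MXCSR_XCPT_MASK_SHIFT)
 *              & (fMxcsr & MY_MXCSR_XCPT_FLAGS)) != 0;
 *  }
 * @endcode
 */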
5426
5427
5428/**
5429 * Updates the MXCSR.
5430 *
5431 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5432 * @param fMxcsr The new MXCSR value.
5433 */
5434void iemSseUpdateMxcsr(PVMCPUCC pVCpu, uint32_t fMxcsr) RT_NOEXCEPT
5435{
5436 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5437 pFpuCtx->MXCSR |= fMxcsr & X86_MXCSR_XCPT_FLAGS;
5438}
5439/** @} */
5440
5441
5442/** @name Memory access.
5443 *
5444 * @{
5445 */
5446
5447
5448/**
5449 * Updates the IEMCPU::cbWritten counter if applicable.
5450 *
5451 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5452 * @param fAccess The access being accounted for.
5453 * @param cbMem The access size.
5454 */
5455DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPUCC pVCpu, uint32_t fAccess, size_t cbMem)
5456{
5457 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
5458 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
5459 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
5460}
5461
5462
5463/**
5464 * Applies the segment limit, base and attributes.
5465 *
5466 * This may raise a \#GP or \#SS.
5467 *
5468 * @returns VBox strict status code.
5469 *
5470 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5471 * @param fAccess The kind of access which is being performed.
5472 * @param iSegReg The index of the segment register to apply.
5473 * This is UINT8_MAX if none (for IDT, GDT, LDT,
5474 * TSS, ++).
5475 * @param cbMem The access size.
5476 * @param pGCPtrMem Pointer to the guest memory address to apply
5477 * segmentation to. Input and output parameter.
5478 */
5479VBOXSTRICTRC iemMemApplySegment(PVMCPUCC pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem) RT_NOEXCEPT
5480{
5481 if (iSegReg == UINT8_MAX)
5482 return VINF_SUCCESS;
5483
5484 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
5485 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
5486 switch (IEM_GET_CPU_MODE(pVCpu))
5487 {
5488 case IEMMODE_16BIT:
5489 case IEMMODE_32BIT:
5490 {
5491 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
5492 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
5493
5494 if ( pSel->Attr.n.u1Present
5495 && !pSel->Attr.n.u1Unusable)
5496 {
5497 Assert(pSel->Attr.n.u1DescType);
5498 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
5499 {
5500 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5501 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
5502 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
5503
5504 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
5505 {
5506 /** @todo CPL check. */
5507 }
5508
5509 /*
5510 * There are two kinds of data selectors, normal and expand down.
5511 */
5512 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
5513 {
5514 if ( GCPtrFirst32 > pSel->u32Limit
5515 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
5516 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5517 }
5518 else
5519 {
5520 /*
5521 * The upper boundary is defined by the B bit, not the G bit!
5522 */
5523 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
5524 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
5525 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5526 }
5527 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
5528 }
5529 else
5530 {
5531 /*
5532 * A code selector can usually be used to read through; writing is
5533 * only permitted in real and V8086 mode.
5534 */
5535 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5536 || ( (fAccess & IEM_ACCESS_TYPE_READ)
5537 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
5538 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
5539 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
5540
5541 if ( GCPtrFirst32 > pSel->u32Limit
5542 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
5543 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5544
5545 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
5546 {
5547 /** @todo CPL check. */
5548 }
5549
5550 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
5551 }
5552 }
5553 else
5554 return iemRaiseGeneralProtectionFault0(pVCpu);
5555 return VINF_SUCCESS;
5556 }
5557
5558 case IEMMODE_64BIT:
5559 {
5560 RTGCPTR GCPtrMem = *pGCPtrMem;
5561 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
5562 *pGCPtrMem = GCPtrMem + pSel->u64Base;
5563
5564 Assert(cbMem >= 1);
5565 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
5566 return VINF_SUCCESS;
5567 /** @todo We should probably raise \#SS(0) here if segment is SS; see AMD spec.
5568 * 4.12.2 "Data Limit Checks in 64-bit Mode". */
5569 return iemRaiseGeneralProtectionFault0(pVCpu);
5570 }
5571
5572 default:
5573 AssertFailedReturn(VERR_IEM_IPE_7);
5574 }
5575}
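
/*
 * Illustrative sketch (not part of the build): for expand-down data segments
 * the valid offset range is inverted - the access must lie strictly above the
 * limit and must not exceed 0xffff (B=0) or 0xffffffff (B=1).  A standalone
 * check mirroring the branch above (helper name is made up):
 *
 * @code
 *  #include <stdint.h>
 *  #include <stdbool.h>
 *
 *  static bool ExpandDownAccessOk(uint32_t offFirst, uint32_t offLast,
 *                                 uint32_t cbLimit, bool fDefBig)
 *  {
 *      uint32_t const offMax = fDefBig ? UINT32_MAX : UINT32_C(0xffff);
 *      return offFirst > cbLimit && offLast <= offMax;
 *  }
 * @endcode
 */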
5576
5577
5578/**
5579 * Translates a virtual address to a physical address and checks if we
5580 * can access the page as specified.
5581 *
5582 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5583 * @param GCPtrMem The virtual address.
5584 * @param cbAccess The access size, for raising \#PF correctly for
5585 * FXSAVE and such.
5586 * @param fAccess The intended access.
5587 * @param pGCPhysMem Where to return the physical address.
5588 */
5589VBOXSTRICTRC iemMemPageTranslateAndCheckAccess(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint32_t cbAccess,
5590 uint32_t fAccess, PRTGCPHYS pGCPhysMem) RT_NOEXCEPT
5591{
5592 /** @todo Need a different PGM interface here. We're currently using
5593 * generic / REM interfaces. This won't cut it for R0. */
5594 /** @todo If/when PGM handles paged real-mode, we can remove the hack in
5595 * iemSvmWorldSwitch/iemVmxWorldSwitch to work around raising a page-fault
5596 * here. */
5597 PGMPTWALK Walk;
5598 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
5599 if (RT_FAILURE(rc))
5600 {
5601 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
5602 /** @todo Check unassigned memory in unpaged mode. */
5603 /** @todo Reserved bits in page tables. Requires new PGM interface. */
5604#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5605 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5606 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
5607#endif
5608 *pGCPhysMem = NIL_RTGCPHYS;
5609 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess, rc);
5610 }
5611
5612 /* If the page is writable and does not have the no-exec bit set, all
5613 access is allowed. Otherwise we'll have to check more carefully... */
5614 if ((Walk.fEffective & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
5615 {
5616 /* Write to read only memory? */
5617 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5618 && !(Walk.fEffective & X86_PTE_RW)
5619 && ( ( IEM_GET_CPL(pVCpu) == 3
5620 && !(fAccess & IEM_ACCESS_WHAT_SYS))
5621 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
5622 {
5623 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
5624 *pGCPhysMem = NIL_RTGCPHYS;
5625#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5626 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5627 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5628#endif
5629 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
5630 }
5631
5632 /* Kernel memory accessed by userland? */
5633 if ( !(Walk.fEffective & X86_PTE_US)
5634 && IEM_GET_CPL(pVCpu) == 3
5635 && !(fAccess & IEM_ACCESS_WHAT_SYS))
5636 {
5637 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
5638 *pGCPhysMem = NIL_RTGCPHYS;
5639#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5640 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5641 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5642#endif
5643 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess, VERR_ACCESS_DENIED);
5644 }
5645
5646 /* Executing non-executable memory? */
5647 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
5648 && (Walk.fEffective & X86_PTE_PAE_NX)
5649 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) )
5650 {
5651 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
5652 *pGCPhysMem = NIL_RTGCPHYS;
5653#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5654 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5655 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5656#endif
5657 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
5658 VERR_ACCESS_DENIED);
5659 }
5660 }
5661
5662 /*
5663 * Set the dirty / access flags.
5664 * ASSUMES this is set when the address is translated rather than on commit...
5665 */
5666 /** @todo testcase: check when A and D bits are actually set by the CPU. */
5667 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
5668 if ((Walk.fEffective & fAccessedDirty) != fAccessedDirty)
5669 {
5670 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
5671 AssertRC(rc2);
5672 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
5673 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
5674 }
5675
5676 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
5677 *pGCPhysMem = GCPhys;
5678 return VINF_SUCCESS;
5679}
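
/*
 * Illustrative sketch (not part of the build): once the page walk has produced
 * the guest-physical page, the byte address returned above is simply that page
 * combined with the offset bits of the virtual address (the low 12 bits for
 * 4 KiB pages).  Standalone form assuming 4 KiB pages (names made up):
 *
 * @code
 *  #include <stdint.h>
 *
 *  #define MY_PAGE_OFFSET_MASK UINT64_C(0xfff)
 *
 *  static uint64_t PhysFromWalk(uint64_t GCPhysPage, uint64_t GCPtrMem)
 *  {
 *      return (GCPhysPage & ~MY_PAGE_OFFSET_MASK) | (GCPtrMem & MY_PAGE_OFFSET_MASK);
 *  }
 * @endcode
 */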
5680
5681
5682/**
5683 * Looks up a memory mapping entry.
5684 *
5685 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
5686 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5687 * @param pvMem The memory address.
5688 * @param fAccess The access flags to match (IEM_ACCESS_XXX).
5689 */
5690DECLINLINE(int) iemMapLookup(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess)
5691{
5692 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
5693 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
5694 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
5695 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5696 return 0;
5697 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
5698 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5699 return 1;
5700 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
5701 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5702 return 2;
5703 return VERR_NOT_FOUND;
5704}
5705
5706
5707/**
5708 * Finds a free memmap entry when using iNextMapping doesn't work.
5709 *
5710 * @returns Memory mapping index, 1024 on failure.
5711 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5712 */
5713static unsigned iemMemMapFindFree(PVMCPUCC pVCpu)
5714{
5715 /*
5716 * The easy case.
5717 */
5718 if (pVCpu->iem.s.cActiveMappings == 0)
5719 {
5720 pVCpu->iem.s.iNextMapping = 1;
5721 return 0;
5722 }
5723
5724 /* There should be enough mappings for all instructions. */
5725 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
5726
5727 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
5728 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
5729 return i;
5730
5731 AssertFailedReturn(1024);
5732}
5733
5734
5735/**
5736 * Commits a bounce buffer that needs writing back and unmaps it.
5737 *
5738 * @returns Strict VBox status code.
5739 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5740 * @param iMemMap The index of the buffer to commit.
5741 * @param fPostponeFail Whether we can postpone writer failures to ring-3.
5742 * Always false in ring-3, obviously.
5743 */
5744static VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPUCC pVCpu, unsigned iMemMap, bool fPostponeFail)
5745{
5746 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
5747 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
5748#ifdef IN_RING3
5749 Assert(!fPostponeFail);
5750 RT_NOREF_PV(fPostponeFail);
5751#endif
5752
5753 /*
5754 * Do the writing.
5755 */
5756 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
5757 if (!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned)
5758 {
5759 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
5760 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
5761 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
5762 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
5763 {
5764 /*
5765 * Carefully and efficiently dealing with access handler return
5766 * codes makes this a little bloated.
5767 */
5768 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
5769 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
5770 pbBuf,
5771 cbFirst,
5772 PGMACCESSORIGIN_IEM);
5773 if (rcStrict == VINF_SUCCESS)
5774 {
5775 if (cbSecond)
5776 {
5777 rcStrict = PGMPhysWrite(pVM,
5778 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5779 pbBuf + cbFirst,
5780 cbSecond,
5781 PGMACCESSORIGIN_IEM);
5782 if (rcStrict == VINF_SUCCESS)
5783 { /* nothing */ }
5784 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5785 {
5786 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
5787 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5788 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5789 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5790 }
5791#ifndef IN_RING3
5792 else if (fPostponeFail)
5793 {
5794 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5795 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5796 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5797 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
5798 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5799 return iemSetPassUpStatus(pVCpu, rcStrict);
5800 }
5801#endif
5802 else
5803 {
5804 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5805 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5806 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5807 return rcStrict;
5808 }
5809 }
5810 }
5811 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5812 {
5813 if (!cbSecond)
5814 {
5815 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
5816 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
5817 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5818 }
5819 else
5820 {
5821 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
5822 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5823 pbBuf + cbFirst,
5824 cbSecond,
5825 PGMACCESSORIGIN_IEM);
5826 if (rcStrict2 == VINF_SUCCESS)
5827 {
5828 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
5829 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5830 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5831 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5832 }
5833 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
5834 {
5835 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
5836 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5837 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
5838 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
5839 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5840 }
5841#ifndef IN_RING3
5842 else if (fPostponeFail)
5843 {
5844 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5845 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5846 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5847 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
5848 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5849 return iemSetPassUpStatus(pVCpu, rcStrict);
5850 }
5851#endif
5852 else
5853 {
5854 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5855 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5856 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
5857 return rcStrict2;
5858 }
5859 }
5860 }
5861#ifndef IN_RING3
5862 else if (fPostponeFail)
5863 {
5864 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5865 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5866 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5867 if (!cbSecond)
5868 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
5869 else
5870 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
5871 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5872 return iemSetPassUpStatus(pVCpu, rcStrict);
5873 }
5874#endif
5875 else
5876 {
5877 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
5878 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5879 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5880 return rcStrict;
5881 }
5882 }
5883 else
5884 {
5885 /*
5886 * No access handlers, much simpler.
5887 */
5888 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
5889 if (RT_SUCCESS(rc))
5890 {
5891 if (cbSecond)
5892 {
5893 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
5894 if (RT_SUCCESS(rc))
5895 { /* likely */ }
5896 else
5897 {
5898 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5899 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5900 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
5901 return rc;
5902 }
5903 }
5904 }
5905 else
5906 {
5907 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
5908 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
5909 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5910 return rc;
5911 }
5912 }
5913 }
5914
5915#if defined(IEM_LOG_MEMORY_WRITES)
5916 Log(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
5917 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
5918 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
5919 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5920 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
5921 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
5922
5923 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
5924 g_cbIemWrote = cbWrote;
5925 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
5926#endif
5927
5928 /*
5929 * Free the mapping entry.
5930 */
5931 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
5932 Assert(pVCpu->iem.s.cActiveMappings != 0);
5933 pVCpu->iem.s.cActiveMappings--;
5934 return VINF_SUCCESS;
5935}
5936
5937
5938/**
5939 * iemMemMap worker that deals with a request crossing pages.
5940 */
5941static VBOXSTRICTRC
5942iemMemBounceBufferMapCrossPage(PVMCPUCC pVCpu, int iMemMap, void **ppvMem, size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
5943{
5944 Assert(cbMem <= GUEST_PAGE_SIZE);
5945
5946 /*
5947 * Do the address translations.
5948 */
5949 uint32_t const cbFirstPage = GUEST_PAGE_SIZE - (uint32_t)(GCPtrFirst & GUEST_PAGE_OFFSET_MASK);
5950 RTGCPHYS GCPhysFirst;
5951 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, cbFirstPage, fAccess, &GCPhysFirst);
5952 if (rcStrict != VINF_SUCCESS)
5953 return rcStrict;
5954 Assert((GCPhysFirst & GUEST_PAGE_OFFSET_MASK) == (GCPtrFirst & GUEST_PAGE_OFFSET_MASK));
5955
5956 uint32_t const cbSecondPage = (uint32_t)cbMem - cbFirstPage;
5957 RTGCPHYS GCPhysSecond;
5958 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK,
5959 cbSecondPage, fAccess, &GCPhysSecond);
5960 if (rcStrict != VINF_SUCCESS)
5961 return rcStrict;
5962 Assert((GCPhysSecond & GUEST_PAGE_OFFSET_MASK) == 0);
5963 GCPhysSecond &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK; /** @todo why? */
5964
5965 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
5966
5967 /*
5968 * Read in the current memory content if it's a read, execute or partial
5969 * write access.
5970 */
5971 uint8_t * const pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
5972
5973 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
5974 {
5975 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
5976 {
5977 /*
5978 * Must carefully deal with access handler status codes here,
5979              * which makes the code a bit bloated.
5980 */
5981 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
5982 if (rcStrict == VINF_SUCCESS)
5983 {
5984 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
5985 if (rcStrict == VINF_SUCCESS)
5986 { /*likely */ }
5987 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5988 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5989 else
5990 {
5991                     Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict=%Rrc (!!)\n",
5992 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5993 return rcStrict;
5994 }
5995 }
5996 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5997 {
5998 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
5999 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
6000 {
6001 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
6002 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6003 }
6004 else
6005 {
6006                     Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
6007                          GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
6008 return rcStrict2;
6009 }
6010 }
6011 else
6012 {
6013                 Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6014 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6015 return rcStrict;
6016 }
6017 }
6018 else
6019 {
6020 /*
6021              * No informational status codes here; much more straightforward.
6022 */
6023 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
6024 if (RT_SUCCESS(rc))
6025 {
6026 Assert(rc == VINF_SUCCESS);
6027 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
6028 if (RT_SUCCESS(rc))
6029 Assert(rc == VINF_SUCCESS);
6030 else
6031 {
6032                     Log(("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
6033 return rc;
6034 }
6035 }
6036 else
6037 {
6038                 Log(("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
6039 return rc;
6040 }
6041 }
6042 }
6043#ifdef VBOX_STRICT
6044 else
6045 memset(pbBuf, 0xcc, cbMem);
6046 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
6047 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
6048#endif
6049 AssertCompileMemberAlignment(VMCPU, iem.s.aBounceBuffers, 64);
6050
6051 /*
6052 * Commit the bounce buffer entry.
6053 */
6054 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
6055 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
6056 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
6057 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
6058 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
6059 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
6060 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
6061 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6062 pVCpu->iem.s.cActiveMappings++;
6063
6064 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6065 *ppvMem = pbBuf;
6066 return VINF_SUCCESS;
6067}
6068
6069
6070/**
6071 * iemMemMap worker that deals with iemMemPageMap failures.
6072 */
6073static VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPUCC pVCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
6074 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
6075{
6076 /*
6077 * Filter out conditions we can handle and the ones which shouldn't happen.
6078 */
6079 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
6080 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
6081 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
6082 {
6083 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
6084 return rcMap;
6085 }
6086 pVCpu->iem.s.cPotentialExits++;
6087
6088 /*
6089 * Read in the current memory content if it's a read, execute or partial
6090 * write access.
6091 */
6092 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
6093 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
6094 {
6095 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
6096 memset(pbBuf, 0xff, cbMem);
6097 else
6098 {
6099 int rc;
6100 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
6101 {
6102 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
6103 if (rcStrict == VINF_SUCCESS)
6104 { /* nothing */ }
6105 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6106 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6107 else
6108 {
6109 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6110 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6111 return rcStrict;
6112 }
6113 }
6114 else
6115 {
6116 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
6117 if (RT_SUCCESS(rc))
6118 { /* likely */ }
6119 else
6120 {
6121 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6122 GCPhysFirst, rc));
6123 return rc;
6124 }
6125 }
6126 }
6127 }
6128#ifdef VBOX_STRICT
6129 else
6130 memset(pbBuf, 0xcc, cbMem);
6131#endif
6132#ifdef VBOX_STRICT
6133 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
6134 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
6135#endif
6136
6137 /*
6138 * Commit the bounce buffer entry.
6139 */
6140 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
6141 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
6142 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
6143 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
6144 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
6145 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
6146 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
6147 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6148 pVCpu->iem.s.cActiveMappings++;
6149
6150 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6151 *ppvMem = pbBuf;
6152 return VINF_SUCCESS;
6153}
6154
6155
6156
6157/**
6158 * Maps the specified guest memory for the given kind of access.
6159 *
6160 * This may be using bounce buffering of the memory if it's crossing a page
6161 * boundary or if there is an access handler installed for any of it. Because
6162 * of lock prefix guarantees, we're in for some extra clutter when this
6163 * happens.
6164 *
6165 * This may raise a \#GP, \#SS, \#PF or \#AC.
6166 *
6167 * @returns VBox strict status code.
6168 *
6169 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6170 * @param ppvMem Where to return the pointer to the mapped memory.
6171 * @param cbMem The number of bytes to map. This is usually 1, 2, 4, 6,
6172 * 8, 12, 16, 32 or 512. When used by string operations
6173 * it can be up to a page.
6174 * @param iSegReg The index of the segment register to use for this
6175 * access. The base and limits are checked. Use UINT8_MAX
6176 * to indicate that no segmentation is required (for IDT,
6177 * GDT and LDT accesses).
6178 * @param GCPtrMem The address of the guest memory.
6179 * @param fAccess How the memory is being accessed. The
6180 * IEM_ACCESS_TYPE_XXX bit is used to figure out how to map
6181 * the memory, while the IEM_ACCESS_WHAT_XXX bit is used
6182 * when raising exceptions.
6183 * @param uAlignCtl Alignment control:
6184 * - Bits 15:0 are the alignment mask.
6185 * - Bits 31:16 hold flags like IEM_MEMMAP_F_ALIGN_GP,
6186 * IEM_MEMMAP_F_ALIGN_SSE, and
6187 * IEM_MEMMAP_F_ALIGN_GP_OR_AC.
6188 * Pass zero to skip alignment.
6189 */
6190VBOXSTRICTRC iemMemMap(PVMCPUCC pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem,
6191 uint32_t fAccess, uint32_t uAlignCtl) RT_NOEXCEPT
6192{
6193 /*
6194 * Check the input and figure out which mapping entry to use.
6195 */
6196 Assert(cbMem <= sizeof(pVCpu->iem.s.aBounceBuffers[0]));
6197 Assert( cbMem <= 64 || cbMem == 512 || cbMem == 256 || cbMem == 108 || cbMem == 104 || cbMem == 102 || cbMem == 94
6198 || (iSegReg == UINT8_MAX && uAlignCtl == 0 && fAccess == IEM_ACCESS_DATA_R /* for the CPUID logging interface */) );
6199 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
6200 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
6201
6202 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
6203 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6204 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
6205 {
6206 iMemMap = iemMemMapFindFree(pVCpu);
6207 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
6208 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
6209 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
6210 pVCpu->iem.s.aMemMappings[2].fAccess),
6211 VERR_IEM_IPE_9);
6212 }
6213
6214 /*
6215 * Map the memory, checking that we can actually access it. If something
6216 * slightly complicated happens, fall back on bounce buffering.
6217 */
6218 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
6219 if (rcStrict == VINF_SUCCESS)
6220 { /* likely */ }
6221 else
6222 return rcStrict;
6223
6224 if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE) /* Crossing a page boundary? */
6225 { /* likely */ }
6226 else
6227 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
6228
6229 /*
6230 * Alignment check.
6231 */
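    /* The low word of uAlignCtl is the alignment mask; e.g. a 16 byte SSE access
       passes 15 | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE (see
       iemMemFetchDataU128AlignedSse below). */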
6232 if ( (GCPtrMem & (uAlignCtl & UINT16_MAX)) == 0 )
6233 { /* likelyish */ }
6234 else
6235 {
6236 /* Misaligned access. */
6237 if ((fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6238 {
6239 if ( !(uAlignCtl & IEM_MEMMAP_F_ALIGN_GP)
6240 || ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_SSE)
6241 && (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) )
6242 {
6243 AssertCompile(X86_CR0_AM == X86_EFL_AC);
6244
6245 if (iemMemAreAlignmentChecksEnabled(pVCpu))
6246 return iemRaiseAlignmentCheckException(pVCpu);
6247 }
6248 else if ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_GP_OR_AC)
6249 && (GCPtrMem & 3) /* The value 4 matches 10980xe's FXSAVE and helps make bs3-cpu-basic2 work. */
6250 /** @todo may only apply to 2, 4 or 8 byte misalignments depending on the CPU
6251 * implementation. See FXSAVE/FRSTOR/XSAVE/XRSTOR/++. Using 4 for now as
6252 * that's what FXSAVE does on a 10980xe. */
6253 && iemMemAreAlignmentChecksEnabled(pVCpu))
6254 return iemRaiseAlignmentCheckException(pVCpu);
6255 else
6256 return iemRaiseGeneralProtectionFault0(pVCpu);
6257 }
6258 }
6259
6260#ifdef IEM_WITH_DATA_TLB
6261 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
6262
6263 /*
6264 * Get the TLB entry for this page.
6265 */
6266 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
6267 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
6268 if (pTlbe->uTag == uTag)
6269 {
6270# ifdef VBOX_WITH_STATISTICS
6271 pVCpu->iem.s.DataTlb.cTlbHits++;
6272# endif
6273 }
6274 else
6275 {
6276 pVCpu->iem.s.DataTlb.cTlbMisses++;
6277 PGMPTWALK Walk;
6278 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
6279 if (RT_FAILURE(rc))
6280 {
6281 Log(("iemMemMap: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
6282# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6283 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6284 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
6285# endif
6286 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, rc);
6287 }
6288
6289 Assert(Walk.fSucceeded);
6290 pTlbe->uTag = uTag;
6291 pTlbe->fFlagsAndPhysRev = ~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A); /* skipping NX */
6292 pTlbe->GCPhys = Walk.GCPhys;
6293 pTlbe->pbMappingR3 = NULL;
6294 }
6295
6296 /*
6297 * Check TLB page table level access flags.
6298 */
6299 /* If the page is either supervisor only or non-writable, we need to do
6300 more careful access checks. */
6301 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_WRITE))
6302 {
6303 /* Write to read only memory? */
6304 if ( (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_WRITE)
6305 && (fAccess & IEM_ACCESS_TYPE_WRITE)
6306 && ( ( IEM_GET_CPL(pVCpu) == 3
6307 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6308 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
6309 {
6310 Log(("iemMemMap: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
6311# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6312 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6313 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6314# endif
6315 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
6316 }
6317
6318 /* Kernel memory accessed by userland? */
6319 if ( (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER)
6320 && IEM_GET_CPL(pVCpu) == 3
6321 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6322 {
6323 Log(("iemMemMap: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
6324# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6325 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6326 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6327# endif
6328 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, VERR_ACCESS_DENIED);
6329 }
6330 }
6331
6332 /*
6333 * Set the dirty / access flags.
6334 * ASSUMES this is set when the address is translated rather than on commit...
6335 */
6336 /** @todo testcase: check when A and D bits are actually set by the CPU. */
6337 uint64_t const fTlbAccessedDirty = (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PT_NO_DIRTY : 0) | IEMTLBE_F_PT_NO_ACCESSED;
6338 if (pTlbe->fFlagsAndPhysRev & fTlbAccessedDirty)
6339 {
6340 uint32_t const fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
6341 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
6342 AssertRC(rc2);
6343 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
6344 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
6345 pTlbe->fFlagsAndPhysRev &= ~fTlbAccessedDirty;
6346 }
6347
6348 /*
6349 * Look up the physical page info if necessary.
6350 */
6351 uint8_t *pbMem = NULL;
6352 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6353# ifdef IN_RING3
6354 pbMem = pTlbe->pbMappingR3;
6355# else
6356 pbMem = NULL;
6357# endif
6358 else
6359 {
6360 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
6361 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
6362 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
6363 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
6364         if (RT_LIKELY(pVCpu->iem.s.DataTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
6365 { /* likely */ }
6366 else
6367 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
6368 pTlbe->pbMappingR3 = NULL;
6369 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
6370 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_UNASSIGNED);
6371 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
6372 &pbMem, &pTlbe->fFlagsAndPhysRev);
6373 AssertRCReturn(rc, rc);
6374# ifdef IN_RING3
6375 pTlbe->pbMappingR3 = pbMem;
6376# endif
6377 }
6378
6379 /*
6380 * Check the physical page level access and mapping.
6381 */
6382 if ( !(pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ))
6383 || !(pTlbe->fFlagsAndPhysRev & ( (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PG_NO_WRITE : 0)
6384 | (fAccess & IEM_ACCESS_TYPE_READ ? IEMTLBE_F_PG_NO_READ : 0))) )
6385 { /* probably likely */ }
6386 else
6387 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem,
6388 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,
6389 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED
6390 : pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? VERR_PGM_PHYS_TLB_CATCH_ALL
6391 : VERR_PGM_PHYS_TLB_CATCH_WRITE);
6392 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3)); /* ASSUMPTIONS about PGMPhysIemGCPhys2PtrNoLock behaviour. */
6393
6394 if (pbMem)
6395 {
6396 Assert(!((uintptr_t)pbMem & GUEST_PAGE_OFFSET_MASK));
6397 pbMem = pbMem + (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6398 fAccess |= IEM_ACCESS_NOT_LOCKED;
6399 }
6400 else
6401 {
6402 Assert(!(fAccess & IEM_ACCESS_NOT_LOCKED));
6403 RTGCPHYS const GCPhysFirst = pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6404 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6405 if (rcStrict != VINF_SUCCESS)
6406 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
6407 }
6408
6409 void * const pvMem = pbMem;
6410
6411 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6412 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6413 if (fAccess & IEM_ACCESS_TYPE_READ)
6414 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6415
6416#else /* !IEM_WITH_DATA_TLB */
6417
6418 RTGCPHYS GCPhysFirst;
6419 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, &GCPhysFirst);
6420 if (rcStrict != VINF_SUCCESS)
6421 return rcStrict;
6422
6423 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6424 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6425 if (fAccess & IEM_ACCESS_TYPE_READ)
6426 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6427
6428 void *pvMem;
6429 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6430 if (rcStrict != VINF_SUCCESS)
6431 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
6432
6433#endif /* !IEM_WITH_DATA_TLB */
6434
6435 /*
6436 * Fill in the mapping table entry.
6437 */
6438 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
6439 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
6440 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6441 pVCpu->iem.s.cActiveMappings += 1;
6442
6443 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6444 *ppvMem = pvMem;
6445
6446 return VINF_SUCCESS;
6447}
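
/*
 * Usage sketch (illustrative only): maps a dword for read-write access,
 * modifies it and commits the mapping.  The helper name is hypothetical;
 * IEM_ACCESS_DATA_RW is assumed to be the read+write member of the
 * IEM_ACCESS_XXX family used throughout this file.
 */
#if 0 /* example only, never compiled */
static VBOXSTRICTRC iemExampleAddToDword(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t uAddend) RT_NOEXCEPT
{
    uint32_t *pu32Dst;
    /* Natural alignment mask (sizeof - 1), no #GP/#AC flag bits in uAlignCtl. */
    VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem,
                                      IEM_ACCESS_DATA_RW /* assumed constant */, sizeof(*pu32Dst) - 1);
    if (rcStrict == VINF_SUCCESS)
    {
        *pu32Dst += uAddend;
        rcStrict = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_RW);
    }
    return rcStrict;
}
#endif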
6448
6449
6450/**
6451 * Commits the guest memory if bounce buffered and unmaps it.
6452 *
6453 * @returns Strict VBox status code.
6454 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6455 * @param pvMem The mapping.
6456 * @param fAccess The kind of access.
6457 */
6458VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess) RT_NOEXCEPT
6459{
6460 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
6461 AssertReturn(iMemMap >= 0, iMemMap);
6462
6463 /* If it's bounce buffered, we may need to write back the buffer. */
6464 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6465 {
6466 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6467 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
6468 }
6469 /* Otherwise unlock it. */
6470 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6471 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6472
6473 /* Free the entry. */
6474 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6475 Assert(pVCpu->iem.s.cActiveMappings != 0);
6476 pVCpu->iem.s.cActiveMappings--;
6477 return VINF_SUCCESS;
6478}
6479
6480#ifdef IEM_WITH_SETJMP
6481
6482/**
6483 * Maps the specified guest memory for the given kind of access, longjmp on
6484 * error.
6485 *
6486 * This may be using bounce buffering of the memory if it's crossing a page
6487 * boundary or if there is an access handler installed for any of it. Because
6488 * of lock prefix guarantees, we're in for some extra clutter when this
6489 * happens.
6490 *
6491 * This may raise a \#GP, \#SS, \#PF or \#AC.
6492 *
6493 * @returns Pointer to the mapped memory.
6494 *
6495 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6496 * @param cbMem The number of bytes to map. This is usually 1,
6497 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
6498 * string operations it can be up to a page.
6499 * @param iSegReg The index of the segment register to use for
6500 * this access. The base and limits are checked.
6501 * Use UINT8_MAX to indicate that no segmentation
6502 * is required (for IDT, GDT and LDT accesses).
6503 * @param GCPtrMem The address of the guest memory.
6504 * @param fAccess How the memory is being accessed. The
6505 * IEM_ACCESS_TYPE_XXX bit is used to figure out
6506 * how to map the memory, while the
6507 * IEM_ACCESS_WHAT_XXX bit is used when raising
6508 * exceptions.
6509 * @param uAlignCtl Alignment control:
6510 * - Bits 15:0 are the alignment mask.
6511 * - Bits 31:16 hold flags like IEM_MEMMAP_F_ALIGN_GP,
6512 * IEM_MEMMAP_F_ALIGN_SSE, and
6513 * IEM_MEMMAP_F_ALIGN_GP_OR_AC.
6514 * Pass zero to skip alignment.
6515 */
6516void *iemMemMapJmp(PVMCPUCC pVCpu, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess,
6517 uint32_t uAlignCtl) IEM_NOEXCEPT_MAY_LONGJMP
6518{
6519 /*
6520 * Check the input, check segment access and adjust address
6521 * with segment base.
6522 */
6523 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
6524 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
6525 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
6526
6527 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
6528 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
6529 else IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6530
6531 /*
6532 * Alignment check.
6533 */
6534 if ( (GCPtrMem & (uAlignCtl & UINT16_MAX)) == 0 )
6535 { /* likelyish */ }
6536 else
6537 {
6538 /* Misaligned access. */
6539 if ((fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6540 {
6541 if ( !(uAlignCtl & IEM_MEMMAP_F_ALIGN_GP)
6542 || ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_SSE)
6543 && (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) )
6544 {
6545 AssertCompile(X86_CR0_AM == X86_EFL_AC);
6546
6547 if (iemMemAreAlignmentChecksEnabled(pVCpu))
6548 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
6549 }
6550 else if ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_GP_OR_AC)
6551 && (GCPtrMem & 3) /* The value 4 matches 10980xe's FXSAVE and helps make bs3-cpu-basic2 work. */
6552 /** @todo may only apply to 2, 4 or 8 byte misalignments depending on the CPU
6553 * implementation. See FXSAVE/FRSTOR/XSAVE/XRSTOR/++. Using 4 for now as
6554 * that's what FXSAVE does on a 10980xe. */
6555 && iemMemAreAlignmentChecksEnabled(pVCpu))
6556 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
6557 else
6558 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
6559 }
6560 }
6561
6562 /*
6563 * Figure out which mapping entry to use.
6564 */
6565 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
6566 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6567 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
6568 {
6569 iMemMap = iemMemMapFindFree(pVCpu);
6570 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
6571 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
6572 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
6573 pVCpu->iem.s.aMemMappings[2].fAccess),
6574 IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_9));
6575 }
6576
6577 /*
6578 * Crossing a page boundary?
6579 */
6580 if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE)
6581 { /* No (likely). */ }
6582 else
6583 {
6584 void *pvMem;
6585 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, cbMem, GCPtrMem, fAccess);
6586 if (rcStrict == VINF_SUCCESS)
6587 return pvMem;
6588 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6589 }
6590
6591#ifdef IEM_WITH_DATA_TLB
6592 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
6593
6594 /*
6595 * Get the TLB entry for this page.
6596 */
6597 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
6598 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
6599 if (pTlbe->uTag == uTag)
6600 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
6601 else
6602 {
6603 pVCpu->iem.s.DataTlb.cTlbMisses++;
6604 PGMPTWALK Walk;
6605 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
6606 if (RT_FAILURE(rc))
6607 {
6608             Log(("iemMemMapJmp: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
6609# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6610 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6611 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
6612# endif
6613 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, rc);
6614 }
6615
6616 Assert(Walk.fSucceeded);
6617 pTlbe->uTag = uTag;
6618 pTlbe->fFlagsAndPhysRev = ~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A); /* skipping NX */
6619 pTlbe->GCPhys = Walk.GCPhys;
6620 pTlbe->pbMappingR3 = NULL;
6621 }
6622
6623 /*
6624 * Check the flags and physical revision.
6625 */
6626 /** @todo make the caller pass these in with fAccess. */
6627 uint64_t const fNoUser = (fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS && IEM_GET_CPL(pVCpu) == 3
6628 ? IEMTLBE_F_PT_NO_USER : 0;
6629 uint64_t const fNoWriteNoDirty = fAccess & IEM_ACCESS_TYPE_WRITE
6630 ? IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PT_NO_DIRTY
6631 | ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)
6632 || (IEM_GET_CPL(pVCpu) == 3 && (fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6633 ? IEMTLBE_F_PT_NO_WRITE : 0)
6634 : 0;
6635 uint64_t const fNoRead = fAccess & IEM_ACCESS_TYPE_READ ? IEMTLBE_F_PG_NO_READ : 0;
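    /* Example: a CPL-3 data write with CR0.WP set yields fNoUser=IEMTLBE_F_PT_NO_USER and
       fNoWriteNoDirty=IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_PT_NO_WRITE,
       so the fast path below is only taken when none of those bits are set in the TLB entry
       and the physical revision matches. */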
6636 uint8_t *pbMem = NULL;
6637 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PT_NO_ACCESSED | fNoRead | fNoWriteNoDirty | fNoUser))
6638 == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6639# ifdef IN_RING3
6640 pbMem = pTlbe->pbMappingR3;
6641# else
6642 pbMem = NULL;
6643# endif
6644 else
6645 {
6646 /*
6647 * Okay, something isn't quite right or needs refreshing.
6648 */
6649 /* Write to read only memory? */
6650 if (pTlbe->fFlagsAndPhysRev & fNoWriteNoDirty & IEMTLBE_F_PT_NO_WRITE)
6651 {
6652 Log(("iemMemMapJmp: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
6653# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6654 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6655 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6656# endif
6657 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
6658 }
6659
6660 /* Kernel memory accessed by userland? */
6661 if (pTlbe->fFlagsAndPhysRev & fNoUser & IEMTLBE_F_PT_NO_USER)
6662 {
6663 Log(("iemMemMapJmp: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
6664# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6665 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6666 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6667# endif
6668 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, VERR_ACCESS_DENIED);
6669 }
6670
6671 /* Set the dirty / access flags.
6672 ASSUMES this is set when the address is translated rather than on commit... */
6673 /** @todo testcase: check when A and D bits are actually set by the CPU. */
6674 if (pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY) | IEMTLBE_F_PT_NO_ACCESSED))
6675 {
6676 uint32_t const fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
6677 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
6678 AssertRC(rc2);
6679 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
6680 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
6681 pTlbe->fFlagsAndPhysRev &= ~((fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY) | IEMTLBE_F_PT_NO_ACCESSED);
6682 }
6683
6684 /*
6685 * Check if the physical page info needs updating.
6686 */
6687 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6688# ifdef IN_RING3
6689 pbMem = pTlbe->pbMappingR3;
6690# else
6691 pbMem = NULL;
6692# endif
6693 else
6694 {
6695 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
6696 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
6697 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
6698 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
6699 pTlbe->pbMappingR3 = NULL;
6700 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
6701 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_UNASSIGNED);
6702 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
6703 &pbMem, &pTlbe->fFlagsAndPhysRev);
6704 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
6705# ifdef IN_RING3
6706 pTlbe->pbMappingR3 = pbMem;
6707# endif
6708 }
6709
6710 /*
6711 * Check the physical page level access and mapping.
6712 */
6713 if (!(pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty | fNoRead) & (IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ))))
6714 { /* probably likely */ }
6715 else
6716 {
6717 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, (void **)&pbMem, cbMem,
6718 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,
6719 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED
6720 : pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? VERR_PGM_PHYS_TLB_CATCH_ALL
6721 : VERR_PGM_PHYS_TLB_CATCH_WRITE);
6722 if (rcStrict == VINF_SUCCESS)
6723 return pbMem;
6724 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6725 }
6726 }
6727 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3)); /* ASSUMPTIONS about PGMPhysIemGCPhys2PtrNoLock behaviour. */
6728
6729 if (pbMem)
6730 {
6731 Assert(!((uintptr_t)pbMem & GUEST_PAGE_OFFSET_MASK));
6732 pbMem = pbMem + (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6733 fAccess |= IEM_ACCESS_NOT_LOCKED;
6734 }
6735 else
6736 {
6737 Assert(!(fAccess & IEM_ACCESS_NOT_LOCKED));
6738 RTGCPHYS const GCPhysFirst = pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6739 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6740 if (rcStrict == VINF_SUCCESS)
6741 return pbMem;
6742 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6743 }
6744
6745 void * const pvMem = pbMem;
6746
6747 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6748 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6749 if (fAccess & IEM_ACCESS_TYPE_READ)
6750 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6751
6752#else /* !IEM_WITH_DATA_TLB */
6753
6754
6755 RTGCPHYS GCPhysFirst;
6756 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, &GCPhysFirst);
6757 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
6758 else IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6759
6760 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6761 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6762 if (fAccess & IEM_ACCESS_TYPE_READ)
6763 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6764
6765 void *pvMem;
6766 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6767 if (rcStrict == VINF_SUCCESS)
6768 { /* likely */ }
6769 else
6770 {
6771 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
6772 if (rcStrict == VINF_SUCCESS)
6773 return pvMem;
6774 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6775 }
6776
6777#endif /* !IEM_WITH_DATA_TLB */
6778
6779 /*
6780 * Fill in the mapping table entry.
6781 */
6782 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
6783 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
6784 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6785 pVCpu->iem.s.cActiveMappings++;
6786
6787 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6788 return pvMem;
6789}
6790
6791
6792/**
6793 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
6794 *
6795 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6796 * @param pvMem The mapping.
6797 * @param fAccess The kind of access.
6798 */
6799void iemMemCommitAndUnmapJmp(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP
6800{
6801 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
6802 AssertStmt(iMemMap >= 0, IEM_DO_LONGJMP(pVCpu, iMemMap));
6803
6804 /* If it's bounce buffered, we may need to write back the buffer. */
6805 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6806 {
6807 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6808 {
6809 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
6810 if (rcStrict == VINF_SUCCESS)
6811 return;
6812 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6813 }
6814 }
6815 /* Otherwise unlock it. */
6816 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6817 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6818
6819 /* Free the entry. */
6820 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6821 Assert(pVCpu->iem.s.cActiveMappings != 0);
6822 pVCpu->iem.s.cActiveMappings--;
6823}
6824
6825#endif /* IEM_WITH_SETJMP */
6826
6827#ifndef IN_RING3
6828/**
6829 * Commits the guest memory if bounce buffered and unmaps it; if any bounce
6830 * buffer part runs into trouble, the write is postponed to ring-3 (VMCPU_FF_IEM gets set and the data kept pending).
6831 *
6832 * Allows the instruction to be completed and retired, while the IEM user will
6833 * return to ring-3 immediately afterwards and do the postponed writes there.
6834 *
6835 * @returns VBox status code (no strict statuses). Caller must check
6836 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
6837 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6838 * @param pvMem The mapping.
6839 * @param fAccess The kind of access.
6840 */
6841VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess) RT_NOEXCEPT
6842{
6843 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
6844 AssertReturn(iMemMap >= 0, iMemMap);
6845
6846 /* If it's bounce buffered, we may need to write back the buffer. */
6847 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6848 {
6849 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6850 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
6851 }
6852 /* Otherwise unlock it. */
6853 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6854 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6855
6856 /* Free the entry. */
6857 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6858 Assert(pVCpu->iem.s.cActiveMappings != 0);
6859 pVCpu->iem.s.cActiveMappings--;
6860 return VINF_SUCCESS;
6861}
6862#endif
6863
6864
6865/**
6866 * Rolls back mappings, releasing page locks and such.
6867 *
6868 * The caller shall only call this after checking cActiveMappings.
6869 *
6870 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6871 */
6872void iemMemRollback(PVMCPUCC pVCpu) RT_NOEXCEPT
6873{
6874 Assert(pVCpu->iem.s.cActiveMappings > 0);
6875
6876 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
6877 while (iMemMap-- > 0)
6878 {
6879 uint32_t const fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
6880 if (fAccess != IEM_ACCESS_INVALID)
6881 {
6882 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
6883 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6884 if (!(fAccess & (IEM_ACCESS_BOUNCE_BUFFERED | IEM_ACCESS_NOT_LOCKED)))
6885 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6886 AssertMsg(pVCpu->iem.s.cActiveMappings > 0,
6887 ("iMemMap=%u fAccess=%#x pv=%p GCPhysFirst=%RGp GCPhysSecond=%RGp\n",
6888 iMemMap, fAccess, pVCpu->iem.s.aMemMappings[iMemMap].pv,
6889 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond));
6890 pVCpu->iem.s.cActiveMappings--;
6891 }
6892 }
6893}
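
/*
 * Typical call-site sketch (illustrative): the executor only rolls back when
 * mappings are still active after a failed instruction, e.g.:
 *     if (pVCpu->iem.s.cActiveMappings > 0)
 *         iemMemRollback(pVCpu);
 */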
6894
6895
6896/**
6897 * Fetches a data byte.
6898 *
6899 * @returns Strict VBox status code.
6900 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6901 * @param pu8Dst Where to return the byte.
6902 * @param iSegReg The index of the segment register to use for
6903 * this access. The base and limits are checked.
6904 * @param GCPtrMem The address of the guest memory.
6905 */
6906VBOXSTRICTRC iemMemFetchDataU8(PVMCPUCC pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6907{
6908 /* The lazy approach for now... */
6909 uint8_t const *pu8Src;
6910 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R, 0);
6911 if (rc == VINF_SUCCESS)
6912 {
6913 *pu8Dst = *pu8Src;
6914 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
6915 }
6916 return rc;
6917}
6918
6919
6920#ifdef IEM_WITH_SETJMP
6921/**
6922 * Fetches a data byte, longjmp on error.
6923 *
6924 * @returns The byte.
6925 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6926 * @param iSegReg The index of the segment register to use for
6927 * this access. The base and limits are checked.
6928 * @param GCPtrMem The address of the guest memory.
6929 */
6930uint8_t iemMemFetchDataU8Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
6931{
6932 /* The lazy approach for now... */
6933 uint8_t const *pu8Src = (uint8_t const *)iemMemMapJmp(pVCpu, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R, 0);
6934 uint8_t const bRet = *pu8Src;
6935 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
6936 return bRet;
6937}
6938#endif /* IEM_WITH_SETJMP */
6939
6940
6941/**
6942 * Fetches a data word.
6943 *
6944 * @returns Strict VBox status code.
6945 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6946 * @param pu16Dst Where to return the word.
6947 * @param iSegReg The index of the segment register to use for
6948 * this access. The base and limits are checked.
6949 * @param GCPtrMem The address of the guest memory.
6950 */
6951VBOXSTRICTRC iemMemFetchDataU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6952{
6953 /* The lazy approach for now... */
6954 uint16_t const *pu16Src;
6955 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem,
6956 IEM_ACCESS_DATA_R, sizeof(*pu16Src) - 1);
6957 if (rc == VINF_SUCCESS)
6958 {
6959 *pu16Dst = *pu16Src;
6960 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
6961 }
6962 return rc;
6963}
6964
6965
6966#ifdef IEM_WITH_SETJMP
6967/**
6968 * Fetches a data word, longjmp on error.
6969 *
6970 * @returns The word.
6971 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6972 * @param iSegReg The index of the segment register to use for
6973 * this access. The base and limits are checked.
6974 * @param GCPtrMem The address of the guest memory.
6975 */
6976uint16_t iemMemFetchDataU16Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
6977{
6978 /* The lazy approach for now... */
6979 uint16_t const *pu16Src = (uint16_t const *)iemMemMapJmp(pVCpu, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
6980 sizeof(*pu16Src) - 1);
6981 uint16_t const u16Ret = *pu16Src;
6982 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
6983 return u16Ret;
6984}
6985#endif
6986
6987
6988/**
6989 * Fetches a data dword.
6990 *
6991 * @returns Strict VBox status code.
6992 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6993 * @param pu32Dst Where to return the dword.
6994 * @param iSegReg The index of the segment register to use for
6995 * this access. The base and limits are checked.
6996 * @param GCPtrMem The address of the guest memory.
6997 */
6998VBOXSTRICTRC iemMemFetchDataU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6999{
7000 /* The lazy approach for now... */
7001 uint32_t const *pu32Src;
7002 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem,
7003 IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
7004 if (rc == VINF_SUCCESS)
7005 {
7006 *pu32Dst = *pu32Src;
7007 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
7008 }
7009 return rc;
7010}
7011
7012
7013/**
7014 * Fetches a data dword and zero extends it to a qword.
7015 *
7016 * @returns Strict VBox status code.
7017 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7018 * @param pu64Dst Where to return the qword.
7019 * @param iSegReg The index of the segment register to use for
7020 * this access. The base and limits are checked.
7021 * @param GCPtrMem The address of the guest memory.
7022 */
7023VBOXSTRICTRC iemMemFetchDataU32_ZX_U64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7024{
7025 /* The lazy approach for now... */
7026 uint32_t const *pu32Src;
7027 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem,
7028 IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
7029 if (rc == VINF_SUCCESS)
7030 {
7031 *pu64Dst = *pu32Src;
7032 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
7033 }
7034 return rc;
7035}
7036
7037
7038#ifdef IEM_WITH_SETJMP
7039
7040/**
7041 * Fetches a data dword, longjmp on error, fallback/safe version.
7042 *
7043 * @returns The dword.
7044 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7045 * @param iSegReg The index of the segment register to use for
7046 * this access. The base and limits are checked.
7047 * @param GCPtrMem The address of the guest memory.
7048 */
7049uint32_t iemMemFetchDataU32SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7050{
7051 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
7052 sizeof(*pu32Src) - 1);
7053 uint32_t const u32Ret = *pu32Src;
7054 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
7055 return u32Ret;
7056}
7057
7058
7059/**
7060 * Fetches a data dword, longjmp on error.
7061 *
7062 * @returns The dword.
7063 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7064 * @param iSegReg The index of the segment register to use for
7065 * this access. The base and limits are checked.
7066 * @param GCPtrMem The address of the guest memory.
7067 */
7068uint32_t iemMemFetchDataU32Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7069{
7070# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
7071 /*
7072      * Convert from segmented to flat address and check that it doesn't cross a page boundary.
7073 */
7074 RTGCPTR GCPtrEff = iemMemApplySegmentToReadJmp(pVCpu, iSegReg, sizeof(uint32_t), GCPtrMem);
7075 if (RT_LIKELY((GCPtrEff & GUEST_PAGE_OFFSET_MASK) <= GUEST_PAGE_SIZE - sizeof(uint32_t)))
7076 {
7077 /*
7078 * TLB lookup.
7079 */
7080 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrEff);
7081 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
7082 if (pTlbe->uTag == uTag)
7083 {
7084 /*
7085 * Check TLB page table level access flags.
7086 */
7087 uint64_t const fNoUser = IEM_GET_CPL(pVCpu) == 3 ? IEMTLBE_F_PT_NO_USER : 0;
7088 if ( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_READ
7089 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_NO_MAPPINGR3 | fNoUser))
7090 == pVCpu->iem.s.DataTlb.uTlbPhysRev)
7091 {
7092 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
7093
7094 /*
7095 * Alignment check:
7096 */
7097 /** @todo check priority \#AC vs \#PF */
7098 if ( !(GCPtrEff & (sizeof(uint32_t) - 1))
7099 || !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_AM)
7100 || !pVCpu->cpum.GstCtx.eflags.Bits.u1AC
7101 || IEM_GET_CPL(pVCpu) != 3)
7102 {
7103 /*
7104 * Fetch and return the dword
7105 */
7106 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
7107 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
7108 return *(uint32_t const *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK];
7109 }
7110 Log10(("iemMemFetchDataU32Jmp: Raising #AC for %RGv\n", GCPtrEff));
7111 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
7112 }
7113 }
7114 }
7115
7116     /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
7117 outdated page pointer, or other troubles. */
7118 Log10(("iemMemFetchDataU32Jmp: %u:%RGv fallback\n", iSegReg, GCPtrMem));
7119 return iemMemFetchDataU32SafeJmp(pVCpu, iSegReg, GCPtrMem);
7120
7121# else
7122 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem,
7123 IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
7124 uint32_t const u32Ret = *pu32Src;
7125 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
7126 return u32Ret;
7127# endif
7128}
7129#endif
7130
7131
7132#ifdef SOME_UNUSED_FUNCTION
7133/**
7134 * Fetches a data dword and sign extends it to a qword.
7135 *
7136 * @returns Strict VBox status code.
7137 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7138 * @param pu64Dst Where to return the sign extended value.
7139 * @param iSegReg The index of the segment register to use for
7140 * this access. The base and limits are checked.
7141 * @param GCPtrMem The address of the guest memory.
7142 */
7143VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7144{
7145 /* The lazy approach for now... */
7146 int32_t const *pi32Src;
7147 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem,
7148 IEM_ACCESS_DATA_R, sizeof(*pi32Src) - 1);
7149 if (rc == VINF_SUCCESS)
7150 {
7151 *pu64Dst = *pi32Src;
7152 rc = iemMemCommitAndUnmap(pVCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
7153 }
7154#ifdef __GNUC__ /* warning: GCC may be a royal pain */
7155 else
7156 *pu64Dst = 0;
7157#endif
7158 return rc;
7159}
7160#endif
7161
7162
7163/**
7164 * Fetches a data qword.
7165 *
7166 * @returns Strict VBox status code.
7167 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7168 * @param pu64Dst Where to return the qword.
7169 * @param iSegReg The index of the segment register to use for
7170 * this access. The base and limits are checked.
7171 * @param GCPtrMem The address of the guest memory.
7172 */
7173VBOXSTRICTRC iemMemFetchDataU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7174{
7175 /* The lazy approach for now... */
7176 uint64_t const *pu64Src;
7177 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem,
7178 IEM_ACCESS_DATA_R, sizeof(*pu64Src) - 1);
7179 if (rc == VINF_SUCCESS)
7180 {
7181 *pu64Dst = *pu64Src;
7182 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
7183 }
7184 return rc;
7185}
7186
7187
7188#ifdef IEM_WITH_SETJMP
7189/**
7190 * Fetches a data qword, longjmp on error.
7191 *
7192 * @returns The qword.
7193 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7194 * @param iSegReg The index of the segment register to use for
7195 * this access. The base and limits are checked.
7196 * @param GCPtrMem The address of the guest memory.
7197 */
7198uint64_t iemMemFetchDataU64Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7199{
7200 /* The lazy approach for now... */
7201 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem,
7202 IEM_ACCESS_DATA_R, sizeof(*pu64Src) - 1);
7203 uint64_t const u64Ret = *pu64Src;
7204 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
7205 return u64Ret;
7206}
7207#endif
7208
7209
7210/**
7211 * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
7212 *
7213 * @returns Strict VBox status code.
7214 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7215 * @param pu64Dst Where to return the qword.
7216 * @param iSegReg The index of the segment register to use for
7217 * this access. The base and limits are checked.
7218 * @param GCPtrMem The address of the guest memory.
7219 */
7220VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7221{
7222 /* The lazy approach for now... */
7223 uint64_t const *pu64Src;
7224 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem,
7225 IEM_ACCESS_DATA_R, 15 | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7226 if (rc == VINF_SUCCESS)
7227 {
7228 *pu64Dst = *pu64Src;
7229 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
7230 }
7231 return rc;
7232}
7233
7234
7235#ifdef IEM_WITH_SETJMP
7236/**
7237 * Fetches a data qword, aligned at a 16 byte boundary (for SSE), longjmp on error.
7238 *
7239 * @returns The qword.
7240 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7241 * @param iSegReg The index of the segment register to use for
7242 * this access. The base and limits are checked.
7243 * @param GCPtrMem The address of the guest memory.
7244 */
7245uint64_t iemMemFetchDataU64AlignedU128Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7246{
7247 /* The lazy approach for now... */
7248 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
7249 15 | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7250 uint64_t const u64Ret = *pu64Src;
7251 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
7252 return u64Ret;
7253}
7254#endif
7255
7256
7257/**
7258 * Fetches a data tword.
7259 *
7260 * @returns Strict VBox status code.
7261 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7262 * @param pr80Dst Where to return the tword.
7263 * @param iSegReg The index of the segment register to use for
7264 * this access. The base and limits are checked.
7265 * @param GCPtrMem The address of the guest memory.
7266 */
7267VBOXSTRICTRC iemMemFetchDataR80(PVMCPUCC pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7268{
7269 /* The lazy approach for now... */
7270 PCRTFLOAT80U pr80Src;
7271 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R, 7);
7272 if (rc == VINF_SUCCESS)
7273 {
7274 *pr80Dst = *pr80Src;
7275 rc = iemMemCommitAndUnmap(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
7276 }
7277 return rc;
7278}
7279
7280
7281#ifdef IEM_WITH_SETJMP
7282/**
7283 * Fetches a data tword, longjmp on error.
7284 *
7285 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7286 * @param pr80Dst Where to return the tword.
7287 * @param iSegReg The index of the segment register to use for
7288 * this access. The base and limits are checked.
7289 * @param GCPtrMem The address of the guest memory.
7290 */
7291void iemMemFetchDataR80Jmp(PVMCPUCC pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7292{
7293 /* The lazy approach for now... */
7294 PCRTFLOAT80U pr80Src = (PCRTFLOAT80U)iemMemMapJmp(pVCpu, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R, 7);
7295 *pr80Dst = *pr80Src;
7296 iemMemCommitAndUnmapJmp(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
7297}
7298#endif
7299
7300
7301/**
7302 * Fetches a data decimal tword.
7303 *
7304 * @returns Strict VBox status code.
7305 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7306 * @param pd80Dst Where to return the tword.
7307 * @param iSegReg The index of the segment register to use for
7308 * this access. The base and limits are checked.
7309 * @param GCPtrMem The address of the guest memory.
7310 */
7311VBOXSTRICTRC iemMemFetchDataD80(PVMCPUCC pVCpu, PRTPBCD80U pd80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7312{
7313 /* The lazy approach for now... */
7314 PCRTPBCD80U pd80Src;
7315 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pd80Src, sizeof(*pd80Src), iSegReg, GCPtrMem,
7316 IEM_ACCESS_DATA_R, 7 /** @todo FBLD alignment check */);
7317 if (rc == VINF_SUCCESS)
7318 {
7319 *pd80Dst = *pd80Src;
7320 rc = iemMemCommitAndUnmap(pVCpu, (void *)pd80Src, IEM_ACCESS_DATA_R);
7321 }
7322 return rc;
7323}
7324
7325
7326#ifdef IEM_WITH_SETJMP
7327/**
7328 * Fetches a data decimal tword, longjmp on error.
7329 *
7330 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7331 * @param pd80Dst Where to return the tword.
7332 * @param iSegReg The index of the segment register to use for
7333 * this access. The base and limits are checked.
7334 * @param GCPtrMem The address of the guest memory.
7335 */
7336void iemMemFetchDataD80Jmp(PVMCPUCC pVCpu, PRTPBCD80U pd80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7337{
7338 /* The lazy approach for now... */
7339 PCRTPBCD80U pd80Src = (PCRTPBCD80U)iemMemMapJmp(pVCpu, sizeof(*pd80Src), iSegReg, GCPtrMem,
7340 IEM_ACCESS_DATA_R, 7 /** @todo FBSTP alignment check */);
7341 *pd80Dst = *pd80Src;
7342 iemMemCommitAndUnmapJmp(pVCpu, (void *)pd80Src, IEM_ACCESS_DATA_R);
7343}
7344#endif
7345
7346
7347/**
7348 * Fetches a data dqword (double qword), generally SSE related.
7349 *
7350 * @returns Strict VBox status code.
7351 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7352 * @param pu128Dst Where to return the dqword.
7353 * @param iSegReg The index of the segment register to use for
7354 * this access. The base and limits are checked.
7355 * @param GCPtrMem The address of the guest memory.
7356 */
7357VBOXSTRICTRC iemMemFetchDataU128(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7358{
7359 /* The lazy approach for now... */
7360 PCRTUINT128U pu128Src;
7361 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem,
7362 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7363 if (rc == VINF_SUCCESS)
7364 {
7365 pu128Dst->au64[0] = pu128Src->au64[0];
7366 pu128Dst->au64[1] = pu128Src->au64[1];
7367 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7368 }
7369 return rc;
7370}
7371
7372
7373#ifdef IEM_WITH_SETJMP
7374/**
7375 * Fetches a data dqword (double qword), generally SSE related.
7376 *
7377 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7378 * @param pu128Dst Where to return the dqword.
7379 * @param iSegReg The index of the segment register to use for
7380 * this access. The base and limits are checked.
7381 * @param GCPtrMem The address of the guest memory.
7382 */
7383void iemMemFetchDataU128Jmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7384{
7385 /* The lazy approach for now... */
7386 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem,
7387 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7388 pu128Dst->au64[0] = pu128Src->au64[0];
7389 pu128Dst->au64[1] = pu128Src->au64[1];
7390 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7391}
7392#endif
7393
7394
7395/**
7396 * Fetches a data dqword (double qword) at an aligned address, generally SSE
7397 * related.
7398 *
7399 * Raises \#GP(0) if not aligned.
7400 *
7401 * @returns Strict VBox status code.
7402 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7403 * @param pu128Dst Where to return the dqword.
7404 * @param iSegReg The index of the segment register to use for
7405 * this access. The base and limits are checked.
7406 * @param GCPtrMem The address of the guest memory.
7407 */
7408VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7409{
7410 /* The lazy approach for now... */
7411 PCRTUINT128U pu128Src;
7412 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem,
7413 IEM_ACCESS_DATA_R, (sizeof(*pu128Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7414 if (rc == VINF_SUCCESS)
7415 {
7416 pu128Dst->au64[0] = pu128Src->au64[0];
7417 pu128Dst->au64[1] = pu128Src->au64[1];
7418 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7419 }
7420 return rc;
7421}
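
/*
 * Illustrative sketch (not built): picking between the aligned and unaligned
 * 128-bit fetchers.  The helper name and the fAligned parameter are
 * hypothetical; only the two iemMemFetchDataU128* calls are the real API.
 */
#if 0
static VBOXSTRICTRC iemSketchFetchXmmOperand(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg,
                                             RTGCPTR GCPtrMem, bool fAligned)
{
    if (fAligned)
        /* MOVDQA-style access: raises #GP(0) when GCPtrMem is not 16-byte aligned. */
        return iemMemFetchDataU128AlignedSse(pVCpu, pu128Dst, iSegReg, GCPtrMem);
    /* MOVDQU-style access: no alignment restriction (NO_AC variant). */
    return iemMemFetchDataU128(pVCpu, pu128Dst, iSegReg, GCPtrMem);
}
#endif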
7422
7423
7424#ifdef IEM_WITH_SETJMP
7425/**
7426 * Fetches a data dqword (double qword) at an aligned address, generally SSE
7427 * related, longjmp on error.
7428 *
7429 * Raises \#GP(0) if not aligned.
7430 *
7431 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7432 * @param pu128Dst Where to return the dqword.
7433 * @param iSegReg The index of the segment register to use for
7434 * this access. The base and limits are checked.
7435 * @param GCPtrMem The address of the guest memory.
7436 */
7437void iemMemFetchDataU128AlignedSseJmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg,
7438 RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7439{
7440 /* The lazy approach for now... */
7441 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
7442 (sizeof(*pu128Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7443 pu128Dst->au64[0] = pu128Src->au64[0];
7444 pu128Dst->au64[1] = pu128Src->au64[1];
7445 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7446}
7447#endif
7448
7449
7450/**
7451 * Fetches a data oword (octo word), generally AVX related.
7452 *
7453 * @returns Strict VBox status code.
7454 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7455 * @param pu256Dst Where to return the oword.
7456 * @param iSegReg The index of the segment register to use for
7457 * this access. The base and limits are checked.
7458 * @param GCPtrMem The address of the guest memory.
7459 */
7460VBOXSTRICTRC iemMemFetchDataU256(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7461{
7462 /* The lazy approach for now... */
7463 PCRTUINT256U pu256Src;
7464 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem,
7465 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7466 if (rc == VINF_SUCCESS)
7467 {
7468 pu256Dst->au64[0] = pu256Src->au64[0];
7469 pu256Dst->au64[1] = pu256Src->au64[1];
7470 pu256Dst->au64[2] = pu256Src->au64[2];
7471 pu256Dst->au64[3] = pu256Src->au64[3];
7472 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
7473 }
7474 return rc;
7475}
7476
7477
7478#ifdef IEM_WITH_SETJMP
7479/**
7480 * Fetches a data oword (octo word), generally AVX related, longjmp on error.
7481 *
7482 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7483 * @param pu256Dst Where to return the oword.
7484 * @param iSegReg The index of the segment register to use for
7485 * this access. The base and limits are checked.
7486 * @param GCPtrMem The address of the guest memory.
7487 */
7488void iemMemFetchDataU256Jmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7489{
7490 /* The lazy approach for now... */
7491 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem,
7492 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7493 pu256Dst->au64[0] = pu256Src->au64[0];
7494 pu256Dst->au64[1] = pu256Src->au64[1];
7495 pu256Dst->au64[2] = pu256Src->au64[2];
7496 pu256Dst->au64[3] = pu256Src->au64[3];
7497 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
7498}
7499#endif
7500
7501
7502/**
7503 * Fetches a data oword (octo word) at an aligned address, generally AVX
7504 * related.
7505 *
7506 * Raises \#GP(0) if not aligned.
7507 *
7508 * @returns Strict VBox status code.
7509 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7510 * @param pu256Dst Where to return the oword.
7511 * @param iSegReg The index of the segment register to use for
7512 * this access. The base and limits are checked.
7513 * @param GCPtrMem The address of the guest memory.
7514 */
7515VBOXSTRICTRC iemMemFetchDataU256AlignedSse(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7516{
7517 /* The lazy approach for now... */
7518 PCRTUINT256U pu256Src;
7519 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem,
7520 IEM_ACCESS_DATA_R, (sizeof(*pu256Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7521 if (rc == VINF_SUCCESS)
7522 {
7523 pu256Dst->au64[0] = pu256Src->au64[0];
7524 pu256Dst->au64[1] = pu256Src->au64[1];
7525 pu256Dst->au64[2] = pu256Src->au64[2];
7526 pu256Dst->au64[3] = pu256Src->au64[3];
7527 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
7528 }
7529 return rc;
7530}
7531
7532
7533#ifdef IEM_WITH_SETJMP
7534/**
7535 * Fetches a data oword (octo word) at an aligned address, generally AVX
7536 * related, longjmp on error.
7537 *
7538 * Raises \#GP(0) if not aligned.
7539 *
7540 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7541 * @param pu256Dst Where to return the oword.
7542 * @param iSegReg The index of the segment register to use for
7543 * this access. The base and limits are checked.
7544 * @param GCPtrMem The address of the guest memory.
7545 */
7546void iemMemFetchDataU256AlignedSseJmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg,
7547 RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7548{
7549 /* The lazy approach for now... */
7550 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
7551 (sizeof(*pu256Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7552 pu256Dst->au64[0] = pu256Src->au64[0];
7553 pu256Dst->au64[1] = pu256Src->au64[1];
7554 pu256Dst->au64[2] = pu256Src->au64[2];
7555 pu256Dst->au64[3] = pu256Src->au64[3];
7556 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
7557}
7558#endif
7559
7560
7561
7562/**
7563 * Fetches a descriptor register (lgdt, lidt).
7564 *
7565 * @returns Strict VBox status code.
7566 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7567 * @param pcbLimit Where to return the limit.
7568 * @param pGCPtrBase Where to return the base.
7569 * @param iSegReg The index of the segment register to use for
7570 * this access. The base and limits are checked.
7571 * @param GCPtrMem The address of the guest memory.
7572 * @param enmOpSize The effective operand size.
7573 */
7574VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPUCC pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
7575 RTGCPTR GCPtrMem, IEMMODE enmOpSize) RT_NOEXCEPT
7576{
7577 /*
7578 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
7579 * little special:
7580 * - The two reads are done separately.
7581 * - Operand size override works in 16-bit and 32-bit code, but not in 64-bit code.
7582 * - We suspect the 386 to actually commit the limit before the base in
7583 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
7584 * don't try to emulate this eccentric behavior, because it's not well
7585 * enough understood and rather hard to trigger.
7586 * - The 486 seems to do a dword limit read when the operand size is 32-bit. (A usage sketch follows this function.)
7587 */
7588 VBOXSTRICTRC rcStrict;
7589 if (IEM_IS_64BIT_CODE(pVCpu))
7590 {
7591 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7592 if (rcStrict == VINF_SUCCESS)
7593 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
7594 }
7595 else
7596 {
7597 uint32_t uTmp = 0; /* (Silences a Visual C++ 'maybe used uninitialized' warning.) */
7598 if (enmOpSize == IEMMODE_32BIT)
7599 {
7600 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
7601 {
7602 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7603 if (rcStrict == VINF_SUCCESS)
7604 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7605 }
7606 else
7607 {
7608 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
7609 if (rcStrict == VINF_SUCCESS)
7610 {
7611 *pcbLimit = (uint16_t)uTmp;
7612 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7613 }
7614 }
7615 if (rcStrict == VINF_SUCCESS)
7616 *pGCPtrBase = uTmp;
7617 }
7618 else
7619 {
7620 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7621 if (rcStrict == VINF_SUCCESS)
7622 {
7623 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7624 if (rcStrict == VINF_SUCCESS)
7625 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
7626 }
7627 }
7628 }
7629 return rcStrict;
7630}
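
/*
 * Illustrative sketch (not built): how the quirks above look from the caller
 * side of iemMemFetchDataXdtr.  The helper name and its arguments are
 * hypothetical; only iemMemFetchDataXdtr is the real API.
 */
#if 0
static VBOXSTRICTRC iemSketchFetchGdtrOperand(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
{
    uint16_t cbLimit   = 0;
    RTGCPTR  GCPtrBase = 0;

    /* 16-bit operand size: only the low 24 bits of the base are kept
       (the UINT32_C(0x00ffffff) masking above). */
    VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(pVCpu, &cbLimit, &GCPtrBase, iSegReg, GCPtrMem, IEMMODE_16BIT);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;

    /* 32-bit operand size: the full 32-bit base is returned.  In 64-bit code
       the operand size is ignored and a 64-bit base is read instead. */
    return iemMemFetchDataXdtr(pVCpu, &cbLimit, &GCPtrBase, iSegReg, GCPtrMem, IEMMODE_32BIT);
}
#endif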
7631
7632
7633
7634/**
7635 * Stores a data byte.
7636 *
7637 * @returns Strict VBox status code.
7638 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7639 * @param iSegReg The index of the segment register to use for
7640 * this access. The base and limits are checked.
7641 * @param GCPtrMem The address of the guest memory.
7642 * @param u8Value The value to store.
7643 */
7644VBOXSTRICTRC iemMemStoreDataU8(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value) RT_NOEXCEPT
7645{
7646 /* The lazy approach for now... */
7647 uint8_t *pu8Dst;
7648 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W, 0);
7649 if (rc == VINF_SUCCESS)
7650 {
7651 *pu8Dst = u8Value;
7652 rc = iemMemCommitAndUnmap(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
7653 }
7654 return rc;
7655}
7656
7657
7658#ifdef IEM_WITH_SETJMP
7659/**
7660 * Stores a data byte, longjmp on error.
7661 *
7662 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7663 * @param iSegReg The index of the segment register to use for
7664 * this access. The base and limits are checked.
7665 * @param GCPtrMem The address of the guest memory.
7666 * @param u8Value The value to store.
7667 */
7668void iemMemStoreDataU8Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value) IEM_NOEXCEPT_MAY_LONGJMP
7669{
7670 /* The lazy approach for now... */
7671 uint8_t *pu8Dst = (uint8_t *)iemMemMapJmp(pVCpu, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W, 0);
7672 *pu8Dst = u8Value;
7673 iemMemCommitAndUnmapJmp(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
7674}
7675#endif
7676
7677
7678/**
7679 * Stores a data word.
7680 *
7681 * @returns Strict VBox status code.
7682 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7683 * @param iSegReg The index of the segment register to use for
7684 * this access. The base and limits are checked.
7685 * @param GCPtrMem The address of the guest memory.
7686 * @param u16Value The value to store.
7687 */
7688VBOXSTRICTRC iemMemStoreDataU16(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value) RT_NOEXCEPT
7689{
7690 /* The lazy approach for now... */
7691 uint16_t *pu16Dst;
7692 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem,
7693 IEM_ACCESS_DATA_W, sizeof(*pu16Dst) - 1);
7694 if (rc == VINF_SUCCESS)
7695 {
7696 *pu16Dst = u16Value;
7697 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
7698 }
7699 return rc;
7700}
7701
7702
7703#ifdef IEM_WITH_SETJMP
7704/**
7705 * Stores a data word, longjmp on error.
7706 *
7707 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7708 * @param iSegReg The index of the segment register to use for
7709 * this access. The base and limits are checked.
7710 * @param GCPtrMem The address of the guest memory.
7711 * @param u16Value The value to store.
7712 */
7713void iemMemStoreDataU16Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value) IEM_NOEXCEPT_MAY_LONGJMP
7714{
7715 /* The lazy approach for now... */
7716 uint16_t *pu16Dst = (uint16_t *)iemMemMapJmp(pVCpu, sizeof(*pu16Dst), iSegReg, GCPtrMem,
7717 IEM_ACCESS_DATA_W, sizeof(*pu16Dst) - 1);
7718 *pu16Dst = u16Value;
7719 iemMemCommitAndUnmapJmp(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
7720}
7721#endif
7722
7723
7724/**
7725 * Stores a data dword.
7726 *
7727 * @returns Strict VBox status code.
7728 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7729 * @param iSegReg The index of the segment register to use for
7730 * this access. The base and limits are checked.
7731 * @param GCPtrMem The address of the guest memory.
7732 * @param u32Value The value to store.
7733 */
7734VBOXSTRICTRC iemMemStoreDataU32(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value) RT_NOEXCEPT
7735{
7736 /* The lazy approach for now... */
7737 uint32_t *pu32Dst;
7738 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem,
7739 IEM_ACCESS_DATA_W, sizeof(*pu32Dst) - 1);
7740 if (rc == VINF_SUCCESS)
7741 {
7742 *pu32Dst = u32Value;
7743 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
7744 }
7745 return rc;
7746}
7747
7748
7749#ifdef IEM_WITH_SETJMP
7750/**
7751 * Stores a data dword, longjmp on error.
7752 *
7754 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7755 * @param iSegReg The index of the segment register to use for
7756 * this access. The base and limits are checked.
7757 * @param GCPtrMem The address of the guest memory.
7758 * @param u32Value The value to store.
7759 */
7760void iemMemStoreDataU32Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value) IEM_NOEXCEPT_MAY_LONGJMP
7761{
7762 /* The lazy approach for now... */
7763 uint32_t *pu32Dst = (uint32_t *)iemMemMapJmp(pVCpu, sizeof(*pu32Dst), iSegReg, GCPtrMem,
7764 IEM_ACCESS_DATA_W, sizeof(*pu32Dst) - 1);
7765 *pu32Dst = u32Value;
7766 iemMemCommitAndUnmapJmp(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
7767}
7768#endif
7769
7770
7771/**
7772 * Stores a data qword.
7773 *
7774 * @returns Strict VBox status code.
7775 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7776 * @param iSegReg The index of the segment register to use for
7777 * this access. The base and limits are checked.
7778 * @param GCPtrMem The address of the guest memory.
7779 * @param u64Value The value to store.
7780 */
7781VBOXSTRICTRC iemMemStoreDataU64(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value) RT_NOEXCEPT
7782{
7783 /* The lazy approach for now... */
7784 uint64_t *pu64Dst;
7785 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem,
7786 IEM_ACCESS_DATA_W, sizeof(*pu64Dst) - 1);
7787 if (rc == VINF_SUCCESS)
7788 {
7789 *pu64Dst = u64Value;
7790 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
7791 }
7792 return rc;
7793}
7794
7795
7796#ifdef IEM_WITH_SETJMP
7797/**
7798 * Stores a data qword, longjmp on error.
7799 *
7800 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7801 * @param iSegReg The index of the segment register to use for
7802 * this access. The base and limits are checked.
7803 * @param GCPtrMem The address of the guest memory.
7804 * @param u64Value The value to store.
7805 */
7806void iemMemStoreDataU64Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value) IEM_NOEXCEPT_MAY_LONGJMP
7807{
7808 /* The lazy approach for now... */
7809 uint64_t *pu64Dst = (uint64_t *)iemMemMapJmp(pVCpu, sizeof(*pu64Dst), iSegReg, GCPtrMem,
7810 IEM_ACCESS_DATA_W, sizeof(*pu64Dst) - 1);
7811 *pu64Dst = u64Value;
7812 iemMemCommitAndUnmapJmp(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
7813}
7814#endif
7815
7816
7817/**
7818 * Stores a data dqword.
7819 *
7820 * @returns Strict VBox status code.
7821 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7822 * @param iSegReg The index of the segment register to use for
7823 * this access. The base and limits are checked.
7824 * @param GCPtrMem The address of the guest memory.
7825 * @param u128Value The value to store.
7826 */
7827VBOXSTRICTRC iemMemStoreDataU128(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT
7828{
7829 /* The lazy approach for now... */
7830 PRTUINT128U pu128Dst;
7831 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem,
7832 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7833 if (rc == VINF_SUCCESS)
7834 {
7835 pu128Dst->au64[0] = u128Value.au64[0];
7836 pu128Dst->au64[1] = u128Value.au64[1];
7837 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
7838 }
7839 return rc;
7840}
7841
7842
7843#ifdef IEM_WITH_SETJMP
7844/**
7845 * Stores a data dqword, longjmp on error.
7846 *
7847 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7848 * @param iSegReg The index of the segment register to use for
7849 * this access. The base and limits are checked.
7850 * @param GCPtrMem The address of the guest memory.
7851 * @param u128Value The value to store.
7852 */
7853void iemMemStoreDataU128Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) IEM_NOEXCEPT_MAY_LONGJMP
7854{
7855 /* The lazy approach for now... */
7856 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem,
7857 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7858 pu128Dst->au64[0] = u128Value.au64[0];
7859 pu128Dst->au64[1] = u128Value.au64[1];
7860 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
7861}
7862#endif
7863
7864
7865/**
7866 * Stores a data dqword, SSE aligned.
7867 *
7868 * @returns Strict VBox status code.
7869 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7870 * @param iSegReg The index of the segment register to use for
7871 * this access. The base and limits are checked.
7872 * @param GCPtrMem The address of the guest memory.
7873 * @param u128Value The value to store.
7874 */
7875VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT
7876{
7877 /* The lazy approach for now... */
7878 PRTUINT128U pu128Dst;
7879 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W,
7880 (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7881 if (rc == VINF_SUCCESS)
7882 {
7883 pu128Dst->au64[0] = u128Value.au64[0];
7884 pu128Dst->au64[1] = u128Value.au64[1];
7885 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
7886 }
7887 return rc;
7888}
7889
7890
7891#ifdef IEM_WITH_SETJMP
7892/**
7893 * Stores a data dqword, SSE aligned, longjmp on error.
7894 *
7896 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7897 * @param iSegReg The index of the segment register to use for
7898 * this access. The base and limits are checked.
7899 * @param GCPtrMem The address of the guest memory.
7900 * @param u128Value The value to store.
7901 */
7902void iemMemStoreDataU128AlignedSseJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem,
7903 RTUINT128U u128Value) IEM_NOEXCEPT_MAY_LONGJMP
7904{
7905 /* The lazy approach for now... */
7906 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W,
7907 (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7908 pu128Dst->au64[0] = u128Value.au64[0];
7909 pu128Dst->au64[1] = u128Value.au64[1];
7910 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
7911}
7912#endif
7913
7914
7915/**
7916 * Stores a data oword (octo word).
7917 *
7918 * @returns Strict VBox status code.
7919 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7920 * @param iSegReg The index of the segment register to use for
7921 * this access. The base and limits are checked.
7922 * @param GCPtrMem The address of the guest memory.
7923 * @param pu256Value Pointer to the value to store.
7924 */
7925VBOXSTRICTRC iemMemStoreDataU256(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
7926{
7927 /* The lazy approach for now... */
7928 PRTUINT256U pu256Dst;
7929 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7930 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7931 if (rc == VINF_SUCCESS)
7932 {
7933 pu256Dst->au64[0] = pu256Value->au64[0];
7934 pu256Dst->au64[1] = pu256Value->au64[1];
7935 pu256Dst->au64[2] = pu256Value->au64[2];
7936 pu256Dst->au64[3] = pu256Value->au64[3];
7937 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
7938 }
7939 return rc;
7940}
7941
7942
7943#ifdef IEM_WITH_SETJMP
7944/**
7945 * Stores a data oword (octo word), longjmp on error.
7946 *
7947 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7948 * @param iSegReg The index of the segment register to use for
7949 * this access. The base and limits are checked.
7950 * @param GCPtrMem The address of the guest memory.
7951 * @param pu256Value Pointer to the value to store.
7952 */
7953void iemMemStoreDataU256Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP
7954{
7955 /* The lazy approach for now... */
7956 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7957 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7958 pu256Dst->au64[0] = pu256Value->au64[0];
7959 pu256Dst->au64[1] = pu256Value->au64[1];
7960 pu256Dst->au64[2] = pu256Value->au64[2];
7961 pu256Dst->au64[3] = pu256Value->au64[3];
7962 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
7963}
7964#endif
7965
7966
7967/**
7968 * Stores a data oword (octo word), AVX \#GP(0) aligned.
7969 *
7970 * @returns Strict VBox status code.
7971 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7972 * @param iSegReg The index of the segment register to use for
7973 * this access. The base and limits are checked.
7974 * @param GCPtrMem The address of the guest memory.
7975 * @param pu256Value Pointer to the value to store.
7976 */
7977VBOXSTRICTRC iemMemStoreDataU256AlignedAvx(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
7978{
7979 /* The lazy approach for now... */
7980 PRTUINT256U pu256Dst;
7981 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7982 IEM_ACCESS_DATA_W, (sizeof(*pu256Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP);
7983 if (rc == VINF_SUCCESS)
7984 {
7985 pu256Dst->au64[0] = pu256Value->au64[0];
7986 pu256Dst->au64[1] = pu256Value->au64[1];
7987 pu256Dst->au64[2] = pu256Value->au64[2];
7988 pu256Dst->au64[3] = pu256Value->au64[3];
7989 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
7990 }
7991 return rc;
7992}
7993
7994
7995#ifdef IEM_WITH_SETJMP
7996/**
7997 * Stores a data oword (octo word), AVX \#GP(0) aligned, longjmp on error.
7998 *
8000 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8001 * @param iSegReg The index of the segment register to use for
8002 * this access. The base and limits are checked.
8003 * @param GCPtrMem The address of the guest memory.
8004 * @param pu256Value Pointer to the value to store.
8005 */
8006void iemMemStoreDataU256AlignedAvxJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem,
8007 PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP
8008{
8009 /* The lazy approach for now... */
8010 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem,
8011 IEM_ACCESS_DATA_W, (sizeof(*pu256Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP);
8012 pu256Dst->au64[0] = pu256Value->au64[0];
8013 pu256Dst->au64[1] = pu256Value->au64[1];
8014 pu256Dst->au64[2] = pu256Value->au64[2];
8015 pu256Dst->au64[3] = pu256Value->au64[3];
8016 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
8017}
8018#endif
8019
8020
8021/**
8022 * Stores a descriptor register (sgdt, sidt).
8023 *
8024 * @returns Strict VBox status code.
8025 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8026 * @param cbLimit The limit.
8027 * @param GCPtrBase The base address.
8028 * @param iSegReg The index of the segment register to use for
8029 * this access. The base and limits are checked.
8030 * @param GCPtrMem The address of the guest memory.
8031 */
8032VBOXSTRICTRC iemMemStoreDataXdtr(PVMCPUCC pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8033{
8034 /*
8035 * The SIDT and SGDT instructions actually store the data using two
8036 * independent writes (see bs3CpuBasic2_sidt_sgdt_One). The instructions
8037 * do not respond to opsize prefixes. (A usage sketch follows this function.)
8038 */
8039 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
8040 if (rcStrict == VINF_SUCCESS)
8041 {
8042 if (IEM_IS_16BIT_CODE(pVCpu))
8043 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
8044 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
8045 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
8046 else if (IEM_IS_32BIT_CODE(pVCpu))
8047 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
8048 else
8049 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
8050 }
8051 return rcStrict;
8052}
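
/*
 * Illustrative sketch (not built): storing an SGDT/SIDT style pseudo
 * descriptor via iemMemStoreDataXdtr.  The helper name and the example limit
 * and base values are hypothetical.
 */
#if 0
static VBOXSTRICTRC iemSketchStoreGdtrOperand(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
{
    uint16_t const cbLimit   = 0x0fff;                /* example limit */
    RTGCPTR  const GCPtrBase = UINT32_C(0x00123000);  /* example base */

    /* Two independent writes: the 16-bit limit at GCPtrMem and the base at
       GCPtrMem + 2.  The width of the base write depends on the CPU mode,
       not on any operand size prefix. */
    return iemMemStoreDataXdtr(pVCpu, cbLimit, GCPtrBase, iSegReg, GCPtrMem);
}
#endif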
8053
8054
8055/**
8056 * Pushes a word onto the stack.
8057 *
8058 * @returns Strict VBox status code.
8059 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8060 * @param u16Value The value to push.
8061 */
8062VBOXSTRICTRC iemMemStackPushU16(PVMCPUCC pVCpu, uint16_t u16Value) RT_NOEXCEPT
8063{
8064 /* Decrement the stack pointer. */
8065 uint64_t uNewRsp;
8066 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 2, &uNewRsp);
8067
8068 /* Write the word the lazy way. */
8069 uint16_t *pu16Dst;
8070 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop,
8071 IEM_ACCESS_STACK_W, sizeof(*pu16Dst) - 1);
8072 if (rc == VINF_SUCCESS)
8073 {
8074 *pu16Dst = u16Value;
8075 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
8076 }
8077
8078 /* Commit the new RSP value unless an access handler made trouble. */
8079 if (rc == VINF_SUCCESS)
8080 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8081
8082 return rc;
8083}
8084
8085
8086/**
8087 * Pushes a dword onto the stack.
8088 *
8089 * @returns Strict VBox status code.
8090 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8091 * @param u32Value The value to push.
8092 */
8093VBOXSTRICTRC iemMemStackPushU32(PVMCPUCC pVCpu, uint32_t u32Value) RT_NOEXCEPT
8094{
8095 /* Decrement the stack pointer. */
8096 uint64_t uNewRsp;
8097 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
8098
8099 /* Write the dword the lazy way. */
8100 uint32_t *pu32Dst;
8101 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop,
8102 IEM_ACCESS_STACK_W, sizeof(*pu32Dst) - 1);
8103 if (rc == VINF_SUCCESS)
8104 {
8105 *pu32Dst = u32Value;
8106 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
8107 }
8108
8109 /* Commit the new RSP value unless an access handler made trouble. */
8110 if (rc == VINF_SUCCESS)
8111 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8112
8113 return rc;
8114}
8115
8116
8117/**
8118 * Pushes a dword segment register value onto the stack.
8119 *
8120 * @returns Strict VBox status code.
8121 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8122 * @param u32Value The value to push.
8123 */
8124VBOXSTRICTRC iemMemStackPushU32SReg(PVMCPUCC pVCpu, uint32_t u32Value) RT_NOEXCEPT
8125{
8126 /* Decrement the stack pointer. */
8127 uint64_t uNewRsp;
8128 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
8129
8130 /* The Intel docs talk about zero extending the selector register
8131 value. My actual Intel CPU here might be zero extending the value
8132 but it still only writes the lower word... (see the sketch after this function) */
8133 /** @todo Test this on new HW and on AMD and in 64-bit mode. Also test what
8134 * happens when crossing a page boundary - is the high word checked
8135 * for write accessibility or not? Probably it is. What about segment limits?
8136 * It appears this behavior is also shared with trap error codes.
8137 *
8138 * Docs indicate the behavior may have changed with the Pentium or Pentium Pro.
8139 * Check ancient hardware to pin down when it actually changed. */
8140 uint16_t *pu16Dst;
8141 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop,
8142 IEM_ACCESS_STACK_RW, sizeof(*pu16Dst) - 1); /** @todo 2 or 4 alignment check for PUSH SS? */
8143 if (rc == VINF_SUCCESS)
8144 {
8145 *pu16Dst = (uint16_t)u32Value;
8146 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_RW);
8147 }
8148
8149 /* Commit the new RSP value unless an access handler made trouble. */
8150 if (rc == VINF_SUCCESS)
8151 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8152
8153 return rc;
8154}
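
/*
 * Illustrative sketch (not built): pushing a segment selector with
 * iemMemStackPushU32SReg.  The helper name and the selector value are
 * hypothetical.
 */
#if 0
static VBOXSTRICTRC iemSketchPushSelector(PVMCPUCC pVCpu)
{
    uint16_t const uSel = 0x0023; /* example selector */

    /* RSP drops by 4, but only the low word of the new stack slot is
       written; the upper word of the existing stack contents stays put. */
    return iemMemStackPushU32SReg(pVCpu, uSel);
}
#endif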
8155
8156
8157/**
8158 * Pushes a qword onto the stack.
8159 *
8160 * @returns Strict VBox status code.
8161 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8162 * @param u64Value The value to push.
8163 */
8164VBOXSTRICTRC iemMemStackPushU64(PVMCPUCC pVCpu, uint64_t u64Value) RT_NOEXCEPT
8165{
8166 /* Decrement the stack pointer. */
8167 uint64_t uNewRsp;
8168 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 8, &uNewRsp);
8169
8170 /* Write the qword the lazy way. */
8171 uint64_t *pu64Dst;
8172 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop,
8173 IEM_ACCESS_STACK_W, sizeof(*pu64Dst) - 1);
8174 if (rc == VINF_SUCCESS)
8175 {
8176 *pu64Dst = u64Value;
8177 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
8178 }
8179
8180 /* Commit the new RSP value unless an access handler made trouble. */
8181 if (rc == VINF_SUCCESS)
8182 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8183
8184 return rc;
8185}
8186
8187
8188/**
8189 * Pops a word from the stack.
8190 *
8191 * @returns Strict VBox status code.
8192 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8193 * @param pu16Value Where to store the popped value.
8194 */
8195VBOXSTRICTRC iemMemStackPopU16(PVMCPUCC pVCpu, uint16_t *pu16Value) RT_NOEXCEPT
8196{
8197 /* Increment the stack pointer. */
8198 uint64_t uNewRsp;
8199 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 2, &uNewRsp);
8200
8201 /* Read the word the lazy way. */
8202 uint16_t const *pu16Src;
8203 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop,
8204 IEM_ACCESS_STACK_R, sizeof(*pu16Src) - 1);
8205 if (rc == VINF_SUCCESS)
8206 {
8207 *pu16Value = *pu16Src;
8208 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
8209
8210 /* Commit the new RSP value. */
8211 if (rc == VINF_SUCCESS)
8212 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8213 }
8214
8215 return rc;
8216}
8217
8218
8219/**
8220 * Pops a dword from the stack.
8221 *
8222 * @returns Strict VBox status code.
8223 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8224 * @param pu32Value Where to store the popped value.
8225 */
8226VBOXSTRICTRC iemMemStackPopU32(PVMCPUCC pVCpu, uint32_t *pu32Value) RT_NOEXCEPT
8227{
8228 /* Increment the stack pointer. */
8229 uint64_t uNewRsp;
8230 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 4, &uNewRsp);
8231
8232 /* Read the dword the lazy way. */
8233 uint32_t const *pu32Src;
8234 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop,
8235 IEM_ACCESS_STACK_R, sizeof(*pu32Src) - 1);
8236 if (rc == VINF_SUCCESS)
8237 {
8238 *pu32Value = *pu32Src;
8239 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
8240
8241 /* Commit the new RSP value. */
8242 if (rc == VINF_SUCCESS)
8243 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8244 }
8245
8246 return rc;
8247}
8248
8249
8250/**
8251 * Pops a qword from the stack.
8252 *
8253 * @returns Strict VBox status code.
8254 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8255 * @param pu64Value Where to store the popped value.
8256 */
8257VBOXSTRICTRC iemMemStackPopU64(PVMCPUCC pVCpu, uint64_t *pu64Value) RT_NOEXCEPT
8258{
8259 /* Increment the stack pointer. */
8260 uint64_t uNewRsp;
8261 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 8, &uNewRsp);
8262
8263 /* Read the qword the lazy way. */
8264 uint64_t const *pu64Src;
8265 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop,
8266 IEM_ACCESS_STACK_R, sizeof(*pu64Src) - 1);
8267 if (rc == VINF_SUCCESS)
8268 {
8269 *pu64Value = *pu64Src;
8270 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
8271
8272 /* Commit the new RSP value. */
8273 if (rc == VINF_SUCCESS)
8274 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8275 }
8276
8277 return rc;
8278}
8279
8280
8281/**
8282 * Pushes a word onto the stack, using a temporary stack pointer.
8283 *
8284 * @returns Strict VBox status code.
8285 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8286 * @param u16Value The value to push.
8287 * @param pTmpRsp Pointer to the temporary stack pointer.
8288 */
8289VBOXSTRICTRC iemMemStackPushU16Ex(PVMCPUCC pVCpu, uint16_t u16Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8290{
8291 /* Decrement the stack pointer. */
8292 RTUINT64U NewRsp = *pTmpRsp;
8293 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 2);
8294
8295 /* Write the word the lazy way. */
8296 uint16_t *pu16Dst;
8297 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop,
8298 IEM_ACCESS_STACK_W, sizeof(*pu16Dst) - 1);
8299 if (rc == VINF_SUCCESS)
8300 {
8301 *pu16Dst = u16Value;
8302 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
8303 }
8304
8305 /* Commit the new RSP value unless an access handler made trouble. */
8306 if (rc == VINF_SUCCESS)
8307 *pTmpRsp = NewRsp;
8308
8309 return rc;
8310}
8311
8312
8313/**
8314 * Pushes a dword onto the stack, using a temporary stack pointer.
8315 *
8316 * @returns Strict VBox status code.
8317 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8318 * @param u32Value The value to push.
8319 * @param pTmpRsp Pointer to the temporary stack pointer.
8320 */
8321VBOXSTRICTRC iemMemStackPushU32Ex(PVMCPUCC pVCpu, uint32_t u32Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8322{
8323 /* Decrement the stack pointer. */
8324 RTUINT64U NewRsp = *pTmpRsp;
8325 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 4);
8326
8327 /* Write the dword the lazy way. */
8328 uint32_t *pu32Dst;
8329 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop,
8330 IEM_ACCESS_STACK_W, sizeof(*pu32Dst) - 1);
8331 if (rc == VINF_SUCCESS)
8332 {
8333 *pu32Dst = u32Value;
8334 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
8335 }
8336
8337 /* Commit the new RSP value unless an access handler made trouble. */
8338 if (rc == VINF_SUCCESS)
8339 *pTmpRsp = NewRsp;
8340
8341 return rc;
8342}
8343
8344
8345/**
8346 * Pushes a qword onto the stack, using a temporary stack pointer.
8347 *
8348 * @returns Strict VBox status code.
8349 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8350 * @param u64Value The value to push.
8351 * @param pTmpRsp Pointer to the temporary stack pointer.
8352 */
8353VBOXSTRICTRC iemMemStackPushU64Ex(PVMCPUCC pVCpu, uint64_t u64Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8354{
8355 /* Decrement the stack pointer. */
8356 RTUINT64U NewRsp = *pTmpRsp;
8357 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 8);
8358
8359 /* Write the qword the lazy way. */
8360 uint64_t *pu64Dst;
8361 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop,
8362 IEM_ACCESS_STACK_W, sizeof(*pu64Dst) - 1);
8363 if (rc == VINF_SUCCESS)
8364 {
8365 *pu64Dst = u64Value;
8366 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
8367 }
8368
8369 /* Commit the new RSP value unless an access handler made trouble. */
8370 if (rc == VINF_SUCCESS)
8371 *pTmpRsp = NewRsp;
8372
8373 return rc;
8374}
8375
8376
8377/**
8378 * Pops a word from the stack, using a temporary stack pointer.
8379 *
8380 * @returns Strict VBox status code.
8381 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8382 * @param pu16Value Where to store the popped value.
8383 * @param pTmpRsp Pointer to the temporary stack pointer.
8384 */
8385VBOXSTRICTRC iemMemStackPopU16Ex(PVMCPUCC pVCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8386{
8387 /* Increment the stack pointer. */
8388 RTUINT64U NewRsp = *pTmpRsp;
8389 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 2);
8390
8391 /* Read the word the lazy way. */
8392 uint16_t const *pu16Src;
8393 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop,
8394 IEM_ACCESS_STACK_R, sizeof(*pu16Src) - 1);
8395 if (rc == VINF_SUCCESS)
8396 {
8397 *pu16Value = *pu16Src;
8398 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
8399
8400 /* Commit the new RSP value. */
8401 if (rc == VINF_SUCCESS)
8402 *pTmpRsp = NewRsp;
8403 }
8404
8405 return rc;
8406}
8407
8408
8409/**
8410 * Pops a dword from the stack, using a temporary stack pointer.
8411 *
8412 * @returns Strict VBox status code.
8413 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8414 * @param pu32Value Where to store the popped value.
8415 * @param pTmpRsp Pointer to the temporary stack pointer.
8416 */
8417VBOXSTRICTRC iemMemStackPopU32Ex(PVMCPUCC pVCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8418{
8419 /* Increment the stack pointer. */
8420 RTUINT64U NewRsp = *pTmpRsp;
8421 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 4);
8422
8423 /* Read the dword the lazy way. */
8424 uint32_t const *pu32Src;
8425 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop,
8426 IEM_ACCESS_STACK_R, sizeof(*pu32Src) - 1);
8427 if (rc == VINF_SUCCESS)
8428 {
8429 *pu32Value = *pu32Src;
8430 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
8431
8432 /* Commit the new RSP value. */
8433 if (rc == VINF_SUCCESS)
8434 *pTmpRsp = NewRsp;
8435 }
8436
8437 return rc;
8438}
8439
8440
8441/**
8442 * Pops a qword from the stack, using a temporary stack pointer.
8443 *
8444 * @returns Strict VBox status code.
8445 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8446 * @param pu64Value Where to store the popped value.
8447 * @param pTmpRsp Pointer to the temporary stack pointer.
8448 */
8449VBOXSTRICTRC iemMemStackPopU64Ex(PVMCPUCC pVCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8450{
8451 /* Increment the stack pointer. */
8452 RTUINT64U NewRsp = *pTmpRsp;
8453 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 8);
8454
8455 /* Read the qword the lazy way. */
8456 uint64_t const *pu64Src;
8457 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop,
8458 IEM_ACCESS_STACK_R, sizeof(*pu64Src) - 1);
8459 if (rcStrict == VINF_SUCCESS)
8460 {
8461 *pu64Value = *pu64Src;
8462 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
8463
8464 /* Commit the new RSP value. */
8465 if (rcStrict == VINF_SUCCESS)
8466 *pTmpRsp = NewRsp;
8467 }
8468
8469 return rcStrict;
8470}
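
/*
 * Illustrative sketch (not built): the Ex variants work on a caller owned
 * temporary RSP, so a multi-part operation only commits the real RSP once
 * everything has succeeded.  The helper name is hypothetical.
 */
#if 0
static VBOXSTRICTRC iemSketchPopTwoWords(PVMCPUCC pVCpu, uint16_t *puFirst, uint16_t *puSecond)
{
    RTUINT64U TmpRsp;
    TmpRsp.u = pVCpu->cpum.GstCtx.rsp;

    VBOXSTRICTRC rcStrict = iemMemStackPopU16Ex(pVCpu, puFirst, &TmpRsp);
    if (rcStrict == VINF_SUCCESS)
        rcStrict = iemMemStackPopU16Ex(pVCpu, puSecond, &TmpRsp);
    if (rcStrict == VINF_SUCCESS)
        pVCpu->cpum.GstCtx.rsp = TmpRsp.u; /* commit only after both pops succeeded */
    return rcStrict;
}
#endif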
8471
8472
8473/**
8474 * Begin a special stack push (used by interrupts, exceptions and such).
8475 *
8476 * This will raise \#SS or \#PF if appropriate.
8477 *
8478 * @returns Strict VBox status code.
8479 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8480 * @param cbMem The number of bytes to push onto the stack.
8481 * @param cbAlign The alignment mask (7, 3, 1).
8482 * @param ppvMem Where to return the pointer to the stack memory.
8483 * As with the other memory functions this could be
8484 * direct access or bounce buffered access, so
8485 * don't commit register until the commit call
8486 * succeeds.
8487 * @param puNewRsp Where to return the new RSP value. This must be
8488 * passed unchanged to
8489 * iemMemStackPushCommitSpecial().
8490 */
8491VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
8492 void **ppvMem, uint64_t *puNewRsp) RT_NOEXCEPT
8493{
8494 Assert(cbMem < UINT8_MAX);
8495 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, (uint8_t)cbMem, puNewRsp);
8496 return iemMemMap(pVCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop,
8497 IEM_ACCESS_STACK_W, cbAlign);
8498}
8499
8500
8501/**
8502 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
8503 *
8504 * This will update the rSP.
8505 *
8506 * @returns Strict VBox status code.
8507 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8508 * @param pvMem The pointer returned by
8509 * iemMemStackPushBeginSpecial().
8510 * @param uNewRsp The new RSP value returned by
8511 * iemMemStackPushBeginSpecial().
8512 */
8513VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPUCC pVCpu, void *pvMem, uint64_t uNewRsp) RT_NOEXCEPT
8514{
8515 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem, IEM_ACCESS_STACK_W);
8516 if (rcStrict == VINF_SUCCESS)
8517 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8518 return rcStrict;
8519}
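
/*
 * Illustrative sketch (not built): the begin/commit protocol for special
 * stack pushes.  The helper name, the 8 byte frame layout and its contents
 * are hypothetical.
 */
#if 0
static VBOXSTRICTRC iemSketchPushSpecialFrame(PVMCPUCC pVCpu, uint32_t uEip, uint32_t uErr)
{
    /* Map stack space for an 8 byte frame; RSP is not touched yet. */
    void        *pvFrame = NULL;
    uint64_t     uNewRsp = 0;
    VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pVCpu, 8, 3 /*cbAlign*/, &pvFrame, &uNewRsp);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;

    /* Fill in the frame thru the returned mapping (may be bounce buffered). */
    ((uint32_t *)pvFrame)[0] = uEip;
    ((uint32_t *)pvFrame)[1] = uErr;

    /* Commit the memory and, on success, the new RSP value. */
    return iemMemStackPushCommitSpecial(pVCpu, pvFrame, uNewRsp);
}
#endif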
8520
8521
8522/**
8523 * Begin a special stack pop (used by iret, retf and such).
8524 *
8525 * This will raise \#SS or \#PF if appropriate.
8526 *
8527 * @returns Strict VBox status code.
8528 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8529 * @param cbMem The number of bytes to pop from the stack.
8530 * @param cbAlign The alignment mask (7, 3, 1).
8531 * @param ppvMem Where to return the pointer to the stack memory.
8532 * @param puNewRsp Where to return the new RSP value. This must be
8533 * assigned to CPUMCTX::rsp manually some time
8534 * after iemMemStackPopDoneSpecial() has been
8535 * called.
8536 */
8537VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
8538 void const **ppvMem, uint64_t *puNewRsp) RT_NOEXCEPT
8539{
8540 Assert(cbMem < UINT8_MAX);
8541 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, (uint8_t)cbMem, puNewRsp);
8542 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R, cbAlign);
8543}
8544
8545
8546/**
8547 * Continue a special stack pop (used by iret and retf), for the purpose of
8548 * retrieving a new stack pointer.
8549 *
8550 * This will raise \#SS or \#PF if appropriate.
8551 *
8552 * @returns Strict VBox status code.
8553 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8554 * @param off Offset from the top of the stack. This is zero
8555 * except in the retf case.
8556 * @param cbMem The number of bytes to pop from the stack.
8557 * @param ppvMem Where to return the pointer to the stack memory.
8558 * @param uCurNewRsp The current uncommitted RSP value. (No need to
8559 * return this because all use of this function is
8560 * to retrieve a new value and anything we return
8561 * here would be discarded.)
8562 */
8563VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPUCC pVCpu, size_t off, size_t cbMem,
8564 void const **ppvMem, uint64_t uCurNewRsp) RT_NOEXCEPT
8565{
8566 Assert(cbMem < UINT8_MAX);
8567
8568 /* The essence of iemRegGetRspForPopEx and friends: */ /** @todo put this into an inlined function? */
8569 RTGCPTR GCPtrTop;
8570 if (IEM_IS_64BIT_CODE(pVCpu))
8571 GCPtrTop = uCurNewRsp;
8572 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
8573 GCPtrTop = (uint32_t)uCurNewRsp;
8574 else
8575 GCPtrTop = (uint16_t)uCurNewRsp;
8576
8577 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop + off, IEM_ACCESS_STACK_R,
8578 0 /* checked in iemMemStackPopBeginSpecial */);
8579}
8580
8581
8582/**
8583 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
8584 * iemMemStackPopContinueSpecial).
8585 *
8586 * The caller will manually commit the rSP.
8587 *
8588 * @returns Strict VBox status code.
8589 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8590 * @param pvMem The pointer returned by
8591 * iemMemStackPopBeginSpecial() or
8592 * iemMemStackPopContinueSpecial().
8593 */
8594VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPUCC pVCpu, void const *pvMem) RT_NOEXCEPT
8595{
8596 return iemMemCommitAndUnmap(pVCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
8597}
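
/*
 * Illustrative sketch (not built): the begin/done protocol for special stack
 * pops.  The helper name, the frame layout and the explicit RSP commit at
 * the end are hypothetical example choices.
 */
#if 0
static VBOXSTRICTRC iemSketchPopSpecialFrame(PVMCPUCC pVCpu, uint32_t *puEip, uint32_t *puCs)
{
    /* Map the top 8 bytes of the stack for reading; RSP is left alone. */
    void const  *pvFrame = NULL;
    uint64_t     uNewRsp = 0;
    VBOXSTRICTRC rcStrict = iemMemStackPopBeginSpecial(pVCpu, 8, 3 /*cbAlign*/, &pvFrame, &uNewRsp);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;

    *puEip = ((uint32_t const *)pvFrame)[0];
    *puCs  = ((uint32_t const *)pvFrame)[1];

    /* Unmap, then commit RSP manually as documented above. */
    rcStrict = iemMemStackPopDoneSpecial(pVCpu, pvFrame);
    if (rcStrict == VINF_SUCCESS)
        pVCpu->cpum.GstCtx.rsp = uNewRsp;
    return rcStrict;
}
#endif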
8598
8599
8600/**
8601 * Fetches a system table byte.
8602 *
8603 * @returns Strict VBox status code.
8604 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8605 * @param pbDst Where to return the byte.
8606 * @param iSegReg The index of the segment register to use for
8607 * this access. The base and limits are checked.
8608 * @param GCPtrMem The address of the guest memory.
8609 */
8610VBOXSTRICTRC iemMemFetchSysU8(PVMCPUCC pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8611{
8612 /* The lazy approach for now... */
8613 uint8_t const *pbSrc;
8614 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8615 if (rc == VINF_SUCCESS)
8616 {
8617 *pbDst = *pbSrc;
8618 rc = iemMemCommitAndUnmap(pVCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
8619 }
8620 return rc;
8621}
8622
8623
8624/**
8625 * Fetches a system table word.
8626 *
8627 * @returns Strict VBox status code.
8628 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8629 * @param pu16Dst Where to return the word.
8630 * @param iSegReg The index of the segment register to use for
8631 * this access. The base and limits are checked.
8632 * @param GCPtrMem The address of the guest memory.
8633 */
8634VBOXSTRICTRC iemMemFetchSysU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8635{
8636 /* The lazy approach for now... */
8637 uint16_t const *pu16Src;
8638 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8639 if (rc == VINF_SUCCESS)
8640 {
8641 *pu16Dst = *pu16Src;
8642 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
8643 }
8644 return rc;
8645}
8646
8647
8648/**
8649 * Fetches a system table dword.
8650 *
8651 * @returns Strict VBox status code.
8652 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8653 * @param pu32Dst Where to return the dword.
8654 * @param iSegReg The index of the segment register to use for
8655 * this access. The base and limits are checked.
8656 * @param GCPtrMem The address of the guest memory.
8657 */
8658VBOXSTRICTRC iemMemFetchSysU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8659{
8660 /* The lazy approach for now... */
8661 uint32_t const *pu32Src;
8662 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8663 if (rc == VINF_SUCCESS)
8664 {
8665 *pu32Dst = *pu32Src;
8666 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
8667 }
8668 return rc;
8669}
8670
8671
8672/**
8673 * Fetches a system table qword.
8674 *
8675 * @returns Strict VBox status code.
8676 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8677 * @param pu64Dst Where to return the qword.
8678 * @param iSegReg The index of the segment register to use for
8679 * this access. The base and limits are checked.
8680 * @param GCPtrMem The address of the guest memory.
8681 */
8682VBOXSTRICTRC iemMemFetchSysU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8683{
8684 /* The lazy approach for now... */
8685 uint64_t const *pu64Src;
8686 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8687 if (rc == VINF_SUCCESS)
8688 {
8689 *pu64Dst = *pu64Src;
8690 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
8691 }
8692 return rc;
8693}
8694
8695
8696/**
8697 * Fetches a descriptor table entry with caller specified error code.
8698 *
8699 * @returns Strict VBox status code.
8700 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8701 * @param pDesc Where to return the descriptor table entry.
8702 * @param uSel The selector which table entry to fetch.
8703 * @param uXcpt The exception to raise on table lookup error.
8704 * @param uErrorCode The error code associated with the exception.
8705 */
8706static VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,
8707 uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT
8708{
8709 AssertPtr(pDesc);
8710 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
8711
8712 /** @todo did the 286 require all 8 bytes to be accessible? */
8713 /*
8714 * Get the selector table base and check bounds.
8715 */
8716 RTGCPTR GCPtrBase;
8717 if (uSel & X86_SEL_LDT)
8718 {
8719 if ( !pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present
8720 || (uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.ldtr.u32Limit )
8721 {
8722 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
8723 uSel, pVCpu->cpum.GstCtx.ldtr.u32Limit, pVCpu->cpum.GstCtx.ldtr.Sel));
8724 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8725 uErrorCode, 0);
8726 }
8727
8728 Assert(pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present);
8729 GCPtrBase = pVCpu->cpum.GstCtx.ldtr.u64Base;
8730 }
8731 else
8732 {
8733 if ((uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.gdtr.cbGdt)
8734 {
8735 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pVCpu->cpum.GstCtx.gdtr.cbGdt));
8736 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8737 uErrorCode, 0);
8738 }
8739 GCPtrBase = pVCpu->cpum.GstCtx.gdtr.pGdt;
8740 }
8741
8742 /*
8743 * Read the legacy descriptor and maybe the long mode extensions if
8744 * required.
8745 */
8746 VBOXSTRICTRC rcStrict;
8747 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)
8748 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
8749 else
8750 {
8751 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);
8752 if (rcStrict == VINF_SUCCESS)
8753 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);
8754 if (rcStrict == VINF_SUCCESS)
8755 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);
8756 if (rcStrict == VINF_SUCCESS)
8757 pDesc->Legacy.au16[3] = 0;
8758 else
8759 return rcStrict;
8760 }
8761
8762 if (rcStrict == VINF_SUCCESS)
8763 {
8764 if ( !IEM_IS_LONG_MODE(pVCpu)
8765 || pDesc->Legacy.Gen.u1DescType)
8766 pDesc->Long.au64[1] = 0;
8767 else if ( (uint32_t)(uSel | X86_SEL_RPL_LDT) + 8
8768 <= (uSel & X86_SEL_LDT ? pVCpu->cpum.GstCtx.ldtr.u32Limit : pVCpu->cpum.GstCtx.gdtr.cbGdt))
8769 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
8770 else
8771 {
8772 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
8773 /** @todo is this the right exception? */
8774 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
8775 }
8776 }
8777 return rcStrict;
8778}
8779
8780
8781/**
8782 * Fetches a descriptor table entry.
8783 *
8784 * @returns Strict VBox status code.
8785 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8786 * @param pDesc Where to return the descriptor table entry.
8787 * @param uSel The selector which table entry to fetch.
8788 * @param uXcpt The exception to raise on table lookup error.
8789 */
8790VBOXSTRICTRC iemMemFetchSelDesc(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt) RT_NOEXCEPT
8791{
8792 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
8793}
8794
8795
8796/**
8797 * Marks the selector descriptor as accessed (only non-system descriptors).
8798 *
8799 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
8800 * will therefore skip the limit checks.
8801 *
8802 * @returns Strict VBox status code.
8803 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8804 * @param uSel The selector.
8805 */
8806VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
8807{
8808 /*
8809 * Get the selector table base and calculate the entry address.
8810 */
8811 RTGCPTR GCPtr = uSel & X86_SEL_LDT
8812 ? pVCpu->cpum.GstCtx.ldtr.u64Base
8813 : pVCpu->cpum.GstCtx.gdtr.pGdt;
8814 GCPtr += uSel & X86_SEL_MASK;
8815
8816 /*
8817 * ASMAtomicBitSet will assert if the address is misaligned, so do some
8818 * ugly stuff to avoid this. This will make sure it's an atomic access
8819 * as well as more or less remove any question about 8-bit or 32-bit accesses.
8820 */
8821 VBOXSTRICTRC rcStrict;
8822 uint32_t volatile *pu32;
8823 if ((GCPtr & 3) == 0)
8824 {
8825 /* The normal case, map the 32-bit bits around the accessed bit (40). */
8826 GCPtr += 2 + 2;
8827 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0);
8828 if (rcStrict != VINF_SUCCESS)
8829 return rcStrict;
8830 ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
8831 }
8832 else
8833 {
8834 /* The misaligned GDT/LDT case, map the whole thing. */
8835 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0);
8836 if (rcStrict != VINF_SUCCESS)
8837 return rcStrict;
8838 switch ((uintptr_t)pu32 & 3)
8839 {
8840 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
8841 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
8842 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
8843 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
8844 }
8845 }
8846
8847 return iemMemCommitAndUnmap(pVCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
8848}
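
/*
 * Illustrative sketch (not built): the usual pairing of iemMemFetchSelDesc
 * and iemMemMarkSelDescAccessed.  The helper name and the use of X86_XCPT_GP
 * as the error exception are hypothetical example choices.
 */
#if 0
static VBOXSTRICTRC iemSketchLoadAndMarkDesc(PVMCPUCC pVCpu, uint16_t uSel)
{
    IEMSELDESC Desc;
    VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_GP);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;

    /* Only non-system (code/data) descriptors have an accessed bit to set. */
    if (Desc.Legacy.Gen.u1DescType)
        rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
    return rcStrict;
}
#endif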
8849
8850/** @} */
8851
8852/** @name Opcode Helpers.
8853 * @{
8854 */
8855
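/*
 * Worked example: for 16-bit addressing, the ModR/M byte 0x42 decodes as
 * mod=01, reg=000, rm=010 in iemOpHlpCalcRmEffAddr below, so a disp8 is
 * fetched and the effective address is BP + SI + disp8; since BP is the base,
 * the default segment becomes SS unless a segment prefix overrides it
 * (SET_SS_DEF).
 */
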
8856/**
8857 * Calculates the effective address of a ModR/M memory operand.
8858 *
8859 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8860 *
8861 * @return Strict VBox status code.
8862 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8863 * @param bRm The ModRM byte.
8864 * @param cbImmAndRspOffset - First byte: The size of any immediate
8865 * following the effective address opcode bytes
8866 * (only for RIP relative addressing).
8867 * - Second byte: RSP displacement (for POP [ESP]).
8868 * @param pGCPtrEff Where to return the effective address.
8869 */
8870VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset, PRTGCPTR pGCPtrEff) RT_NOEXCEPT
8871{
8872 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
8873# define SET_SS_DEF() \
8874 do \
8875 { \
8876 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
8877 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
8878 } while (0)
8879
8880 if (!IEM_IS_64BIT_CODE(pVCpu))
8881 {
8882/** @todo Check the effective address size crap! */
8883 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
8884 {
8885 uint16_t u16EffAddr;
8886
8887 /* Handle the disp16 form with no registers first. */
8888 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
8889 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
8890 else
8891 {
8892 /* Get the displacement. */
8893 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8894 {
8895 case 0: u16EffAddr = 0; break;
8896 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
8897 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
8898 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
8899 }
8900
8901 /* Add the base and index registers to the disp. */
8902 switch (bRm & X86_MODRM_RM_MASK)
8903 {
8904 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
8905 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
8906 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
8907 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
8908 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
8909 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
8910 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
8911 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
8912 }
8913 }
8914
8915 *pGCPtrEff = u16EffAddr;
8916 }
8917 else
8918 {
8919 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8920 uint32_t u32EffAddr;
8921
8922 /* Handle the disp32 form with no registers first. */
8923 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8924 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
8925 else
8926 {
8927 /* Get the register (or SIB) value. */
8928 switch ((bRm & X86_MODRM_RM_MASK))
8929 {
8930 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8931 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8932 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8933 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8934 case 4: /* SIB */
8935 {
8936 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8937
8938 /* Get the index and scale it. */
8939 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
8940 {
8941 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8942 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8943 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8944 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8945 case 4: u32EffAddr = 0; /*none */ break;
8946 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
8947 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8948 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8949 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8950 }
8951 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8952
8953 /* add base */
8954 switch (bSib & X86_SIB_BASE_MASK)
8955 {
8956 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
8957 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
8958 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
8959 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
8960 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8961 case 5:
8962 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8963 {
8964 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
8965 SET_SS_DEF();
8966 }
8967 else
8968 {
8969 uint32_t u32Disp;
8970 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8971 u32EffAddr += u32Disp;
8972 }
8973 break;
8974 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
8975 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
8976 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8977 }
8978 break;
8979 }
8980 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
8981 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8982 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8983 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8984 }
8985
8986 /* Get and add the displacement. */
8987 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8988 {
8989 case 0:
8990 break;
8991 case 1:
8992 {
8993 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8994 u32EffAddr += i8Disp;
8995 break;
8996 }
8997 case 2:
8998 {
8999 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9000 u32EffAddr += u32Disp;
9001 break;
9002 }
9003 default:
9004 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
9005 }
9006
9007 }
9008 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
9009 *pGCPtrEff = u32EffAddr;
9010 else
9011 {
9012 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
9013 *pGCPtrEff = u32EffAddr & UINT16_MAX;
9014 }
9015 }
9016 }
9017 else
9018 {
9019 uint64_t u64EffAddr;
9020
9021 /* Handle the rip+disp32 form with no registers first. */
9022 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9023 {
9024 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
9025 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
9026 }
9027 else
9028 {
9029 /* Get the register (or SIB) value. */
9030 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
9031 {
9032 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9033 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9034 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9035 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9036 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
9037 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9038 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9039 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9040 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9041 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9042 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9043 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9044 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9045 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9046 /* SIB */
9047 case 4:
9048 case 12:
9049 {
9050 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9051
9052 /* Get the index and scale it. */
9053 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
9054 {
9055 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9056 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9057 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9058 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9059 case 4: u64EffAddr = 0; /*none */ break;
9060 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
9061 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9062 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9063 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9064 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9065 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9066 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9067 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
9068 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9069 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9070 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9071 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9072 }
9073 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9074
9075 /* add base */
9076 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
9077 {
9078 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
9079 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
9080 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
9081 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
9082 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
9083 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
9084 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
9085 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
9086 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
9087 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
9088 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
9089 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
9090 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
9091 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
9092 /* complicated encodings */
9093 case 5:
9094 case 13:
9095 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9096 {
9097 if (!pVCpu->iem.s.uRexB)
9098 {
9099 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
9100 SET_SS_DEF();
9101 }
9102 else
9103 u64EffAddr += pVCpu->cpum.GstCtx.r13;
9104 }
9105 else
9106 {
9107 uint32_t u32Disp;
9108 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9109 u64EffAddr += (int32_t)u32Disp;
9110 }
9111 break;
9112 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9113 }
9114 break;
9115 }
9116 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9117 }
9118
9119 /* Get and add the displacement. */
9120 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9121 {
9122 case 0:
9123 break;
9124 case 1:
9125 {
9126 int8_t i8Disp;
9127 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9128 u64EffAddr += i8Disp;
9129 break;
9130 }
9131 case 2:
9132 {
9133 uint32_t u32Disp;
9134 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9135 u64EffAddr += (int32_t)u32Disp;
9136 break;
9137 }
9138 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
9139 }
9140
9141 }
9142
9143 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
9144 *pGCPtrEff = u64EffAddr;
9145 else
9146 {
9147 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9148 *pGCPtrEff = u64EffAddr & UINT32_MAX;
9149 }
9150 }
9151
9152 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
9153 return VINF_SUCCESS;
9154}
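
/** @par Example
 *  Editorial sketch (not part of the build) of how a caller is expected to pack
 *  @a cbImmAndRspOffset, going by the parameter description above: the low byte
 *  holds the size of any immediate that still follows the effective address
 *  bytes (needed because RIP-relative addressing is relative to the end of the
 *  whole instruction), and the second byte holds an extra RSP displacement for
 *  the POP [ESP] case.  The names cbImm and offRsp are made up for illustration.
 * @code
 *      uint32_t const cbImmAndRspOffset = (uint32_t)cbImm | ((uint32_t)offRsp << 8);
 *      RTGCPTR        GCPtrEff;
 *      VBOXSTRICTRC   rcStrict = iemOpHlpCalcRmEffAddr(pVCpu, bRm, cbImmAndRspOffset, &GCPtrEff);
 * @endcode
 */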
9155
9156
9157#ifdef IEM_WITH_SETJMP
9158/**
9159 * Calculates the effective address of a ModR/M memory operand.
9160 *
9161 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
9162 *
9163 * May longjmp on internal error.
9164 *
9165 * @return The effective address.
9166 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9167 * @param bRm The ModRM byte.
9168 * @param cbImmAndRspOffset - First byte: The size of any immediate
9169 * following the effective address opcode bytes
9170 * (only for RIP relative addressing).
9171 * - Second byte: RSP displacement (for POP [ESP]).
9172 */
9173RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset) IEM_NOEXCEPT_MAY_LONGJMP
9174{
9175 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
9176# define SET_SS_DEF() \
9177 do \
9178 { \
9179 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
9180 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
9181 } while (0)
9182
9183 if (!IEM_IS_64BIT_CODE(pVCpu))
9184 {
9185/** @todo Check the effective address size crap! */
9186 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
9187 {
9188 uint16_t u16EffAddr;
9189
9190 /* Handle the disp16 form with no registers first. */
9191 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
9192 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
9193 else
9194 {
9195 /* Get the displacement. */
9196 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9197 {
9198 case 0: u16EffAddr = 0; break;
9199 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
9200 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
9201 default: AssertFailedStmt(IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_1)); /* (caller checked for these) */
9202 }
9203
9204 /* Add the base and index registers to the disp. */
9205 switch (bRm & X86_MODRM_RM_MASK)
9206 {
9207 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
9208 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
9209 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
9210 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
9211 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
9212 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
9213 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
9214 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
9215 }
9216 }
9217
9218 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
9219 return u16EffAddr;
9220 }
9221
9222 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9223 uint32_t u32EffAddr;
9224
9225 /* Handle the disp32 form with no registers first. */
9226 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9227 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
9228 else
9229 {
9230 /* Get the register (or SIB) value. */
9231 switch ((bRm & X86_MODRM_RM_MASK))
9232 {
9233 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
9234 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
9235 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
9236 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
9237 case 4: /* SIB */
9238 {
9239 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9240
9241 /* Get the index and scale it. */
9242 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
9243 {
9244 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
9245 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
9246 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
9247 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
9248 case 4: u32EffAddr = 0; /*none */ break;
9249 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
9250 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
9251 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
9252 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9253 }
9254 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9255
9256 /* add base */
9257 switch (bSib & X86_SIB_BASE_MASK)
9258 {
9259 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
9260 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
9261 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
9262 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
9263 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
9264 case 5:
9265 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9266 {
9267 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
9268 SET_SS_DEF();
9269 }
9270 else
9271 {
9272 uint32_t u32Disp;
9273 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9274 u32EffAddr += u32Disp;
9275 }
9276 break;
9277 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
9278 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
9279 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9280 }
9281 break;
9282 }
9283 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
9284 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
9285 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
9286 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9287 }
9288
9289 /* Get and add the displacement. */
9290 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9291 {
9292 case 0:
9293 break;
9294 case 1:
9295 {
9296 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9297 u32EffAddr += i8Disp;
9298 break;
9299 }
9300 case 2:
9301 {
9302 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9303 u32EffAddr += u32Disp;
9304 break;
9305 }
9306 default:
9307 AssertFailedStmt(IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_2)); /* (caller checked for these) */
9308 }
9309 }
9310
9311 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
9312 {
9313 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
9314 return u32EffAddr;
9315 }
9316 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
9317 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX32\n", u32EffAddr & UINT16_MAX));
9318 return u32EffAddr & UINT16_MAX;
9319 }
9320
9321 uint64_t u64EffAddr;
9322
9323 /* Handle the rip+disp32 form with no registers first. */
9324 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9325 {
9326 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
9327 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
9328 }
9329 else
9330 {
9331 /* Get the register (or SIB) value. */
9332 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
9333 {
9334 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9335 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9336 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9337 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9338 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
9339 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9340 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9341 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9342 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9343 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9344 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9345 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9346 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9347 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9348 /* SIB */
9349 case 4:
9350 case 12:
9351 {
9352 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9353
9354 /* Get the index and scale it. */
9355 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
9356 {
9357 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9358 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9359 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9360 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9361 case 4: u64EffAddr = 0; /*none */ break;
9362 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
9363 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9364 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9365 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9366 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9367 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9368 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9369 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
9370 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9371 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9372 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9373 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9374 }
9375 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9376
9377 /* add base */
9378 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
9379 {
9380 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
9381 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
9382 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
9383 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
9384 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
9385 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
9386 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
9387 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
9388 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
9389 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
9390 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
9391 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
9392 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
9393 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
9394 /* complicated encodings */
9395 case 5:
9396 case 13:
9397 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9398 {
9399 if (!pVCpu->iem.s.uRexB)
9400 {
9401 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
9402 SET_SS_DEF();
9403 }
9404 else
9405 u64EffAddr += pVCpu->cpum.GstCtx.r13;
9406 }
9407 else
9408 {
9409 uint32_t u32Disp;
9410 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9411 u64EffAddr += (int32_t)u32Disp;
9412 }
9413 break;
9414 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9415 }
9416 break;
9417 }
9418 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9419 }
9420
9421 /* Get and add the displacement. */
9422 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9423 {
9424 case 0:
9425 break;
9426 case 1:
9427 {
9428 int8_t i8Disp;
9429 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9430 u64EffAddr += i8Disp;
9431 break;
9432 }
9433 case 2:
9434 {
9435 uint32_t u32Disp;
9436 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9437 u64EffAddr += (int32_t)u32Disp;
9438 break;
9439 }
9440 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
9441 }
9442
9443 }
9444
9445 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
9446 {
9447 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
9448 return u64EffAddr;
9449 }
9450 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9451 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
9452 return u64EffAddr & UINT32_MAX;
9453}
9454#endif /* IEM_WITH_SETJMP */
9455
9456
9457/**
9458 * Calculates the effective address of a ModR/M memory operand, extended version
9459 * for use in the recompilers.
9460 *
9461 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
9462 *
9463 * @return Strict VBox status code.
9464 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9465 * @param bRm The ModRM byte.
9466 * @param cbImmAndRspOffset - First byte: The size of any immediate
9467 * following the effective address opcode bytes
9468 * (only for RIP relative addressing).
9469 * - Second byte: RSP displacement (for POP [ESP]).
9470 * @param pGCPtrEff Where to return the effective address.
9471 * @param puInfo Extra info: 32-bit displacement (bits 31:0) and
9472 * SIB byte (bits 39:32).
9473 */
9474VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset, PRTGCPTR pGCPtrEff, uint64_t *puInfo) RT_NOEXCEPT
9475{
9476 Log5(("iemOpHlpCalcRmEffAddrEx: bRm=%#x\n", bRm));
9477# define SET_SS_DEF() \
9478 do \
9479 { \
9480 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
9481 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
9482 } while (0)
9483
9484 uint64_t uInfo;
9485 if (!IEM_IS_64BIT_CODE(pVCpu))
9486 {
9487/** @todo Check the effective address size crap! */
9488 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
9489 {
9490 uint16_t u16EffAddr;
9491
9492 /* Handle the disp16 form with no registers first. */
9493 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
9494 {
9495 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
9496 uInfo = u16EffAddr;
9497 }
9498 else
9499 {
9500 /* Get the displacement. */
9501 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9502 {
9503 case 0: u16EffAddr = 0; break;
9504 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
9505 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
9506 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
9507 }
9508 uInfo = u16EffAddr;
9509
9510 /* Add the base and index registers to the disp. */
9511 switch (bRm & X86_MODRM_RM_MASK)
9512 {
9513 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
9514 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
9515 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
9516 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
9517 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
9518 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
9519 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
9520 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
9521 }
9522 }
9523
9524 *pGCPtrEff = u16EffAddr;
9525 }
9526 else
9527 {
9528 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9529 uint32_t u32EffAddr;
9530
9531 /* Handle the disp32 form with no registers first. */
9532 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9533 {
9534 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
9535 uInfo = u32EffAddr;
9536 }
9537 else
9538 {
9539 /* Get the register (or SIB) value. */
9540 uInfo = 0;
9541 switch ((bRm & X86_MODRM_RM_MASK))
9542 {
9543 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
9544 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
9545 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
9546 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
9547 case 4: /* SIB */
9548 {
9549 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9550 uInfo = (uint64_t)bSib << 32;
9551
9552 /* Get the index and scale it. */
9553 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
9554 {
9555 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
9556 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
9557 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
9558 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
9559 case 4: u32EffAddr = 0; /*none */ break;
9560 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
9561 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
9562 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
9563 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9564 }
9565 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9566
9567 /* add base */
9568 switch (bSib & X86_SIB_BASE_MASK)
9569 {
9570 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
9571 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
9572 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
9573 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
9574 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
9575 case 5:
9576 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9577 {
9578 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
9579 SET_SS_DEF();
9580 }
9581 else
9582 {
9583 uint32_t u32Disp;
9584 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9585 u32EffAddr += u32Disp;
9586 uInfo |= u32Disp;
9587 }
9588 break;
9589 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
9590 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
9591 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9592 }
9593 break;
9594 }
9595 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
9596 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
9597 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
9598 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9599 }
9600
9601 /* Get and add the displacement. */
9602 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9603 {
9604 case 0:
9605 break;
9606 case 1:
9607 {
9608 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9609 u32EffAddr += i8Disp;
9610 uInfo |= (uint32_t)(int32_t)i8Disp;
9611 break;
9612 }
9613 case 2:
9614 {
9615 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9616 u32EffAddr += u32Disp;
9617 uInfo |= (uint32_t)u32Disp;
9618 break;
9619 }
9620 default:
9621 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
9622 }
9623
9624 }
9625 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
9626 *pGCPtrEff = u32EffAddr;
9627 else
9628 {
9629 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
9630 *pGCPtrEff = u32EffAddr & UINT16_MAX;
9631 }
9632 }
9633 }
9634 else
9635 {
9636 uint64_t u64EffAddr;
9637
9638 /* Handle the rip+disp32 form with no registers first. */
9639 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9640 {
9641 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
9642 uInfo = (uint32_t)u64EffAddr;
9643 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
9644 }
9645 else
9646 {
9647 /* Get the register (or SIB) value. */
9648 uInfo = 0;
9649 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
9650 {
9651 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9652 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9653 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9654 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9655 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
9656 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9657 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9658 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9659 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9660 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9661 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9662 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9663 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9664 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9665 /* SIB */
9666 case 4:
9667 case 12:
9668 {
9669 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9670 uInfo = (uint64_t)bSib << 32;
9671
9672 /* Get the index and scale it. */
9673 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
9674 {
9675 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9676 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9677 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9678 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9679 case 4: u64EffAddr = 0; /*none */ break;
9680 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
9681 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9682 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9683 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9684 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9685 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9686 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9687 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
9688 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9689 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9690 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9691 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9692 }
9693 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9694
9695 /* add base */
9696 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
9697 {
9698 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
9699 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
9700 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
9701 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
9702 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
9703 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
9704 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
9705 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
9706 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
9707 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
9708 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
9709 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
9710 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
9711 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
9712 /* complicated encodings */
9713 case 5:
9714 case 13:
9715 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9716 {
9717 if (!pVCpu->iem.s.uRexB)
9718 {
9719 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
9720 SET_SS_DEF();
9721 }
9722 else
9723 u64EffAddr += pVCpu->cpum.GstCtx.r13;
9724 }
9725 else
9726 {
9727 uint32_t u32Disp;
9728 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9729 u64EffAddr += (int32_t)u32Disp;
9730 uInfo |= u32Disp;
9731 }
9732 break;
9733 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9734 }
9735 break;
9736 }
9737 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9738 }
9739
9740 /* Get and add the displacement. */
9741 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9742 {
9743 case 0:
9744 break;
9745 case 1:
9746 {
9747 int8_t i8Disp;
9748 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9749 u64EffAddr += i8Disp;
9750 uInfo |= (uint32_t)(int32_t)i8Disp;
9751 break;
9752 }
9753 case 2:
9754 {
9755 uint32_t u32Disp;
9756 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9757 u64EffAddr += (int32_t)u32Disp;
9758 uInfo |= u32Disp;
9759 break;
9760 }
9761 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
9762 }
9763
9764 }
9765
9766 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
9767 *pGCPtrEff = u64EffAddr;
9768 else
9769 {
9770 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9771 *pGCPtrEff = u64EffAddr & UINT32_MAX;
9772 }
9773 }
9774 *puInfo = uInfo;
9775
9776 Log5(("iemOpHlpCalcRmEffAddrEx: EffAddr=%#010RGv uInfo=%RX64\n", *pGCPtrEff, uInfo));
9777 return VINF_SUCCESS;
9778}
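
/** @par Example
 *  Editorial sketch (not part of the build) of how a recompiler caller might
 *  unpack the extra info returned via @a puInfo, going by the parameter
 *  description above:
 * @code
 *      uint64_t     uInfo;
 *      RTGCPTR      GCPtrEff;
 *      VBOXSTRICTRC rcStrict = iemOpHlpCalcRmEffAddrEx(pVCpu, bRm, cbImmAndRspOffset, &GCPtrEff, &uInfo);
 *      uint32_t const u32Disp = (uint32_t)uInfo;         // bits 31:0  - displacement, if any.
 *      uint8_t  const bSib    = (uint8_t)(uInfo >> 32);  // bits 39:32 - SIB byte, if present.
 * @endcode
 */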
9779
9780/** @} */
9781
9782
9783#ifdef LOG_ENABLED
9784/**
9785 * Logs the current instruction.
9786 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9787 * @param fSameCtx Set if we have the same context information as the VMM,
9788 * clear if we may have already executed an instruction in
9789 * our debug context. When clear, we assume IEMCPU holds
9790 * valid CPU mode info.
9791 *
9792 * The @a fSameCtx parameter is now misleading and obsolete.
9793 * @param pszFunction The IEM function doing the execution.
9794 */
9795static void iemLogCurInstr(PVMCPUCC pVCpu, bool fSameCtx, const char *pszFunction) RT_NOEXCEPT
9796{
9797# ifdef IN_RING3
9798 if (LogIs2Enabled())
9799 {
9800 char szInstr[256];
9801 uint32_t cbInstr = 0;
9802 if (fSameCtx)
9803 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
9804 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
9805 szInstr, sizeof(szInstr), &cbInstr);
9806 else
9807 {
9808 uint32_t fFlags = 0;
9809 switch (IEM_GET_CPU_MODE(pVCpu))
9810 {
9811 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
9812 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
9813 case IEMMODE_16BIT:
9814 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) || pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
9815 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
9816 else
9817 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
9818 break;
9819 }
9820 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fFlags,
9821 szInstr, sizeof(szInstr), &cbInstr);
9822 }
9823
9824 PCX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
9825 Log2(("**** %s fExec=%x\n"
9826 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
9827 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
9828 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
9829 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
9830 " %s\n"
9831 , pszFunction, pVCpu->iem.s.fExec,
9832 pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ebx, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.esi, pVCpu->cpum.GstCtx.edi,
9833 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL, pVCpu->cpum.GstCtx.tr.Sel,
9834 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ds.Sel, pVCpu->cpum.GstCtx.es.Sel,
9835 pVCpu->cpum.GstCtx.fs.Sel, pVCpu->cpum.GstCtx.gs.Sel, pVCpu->cpum.GstCtx.eflags.u,
9836 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
9837 szInstr));
9838
9839 if (LogIs3Enabled())
9840 DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
9841 }
9842 else
9843# endif
9844 LogFlow(("%s: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n", pszFunction, pVCpu->cpum.GstCtx.cs.Sel,
9845 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u));
9846 RT_NOREF_PV(pVCpu); RT_NOREF_PV(fSameCtx);
9847}
9848#endif /* LOG_ENABLED */
9849
9850
9851#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9852/**
9853 * Deals with VMCPU_FF_VMX_APIC_WRITE, VMCPU_FF_VMX_MTF, VMCPU_FF_VMX_NMI_WINDOW,
9854 * VMCPU_FF_VMX_PREEMPT_TIMER and VMCPU_FF_VMX_INT_WINDOW.
9855 *
9856 * @returns Modified rcStrict.
9857 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9858 * @param rcStrict The instruction execution status.
9859 */
9860static VBOXSTRICTRC iemHandleNestedInstructionBoundaryFFs(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict) RT_NOEXCEPT
9861{
9862 Assert(CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)));
9863 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF))
9864 {
9865 /* VMX preemption timer takes priority over NMI-window exits. */
9866 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
9867 {
9868 rcStrict = iemVmxVmexitPreemptTimer(pVCpu);
9869 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER));
9870 }
9871 /*
9872 * Check remaining intercepts.
9873 *
9874 * NMI-window and Interrupt-window VM-exits.
9875 * Interrupt shadow (block-by-STI and Mov SS) inhibits interrupts and may also block NMIs.
9876 * Event injection during VM-entry takes priority over NMI-window and interrupt-window VM-exits.
9877 *
9878 * See Intel spec. 26.7.6 "NMI-Window Exiting".
9879 * See Intel spec. 26.7.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
9880 */
9881 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW | VMCPU_FF_VMX_INT_WINDOW)
9882 && !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
9883 && !TRPMHasTrap(pVCpu))
9884 {
9885 Assert(CPUMIsGuestVmxInterceptEvents(&pVCpu->cpum.GstCtx));
9886 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW)
9887 && CPUMIsGuestVmxVirtNmiBlocking(&pVCpu->cpum.GstCtx))
9888 {
9889 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_NMI_WINDOW, 0 /* u64ExitQual */);
9890 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW));
9891 }
9892 else if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW)
9893 && CPUMIsGuestVmxVirtIntrEnabled(&pVCpu->cpum.GstCtx))
9894 {
9895 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_INT_WINDOW, 0 /* u64ExitQual */);
9896 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW));
9897 }
9898 }
9899 }
9900 /* TPR-below threshold/APIC write has the highest priority. */
9901 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
9902 {
9903 rcStrict = iemVmxApicWriteEmulation(pVCpu);
9904 Assert(!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx));
9905 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE));
9906 }
9907 /* MTF takes priority over VMX-preemption timer. */
9908 else
9909 {
9910 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* u64ExitQual */);
9911 Assert(!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx));
9912 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF));
9913 }
9914 return rcStrict;
9915}
9916#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
9917
9918
9919/**
9920 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
9921 * IEMExecOneWithPrefetchedByPC.
9922 *
9923 * Similar code is found in IEMExecLots.
9924 *
9925 * @return Strict VBox status code.
9926 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9927 * @param fExecuteInhibit If set, execute the instruction following CLI,
9928 * POP SS and MOV SS,GR.
9929 * @param pszFunction The calling function name.
9930 */
9931DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPUCC pVCpu, bool fExecuteInhibit, const char *pszFunction)
9932{
9933 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9934 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9935 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9936 RT_NOREF_PV(pszFunction);
9937
9938#ifdef IEM_WITH_SETJMP
9939 VBOXSTRICTRC rcStrict;
9940 IEM_TRY_SETJMP(pVCpu, rcStrict)
9941 {
9942 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9943 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9944 }
9945 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
9946 {
9947 pVCpu->iem.s.cLongJumps++;
9948 }
9949 IEM_CATCH_LONGJMP_END(pVCpu);
9950#else
9951 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9952 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9953#endif
9954 if (rcStrict == VINF_SUCCESS)
9955 pVCpu->iem.s.cInstructions++;
9956 if (pVCpu->iem.s.cActiveMappings > 0)
9957 {
9958 Assert(rcStrict != VINF_SUCCESS);
9959 iemMemRollback(pVCpu);
9960 }
9961 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9962 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9963 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9964
9965//#ifdef DEBUG
9966// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
9967//#endif
9968
9969#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9970 /*
9971 * Perform any VMX nested-guest instruction boundary actions.
9972 *
9973 * If any of these causes a VM-exit, we must skip executing the next
9974 * instruction (would run into stale page tables). A VM-exit makes sure
9975 * there is no interrupt-inhibition, so that should ensure we don't go
9976 * on to try executing the next instruction. Clearing fExecuteInhibit is
9977 * problematic because of the setjmp/longjmp clobbering above.
9978 */
9979 if ( !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9980 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)
9981 || rcStrict != VINF_SUCCESS)
9982 { /* likely */ }
9983 else
9984 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9985#endif
9986
9987 /* Execute the next instruction as well if a cli, pop ss or
9988 mov ss, Gr has just completed successfully. */
9989 if ( fExecuteInhibit
9990 && rcStrict == VINF_SUCCESS
9991 && CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx))
9992 {
9993 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fExec & (IEM_F_BYPASS_HANDLERS | IEM_F_X86_DISREGARD_LOCK));
9994 if (rcStrict == VINF_SUCCESS)
9995 {
9996#ifdef LOG_ENABLED
9997 iemLogCurInstr(pVCpu, false, pszFunction);
9998#endif
9999#ifdef IEM_WITH_SETJMP
10000 IEM_TRY_SETJMP_AGAIN(pVCpu, rcStrict)
10001 {
10002 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
10003 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
10004 }
10005 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
10006 {
10007 pVCpu->iem.s.cLongJumps++;
10008 }
10009 IEM_CATCH_LONGJMP_END(pVCpu);
10010#else
10011 IEM_OPCODE_GET_FIRST_U8(&b);
10012 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
10013#endif
10014 if (rcStrict == VINF_SUCCESS)
10015 {
10016 pVCpu->iem.s.cInstructions++;
10017#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10018 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
10019 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW))
10020 { /* likely */ }
10021 else
10022 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
10023#endif
10024 }
10025 if (pVCpu->iem.s.cActiveMappings > 0)
10026 {
10027 Assert(rcStrict != VINF_SUCCESS);
10028 iemMemRollback(pVCpu);
10029 }
10030 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
10031 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
10032 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
10033 }
10034 else if (pVCpu->iem.s.cActiveMappings > 0)
10035 iemMemRollback(pVCpu);
10036 /** @todo drop this after we bake this change into RIP advancing. */
10037 CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx); /* hope this is correct for all exceptional cases... */
10038 }
10039
10040 /*
10041 * Return value fiddling, statistics and sanity assertions.
10042 */
10043 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10044
10045 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
10046 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
10047 return rcStrict;
10048}
10049
10050
10051/**
10052 * Execute one instruction.
10053 *
10054 * @return Strict VBox status code.
10055 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10056 */
10057VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPUCC pVCpu)
10058{
10059 AssertCompile(sizeof(pVCpu->iem.s) <= sizeof(pVCpu->iem.padding)); /* (tstVMStruct can't do its job w/o instruction stats) */
10060#ifdef LOG_ENABLED
10061 iemLogCurInstr(pVCpu, true, "IEMExecOne");
10062#endif
10063
10064 /*
10065 * Do the decoding and emulation.
10066 */
10067 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
10068 if (rcStrict == VINF_SUCCESS)
10069 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOne");
10070 else if (pVCpu->iem.s.cActiveMappings > 0)
10071 iemMemRollback(pVCpu);
10072
10073 if (rcStrict != VINF_SUCCESS)
10074 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
10075 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
10076 return rcStrict;
10077}
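
/** @par Example
 *  Minimal usage sketch (editorial, not part of the build): the EMT owning
 *  @a pVCpu interprets exactly one guest instruction and logs any non-success
 *  status, mirroring what the function itself does on failure.
 * @code
 *      VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
 *      if (rcStrict != VINF_SUCCESS)
 *          Log(("IEMExecOne -> %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
 * @endcode
 */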
10078
10079
10080VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPUCC pVCpu, uint32_t *pcbWritten)
10081{
10082 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
10083 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
10084 if (rcStrict == VINF_SUCCESS)
10085 {
10086 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneEx");
10087 if (pcbWritten)
10088 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
10089 }
10090 else if (pVCpu->iem.s.cActiveMappings > 0)
10091 iemMemRollback(pVCpu);
10092
10093 return rcStrict;
10094}
10095
10096
10097VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPUCC pVCpu, uint64_t OpcodeBytesPC,
10098 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
10099{
10100 VBOXSTRICTRC rcStrict;
10101 if ( cbOpcodeBytes
10102 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
10103 {
10104 iemInitDecoder(pVCpu, 0 /*fExecOpts*/);
10105#ifdef IEM_WITH_CODE_TLB
10106 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
10107 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
10108 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
10109 pVCpu->iem.s.offCurInstrStart = 0;
10110 pVCpu->iem.s.offInstrNextByte = 0;
10111 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
10112#else
10113 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
10114 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
10115#endif
10116 rcStrict = VINF_SUCCESS;
10117 }
10118 else
10119 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
10120 if (rcStrict == VINF_SUCCESS)
10121 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneWithPrefetchedByPC");
10122 else if (pVCpu->iem.s.cActiveMappings > 0)
10123 iemMemRollback(pVCpu);
10124
10125 return rcStrict;
10126}
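
/** @par Example
 *  Editorial sketch (not part of the build) of a caller that already holds the
 *  opcode bytes for the current RIP and wants to skip the prefetch; the buffer
 *  contents and size are made up for illustration.  If the given PC does not
 *  match the current RIP, the function falls back to the normal prefetch path
 *  (see above).
 * @code
 *      uint8_t const abOpcode[] = { 0x90 };   // NOP - made-up example bytes.
 *      VBOXSTRICTRC  rcStrict   = IEMExecOneWithPrefetchedByPC(pVCpu, CPUMGetGuestRIP(pVCpu),
 *                                                              abOpcode, sizeof(abOpcode));
 * @endcode
 */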
10127
10128
10129VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPUCC pVCpu, uint32_t *pcbWritten)
10130{
10131 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
10132 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_BYPASS_HANDLERS);
10133 if (rcStrict == VINF_SUCCESS)
10134 {
10135 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassEx");
10136 if (pcbWritten)
10137 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
10138 }
10139 else if (pVCpu->iem.s.cActiveMappings > 0)
10140 iemMemRollback(pVCpu);
10141
10142 return rcStrict;
10143}
10144
10145
10146VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPUCC pVCpu, uint64_t OpcodeBytesPC,
10147 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
10148{
10149 VBOXSTRICTRC rcStrict;
10150 if ( cbOpcodeBytes
10151 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
10152 {
10153 iemInitDecoder(pVCpu, IEM_F_BYPASS_HANDLERS);
10154#ifdef IEM_WITH_CODE_TLB
10155 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
10156 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
10157 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
10158 pVCpu->iem.s.offCurInstrStart = 0;
10159 pVCpu->iem.s.offInstrNextByte = 0;
10160 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
10161#else
10162 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
10163 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
10164#endif
10165 rcStrict = VINF_SUCCESS;
10166 }
10167 else
10168 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_BYPASS_HANDLERS);
10169 if (rcStrict == VINF_SUCCESS)
10170 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPC");
10171 else if (pVCpu->iem.s.cActiveMappings > 0)
10172 iemMemRollback(pVCpu);
10173
10174 return rcStrict;
10175}
10176
10177
10178/**
10179 * For handling split cacheline lock operations when the host has split-lock
10180 * detection enabled.
10181 *
10182 * This will cause the interpreter to disregard the lock prefix and implicit
10183 * locking (xchg).
10184 *
10185 * @returns Strict VBox status code.
10186 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10187 */
10188VMMDECL(VBOXSTRICTRC) IEMExecOneIgnoreLock(PVMCPUCC pVCpu)
10189{
10190 /*
10191 * Do the decoding and emulation.
10192 */
10193 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_X86_DISREGARD_LOCK);
10194 if (rcStrict == VINF_SUCCESS)
10195 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneIgnoreLock");
10196 else if (pVCpu->iem.s.cActiveMappings > 0)
10197 iemMemRollback(pVCpu);
10198
10199 if (rcStrict != VINF_SUCCESS)
10200 LogFlow(("IEMExecOneIgnoreLock: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
10201 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
10202 return rcStrict;
10203}
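
/** @par Example
 *  Editorial sketch (not part of the build): a caller that has decided the
 *  current exit was triggered by host split-lock detection may simply
 *  re-execute the offending guest instruction with locking disregarded:
 * @code
 *      VBOXSTRICTRC rcStrict = IEMExecOneIgnoreLock(pVCpu);
 * @endcode
 */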
10204
10205
10206/**
10207 * Code common to IEMExecLots and IEMExecRecompilerThreaded that attempts to
10208 * inject a pending TRPM trap.
10209 */
10210VBOXSTRICTRC iemExecInjectPendingTrap(PVMCPUCC pVCpu)
10211{
10212 Assert(TRPMHasTrap(pVCpu));
10213
10214 if ( !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
10215 && !CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx))
10216 {
10217 /** @todo Can we centralize this under CPUMCanInjectInterrupt()? */
10218#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
10219 bool fIntrEnabled = CPUMGetGuestGif(&pVCpu->cpum.GstCtx);
10220 if (fIntrEnabled)
10221 {
10222 if (!CPUMIsGuestInNestedHwvirtMode(IEM_GET_CTX(pVCpu)))
10223 fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
10224 else if (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
10225 fIntrEnabled = CPUMIsGuestVmxPhysIntrEnabled(IEM_GET_CTX(pVCpu));
10226 else
10227 {
10228 Assert(CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)));
10229 fIntrEnabled = CPUMIsGuestSvmPhysIntrEnabled(pVCpu, IEM_GET_CTX(pVCpu));
10230 }
10231 }
10232#else
10233 bool fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
10234#endif
10235 if (fIntrEnabled)
10236 {
10237 uint8_t u8TrapNo;
10238 TRPMEVENT enmType;
10239 uint32_t uErrCode;
10240 RTGCPTR uCr2;
10241 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /*pu8InstLen*/, NULL /*fIcebp*/);
10242 AssertRC(rc2);
10243 Assert(enmType == TRPM_HARDWARE_INT);
10244 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /*cbInstr*/);
10245
10246 TRPMResetTrap(pVCpu);
10247
10248#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
10249 /* Injecting an event may cause a VM-exit. */
10250 if ( rcStrict != VINF_SUCCESS
10251 && rcStrict != VINF_IEM_RAISED_XCPT)
10252 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
10253#else
10254 NOREF(rcStrict);
10255#endif
10256 }
10257 }
10258
10259 return VINF_SUCCESS;
10260}
10261
10262
10263VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPUCC pVCpu, uint32_t cMaxInstructions, uint32_t cPollRate, uint32_t *pcInstructions)
10264{
10265 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
10266 AssertMsg(RT_IS_POWER_OF_TWO(cPollRate + 1), ("%#x\n", cPollRate));
10267 Assert(cMaxInstructions > 0);
10268
10269 /*
10270 * See if there is an interrupt pending in TRPM, inject it if we can.
10271 */
10272 /** @todo What if we are injecting an exception and not an interrupt? Is that
10273 * possible here? For now we assert it is indeed only an interrupt. */
10274 if (!TRPMHasTrap(pVCpu))
10275 { /* likely */ }
10276 else
10277 {
10278 VBOXSTRICTRC rcStrict = iemExecInjectPendingTrap(pVCpu);
10279 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10280 { /*likely */ }
10281 else
10282 return rcStrict;
10283 }
10284
10285 /*
10286 * Initial decoder init w/ prefetch, then setup setjmp.
10287 */
10288 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
10289 if (rcStrict == VINF_SUCCESS)
10290 {
10291#ifdef IEM_WITH_SETJMP
10292 pVCpu->iem.s.cActiveMappings = 0; /** @todo wtf? */
10293 IEM_TRY_SETJMP(pVCpu, rcStrict)
10294#endif
10295 {
10296 /*
10297 * The run loop. We limit ourselves to the caller-specified maximum number of instructions.
10298 */
10299 uint32_t cMaxInstructionsGccStupidity = cMaxInstructions;
10300 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
10301 for (;;)
10302 {
10303 /*
10304 * Log the state.
10305 */
10306#ifdef LOG_ENABLED
10307 iemLogCurInstr(pVCpu, true, "IEMExecLots");
10308#endif
10309
10310 /*
10311 * Do the decoding and emulation.
10312 */
10313 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
10314 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
10315#ifdef VBOX_STRICT
10316 CPUMAssertGuestRFlagsCookie(pVM, pVCpu);
10317#endif
10318 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10319 {
10320 Assert(pVCpu->iem.s.cActiveMappings == 0);
10321 pVCpu->iem.s.cInstructions++;
10322
10323#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10324 /* Perform any VMX nested-guest instruction boundary actions. */
10325 uint64_t fCpu = pVCpu->fLocalForcedActions;
10326 if (!(fCpu & ( VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
10327 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)))
10328 { /* likely */ }
10329 else
10330 {
10331 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
10332 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10333 fCpu = pVCpu->fLocalForcedActions;
10334 else
10335 {
10336 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10337 break;
10338 }
10339 }
10340#endif
10341 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
10342 {
10343#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
10344 uint64_t fCpu = pVCpu->fLocalForcedActions;
10345#endif
10346 fCpu &= VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
10347 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
10348 | VMCPU_FF_TLB_FLUSH
10349 | VMCPU_FF_UNHALT );
10350
10351 if (RT_LIKELY( ( !fCpu
10352 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
10353 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF) )
10354 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) ))
10355 {
10356 if (--cMaxInstructionsGccStupidity > 0)
10357 {
10358 /* Poll timers every now and then according to the caller's specs. */
10359 if ( (cMaxInstructionsGccStupidity & cPollRate) != 0
10360 || !TMTimerPollBool(pVM, pVCpu))
10361 {
10362 Assert(pVCpu->iem.s.cActiveMappings == 0);
10363 iemReInitDecoder(pVCpu);
10364 continue;
10365 }
10366 }
10367 }
10368 }
10369 Assert(pVCpu->iem.s.cActiveMappings == 0);
10370 }
10371 else if (pVCpu->iem.s.cActiveMappings > 0)
10372 iemMemRollback(pVCpu);
10373 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10374 break;
10375 }
10376 }
10377#ifdef IEM_WITH_SETJMP
10378 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
10379 {
10380 if (pVCpu->iem.s.cActiveMappings > 0)
10381 iemMemRollback(pVCpu);
10382# if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
10383 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10384# endif
10385 pVCpu->iem.s.cLongJumps++;
10386 }
10387 IEM_CATCH_LONGJMP_END(pVCpu);
10388#endif
10389
10390 /*
10391 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
10392 */
10393 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
10394 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
10395 }
10396 else
10397 {
10398 if (pVCpu->iem.s.cActiveMappings > 0)
10399 iemMemRollback(pVCpu);
10400
10401#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
10402 /*
10403 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
10404 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
10405 */
10406 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10407#endif
10408 }
10409
10410 /*
10411 * Maybe re-enter raw-mode and log.
10412 */
10413 if (rcStrict != VINF_SUCCESS)
10414 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
10415 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
10416 if (pcInstructions)
10417 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
10418 return rcStrict;
10419}
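
/** @par Example
 *  Editorial usage sketch (not part of the build).  Note that the poll rate
 *  must be a power of two minus one, as asserted at the top of the function:
 * @code
 *      uint32_t     cInstructions = 0;
 *      VBOXSTRICTRC rcStrict = IEMExecLots(pVCpu, 4096 /*cMaxInstructions*/, 511 /*cPollRate*/, &cInstructions);
 * @endcode
 */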
10420
10421
10422/**
10423 * Interface used by EMExecuteExec, does exit statistics and limits.
10424 *
10425 * @returns Strict VBox status code.
10426 * @param pVCpu The cross context virtual CPU structure.
10427 * @param fWillExit To be defined.
10428 * @param cMinInstructions Minimum number of instructions to execute before checking for FFs.
10429 * @param cMaxInstructions Maximum number of instructions to execute.
10430 * @param cMaxInstructionsWithoutExits
10431 * The max number of instructions without exits.
10432 * @param pStats Where to return statistics.
10433 */
10434VMMDECL(VBOXSTRICTRC) IEMExecForExits(PVMCPUCC pVCpu, uint32_t fWillExit, uint32_t cMinInstructions, uint32_t cMaxInstructions,
10435 uint32_t cMaxInstructionsWithoutExits, PIEMEXECFOREXITSTATS pStats)
10436{
10437 NOREF(fWillExit); /** @todo define flexible exit crits */
10438
10439 /*
10440 * Initialize return stats.
10441 */
10442 pStats->cInstructions = 0;
10443 pStats->cExits = 0;
10444 pStats->cMaxExitDistance = 0;
10445 pStats->cReserved = 0;
10446
10447 /*
10448 * Initial decoder init w/ prefetch, then setup setjmp.
10449 */
10450 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
10451 if (rcStrict == VINF_SUCCESS)
10452 {
10453#ifdef IEM_WITH_SETJMP
10454 pVCpu->iem.s.cActiveMappings = 0; /** @todo wtf?!? */
10455 IEM_TRY_SETJMP(pVCpu, rcStrict)
10456#endif
10457 {
10458#ifdef IN_RING0
10459 bool const fCheckPreemptionPending = !RTThreadPreemptIsPossible() || !RTThreadPreemptIsEnabled(NIL_RTTHREAD);
10460#endif
10461 uint32_t cInstructionSinceLastExit = 0;
10462
10463 /*
10464 * The run loop. We limit ourselves to the caller-specified instruction limits.
10465 */
10466 PVM pVM = pVCpu->CTX_SUFF(pVM);
10467 for (;;)
10468 {
10469 /*
10470 * Log the state.
10471 */
10472#ifdef LOG_ENABLED
10473 iemLogCurInstr(pVCpu, true, "IEMExecForExits");
10474#endif
10475
10476 /*
10477 * Do the decoding and emulation.
10478 */
10479 uint32_t const cPotentialExits = pVCpu->iem.s.cPotentialExits;
10480
10481 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
10482 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
10483
10484 if ( cPotentialExits != pVCpu->iem.s.cPotentialExits
10485 && cInstructionSinceLastExit > 0 /* don't count the first */ )
10486 {
10487 pStats->cExits += 1;
10488 if (cInstructionSinceLastExit > pStats->cMaxExitDistance)
10489 pStats->cMaxExitDistance = cInstructionSinceLastExit;
10490 cInstructionSinceLastExit = 0;
10491 }
10492
10493 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10494 {
10495 Assert(pVCpu->iem.s.cActiveMappings == 0);
10496 pVCpu->iem.s.cInstructions++;
10497 pStats->cInstructions++;
10498 cInstructionSinceLastExit++;
10499
10500#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10501 /* Perform any VMX nested-guest instruction boundary actions. */
10502 uint64_t fCpu = pVCpu->fLocalForcedActions;
10503 if (!(fCpu & ( VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
10504 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)))
10505 { /* likely */ }
10506 else
10507 {
10508 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
10509 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10510 fCpu = pVCpu->fLocalForcedActions;
10511 else
10512 {
10513 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10514 break;
10515 }
10516 }
10517#endif
10518 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
10519 {
10520#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
10521 uint64_t fCpu = pVCpu->fLocalForcedActions;
10522#endif
10523 fCpu &= VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
10524 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
10525 | VMCPU_FF_TLB_FLUSH
10526 | VMCPU_FF_UNHALT );
10527 if (RT_LIKELY( ( ( !fCpu
10528 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
10529 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF))
10530 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) )
10531 || pStats->cInstructions < cMinInstructions))
10532 {
10533 if (pStats->cInstructions < cMaxInstructions)
10534 {
10535 if (cInstructionSinceLastExit <= cMaxInstructionsWithoutExits)
10536 {
10537#ifdef IN_RING0
10538 if ( !fCheckPreemptionPending
10539 || !RTThreadPreemptIsPending(NIL_RTTHREAD))
10540#endif
10541 {
10542 Assert(pVCpu->iem.s.cActiveMappings == 0);
10543 iemReInitDecoder(pVCpu);
10544 continue;
10545 }
10546#ifdef IN_RING0
10547 rcStrict = VINF_EM_RAW_INTERRUPT;
10548 break;
10549#endif
10550 }
10551 }
10552 }
10553 Assert(!(fCpu & VMCPU_FF_IEM));
10554 }
10555 Assert(pVCpu->iem.s.cActiveMappings == 0);
10556 }
10557 else if (pVCpu->iem.s.cActiveMappings > 0)
10558 iemMemRollback(pVCpu);
10559 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10560 break;
10561 }
10562 }
10563#ifdef IEM_WITH_SETJMP
10564 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
10565 {
10566 if (pVCpu->iem.s.cActiveMappings > 0)
10567 iemMemRollback(pVCpu);
10568 pVCpu->iem.s.cLongJumps++;
10569 }
10570 IEM_CATCH_LONGJMP_END(pVCpu);
10571#endif
10572
10573 /*
10574 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
10575 */
10576 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
10577 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
10578 }
10579 else
10580 {
10581 if (pVCpu->iem.s.cActiveMappings > 0)
10582 iemMemRollback(pVCpu);
10583
10584#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
10585 /*
10586 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
10587 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
10588 */
10589 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10590#endif
10591 }
10592
10593 /*
10594     * Log when the status isn't plain success.
10595 */
10596 if (rcStrict != VINF_SUCCESS)
10597 LogFlow(("IEMExecForExits: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc; ins=%u exits=%u maxdist=%u\n",
10598 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp,
10599 pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict), pStats->cInstructions, pStats->cExits, pStats->cMaxExitDistance));
10600 return rcStrict;
10601}
10602
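/*
 * Usage sketch: how a ring-3 caller could drive IEMExecForExits and act on the
 * returned statistics.  The helper name and the instruction budgets below are
 * illustrative assumptions; only the IEMExecForExits call itself follows the
 * interface documented above.
 */
static VBOXSTRICTRC emR3SketchExecForExits(PVMCPUCC pVCpu)
{
    IEMEXECFOREXITSTATS Stats;

    /* Ask IEM for at most 4096 instructions and no more than 512 in a row without an exit. */
    VBOXSTRICTRC rcStrict = IEMExecForExits(pVCpu, 0 /*fWillExit*/, 8 /*cMinInstructions*/, 4096 /*cMaxInstructions*/,
                                            512 /*cMaxInstructionsWithoutExits*/, &Stats);

    /* The statistics show how exit heavy the guest code was; a scheduler could use
       cMaxExitDistance to decide between staying in IEM and returning to hardware execution. */
    LogFlow(("emR3SketchExecForExits: %Rrc ins=%u exits=%u maxdist=%u\n",
             VBOXSTRICTRC_VAL(rcStrict), Stats.cInstructions, Stats.cExits, Stats.cMaxExitDistance));
    return rcStrict;
}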
10603
10604/**
10605 * Injects a trap, fault, abort, software interrupt or external interrupt.
10606 *
10607 * The parameter list matches TRPMQueryTrapAll pretty closely.
10608 *
10609 * @returns Strict VBox status code.
10610 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10611 * @param u8TrapNo The trap number.
10612 * @param enmType What type is it (trap/fault/abort), software
10613 * interrupt or hardware interrupt.
10614 * @param uErrCode The error code if applicable.
10615 * @param uCr2 The CR2 value if applicable.
10616 * @param cbInstr The instruction length (only relevant for
10617 * software interrupts).
10618 */
10619VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPUCC pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
10620 uint8_t cbInstr)
10621{
10622 iemInitDecoder(pVCpu, 0 /*fExecOpts*/); /** @todo wrong init function! */
10623#ifdef DBGFTRACE_ENABLED
10624 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
10625 u8TrapNo, enmType, uErrCode, uCr2);
10626#endif
10627
10628 uint32_t fFlags;
10629 switch (enmType)
10630 {
10631 case TRPM_HARDWARE_INT:
10632 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
10633 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
10634 uErrCode = uCr2 = 0;
10635 break;
10636
10637 case TRPM_SOFTWARE_INT:
10638 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
10639 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
10640 uErrCode = uCr2 = 0;
10641 break;
10642
10643 case TRPM_TRAP:
10644 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
10645 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
10646 if (u8TrapNo == X86_XCPT_PF)
10647 fFlags |= IEM_XCPT_FLAGS_CR2;
10648 switch (u8TrapNo)
10649 {
10650 case X86_XCPT_DF:
10651 case X86_XCPT_TS:
10652 case X86_XCPT_NP:
10653 case X86_XCPT_SS:
10654 case X86_XCPT_PF:
10655 case X86_XCPT_AC:
10656 case X86_XCPT_GP:
10657 fFlags |= IEM_XCPT_FLAGS_ERR;
10658 break;
10659 }
10660 break;
10661
10662 IEM_NOT_REACHED_DEFAULT_CASE_RET();
10663 }
10664
10665 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
10666
10667 if (pVCpu->iem.s.cActiveMappings > 0)
10668 iemMemRollback(pVCpu);
10669
10670 return rcStrict;
10671}
10672
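/*
 * Usage sketch: injecting a guest page fault through IEMInjectTrap.  The helper
 * name and the idea of the caller supplying the error code and faulting address
 * are assumptions for this example; the call and its flag handling follow the
 * interface above (TRPM_TRAP + X86_XCPT_PF means the error code and CR2 are used).
 */
static VBOXSTRICTRC iemSketchInjectPageFault(PVMCPUCC pVCpu, RTGCPTR GCPtrFault, uint16_t uErrCode)
{
    /* cbInstr is only relevant for software interrupts, so zero is fine here. */
    VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, X86_XCPT_PF, TRPM_TRAP, uErrCode, GCPtrFault, 0 /*cbInstr*/);
    if (rcStrict == VINF_IEM_RAISED_XCPT)
        rcStrict = VINF_SUCCESS; /* IEMInjectTrpmEvent below treats this as successfully delivered as well. */
    return rcStrict;
}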
10673
10674/**
10675 * Injects the active TRPM event.
10676 *
10677 * @returns Strict VBox status code.
10678 * @param pVCpu The cross context virtual CPU structure.
10679 */
10680VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPUCC pVCpu)
10681{
10682#ifndef IEM_IMPLEMENTS_TASKSWITCH
10683 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
10684#else
10685 uint8_t u8TrapNo;
10686 TRPMEVENT enmType;
10687 uint32_t uErrCode;
10688 RTGCUINTPTR uCr2;
10689 uint8_t cbInstr;
10690 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr, NULL /* fIcebp */);
10691 if (RT_FAILURE(rc))
10692 return rc;
10693
10694 /** @todo r=ramshankar: Pass ICEBP info. to IEMInjectTrap() below and handle
10695 * ICEBP \#DB injection as a special case. */
10696 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
10697#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
10698 if (rcStrict == VINF_SVM_VMEXIT)
10699 rcStrict = VINF_SUCCESS;
10700#endif
10701#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10702 if (rcStrict == VINF_VMX_VMEXIT)
10703 rcStrict = VINF_SUCCESS;
10704#endif
10705 /** @todo Are there any other codes that imply the event was successfully
10706 * delivered to the guest? See @bugref{6607}. */
10707 if ( rcStrict == VINF_SUCCESS
10708 || rcStrict == VINF_IEM_RAISED_XCPT)
10709 TRPMResetTrap(pVCpu);
10710
10711 return rcStrict;
10712#endif
10713}
10714
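/*
 * Usage sketch: letting IEM deliver whatever event TRPM has pending, e.g. from
 * an EM loop that decided the event cannot be injected via the hardware path.
 * The helper name is illustrative; TRPMHasTrap and IEMInjectTrpmEvent are the
 * interfaces actually used.
 */
static VBOXSTRICTRC emSketchDeliverPendingTrpmEvent(PVMCPUCC pVCpu)
{
    if (!TRPMHasTrap(pVCpu))
        return VINF_SUCCESS;            /* Nothing pending. */
    /* IEMInjectTrpmEvent queries TRPM itself and resets the trap when delivery succeeded. */
    return IEMInjectTrpmEvent(pVCpu);
}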
10715
10716VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
10717{
10718 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
10719 return VERR_NOT_IMPLEMENTED;
10720}
10721
10722
10723VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
10724{
10725 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
10726 return VERR_NOT_IMPLEMENTED;
10727}
10728
10729
10730/**
10731 * Interface for HM and EM for executing string I/O OUT (write) instructions.
10732 *
10733 * This API ASSUMES that the caller has already verified that the guest code is
10734 * allowed to access the I/O port. (The I/O port is in the DX register in the
10735 * guest state.)
10736 *
10737 * @returns Strict VBox status code.
10738 * @param pVCpu The cross context virtual CPU structure.
10739 * @param cbValue The size of the I/O port access (1, 2, or 4).
10740 * @param enmAddrMode The addressing mode.
10741 * @param fRepPrefix Indicates whether a repeat prefix is used
10742 * (doesn't matter which for this instruction).
10743 * @param cbInstr The instruction length in bytes.
10744 * @param iEffSeg The effective segment register.
10745 * @param fIoChecked Whether the access to the I/O port has been
10746 * checked or not. It's typically checked in the
10747 * HM scenario.
10748 */
10749VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
10750 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
10751{
10752 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
10753 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10754
10755 /*
10756 * State init.
10757 */
10758 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10759
10760 /*
10761 * Switch orgy for getting to the right handler.
10762 */
10763 VBOXSTRICTRC rcStrict;
10764 if (fRepPrefix)
10765 {
10766 switch (enmAddrMode)
10767 {
10768 case IEMMODE_16BIT:
10769 switch (cbValue)
10770 {
10771 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10772 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10773 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10774 default:
10775 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10776 }
10777 break;
10778
10779 case IEMMODE_32BIT:
10780 switch (cbValue)
10781 {
10782 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10783 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10784 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10785 default:
10786 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10787 }
10788 break;
10789
10790 case IEMMODE_64BIT:
10791 switch (cbValue)
10792 {
10793 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10794 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10795 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10796 default:
10797 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10798 }
10799 break;
10800
10801 default:
10802 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10803 }
10804 }
10805 else
10806 {
10807 switch (enmAddrMode)
10808 {
10809 case IEMMODE_16BIT:
10810 switch (cbValue)
10811 {
10812 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10813 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10814 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10815 default:
10816 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10817 }
10818 break;
10819
10820 case IEMMODE_32BIT:
10821 switch (cbValue)
10822 {
10823 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10824 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10825 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10826 default:
10827 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10828 }
10829 break;
10830
10831 case IEMMODE_64BIT:
10832 switch (cbValue)
10833 {
10834 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10835 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10836 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10837 default:
10838 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10839 }
10840 break;
10841
10842 default:
10843 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10844 }
10845 }
10846
10847 if (pVCpu->iem.s.cActiveMappings)
10848 iemMemRollback(pVCpu);
10849
10850 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10851}
10852
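/*
 * Usage sketch: forwarding a 'rep outsb' intercept to IEM.  The handler name,
 * the hard-coded DS segment and the assumption that the caller already
 * validated the I/O port are example choices; the IEMExecStringIoWrite call
 * follows the interface documented above.
 */
static VBOXSTRICTRC hmSketchHandleRepOutsb(PVMCPUCC pVCpu, uint8_t cbInstr)
{
    /* Byte sized, 32-bit addressing, REP prefix, DS segment; fIoChecked=true because
       the port permission check is assumed to have been done by the caller. */
    return IEMExecStringIoWrite(pVCpu, 1 /*cbValue*/, IEMMODE_32BIT, true /*fRepPrefix*/,
                                cbInstr, X86_SREG_DS, true /*fIoChecked*/);
}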
10853
10854/**
10855 * Interface for HM and EM for executing string I/O IN (read) instructions.
10856 *
10857 * This API ASSUMES that the caller has already verified that the guest code is
10858 * allowed to access the I/O port. (The I/O port is in the DX register in the
10859 * guest state.)
10860 *
10861 * @returns Strict VBox status code.
10862 * @param pVCpu The cross context virtual CPU structure.
10863 * @param cbValue The size of the I/O port access (1, 2, or 4).
10864 * @param enmAddrMode The addressing mode.
10865 * @param fRepPrefix Indicates whether a repeat prefix is used
10866 * (doesn't matter which for this instruction).
10867 * @param cbInstr The instruction length in bytes.
10868 * @param fIoChecked Whether the access to the I/O port has been
10869 * checked or not. It's typically checked in the
10870 * HM scenario.
10871 */
10872VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
10873 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
10874{
10875 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10876
10877 /*
10878 * State init.
10879 */
10880 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10881
10882 /*
10883 * Switch orgy for getting to the right handler.
10884 */
10885 VBOXSTRICTRC rcStrict;
10886 if (fRepPrefix)
10887 {
10888 switch (enmAddrMode)
10889 {
10890 case IEMMODE_16BIT:
10891 switch (cbValue)
10892 {
10893 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
10894 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
10895 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
10896 default:
10897 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10898 }
10899 break;
10900
10901 case IEMMODE_32BIT:
10902 switch (cbValue)
10903 {
10904 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
10905 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
10906 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
10907 default:
10908 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10909 }
10910 break;
10911
10912 case IEMMODE_64BIT:
10913 switch (cbValue)
10914 {
10915 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
10916 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
10917 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
10918 default:
10919 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10920 }
10921 break;
10922
10923 default:
10924 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10925 }
10926 }
10927 else
10928 {
10929 switch (enmAddrMode)
10930 {
10931 case IEMMODE_16BIT:
10932 switch (cbValue)
10933 {
10934 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
10935 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
10936 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
10937 default:
10938 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10939 }
10940 break;
10941
10942 case IEMMODE_32BIT:
10943 switch (cbValue)
10944 {
10945 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
10946 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
10947 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
10948 default:
10949 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10950 }
10951 break;
10952
10953 case IEMMODE_64BIT:
10954 switch (cbValue)
10955 {
10956 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
10957 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
10958 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
10959 default:
10960 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10961 }
10962 break;
10963
10964 default:
10965 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10966 }
10967 }
10968
10969 if ( pVCpu->iem.s.cActiveMappings == 0
10970 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
10971 { /* likely */ }
10972 else
10973 {
10974 AssertMsg(!IOM_SUCCESS(rcStrict), ("%#x\n", VBOXSTRICTRC_VAL(rcStrict)));
10975 iemMemRollback(pVCpu);
10976 }
10977 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10978}
10979
10980
10981/**
10982 * Interface for rawmode to execute an OUT (write) instruction.
10983 *
10984 * @returns Strict VBox status code.
10985 * @param pVCpu The cross context virtual CPU structure.
10986 * @param cbInstr The instruction length in bytes.
10987 * @param u16Port The port to write to.
10988 * @param fImm Whether the port is specified using an immediate operand or
10989 * using the implicit DX register.
10990 * @param cbReg The register size.
10991 *
10992 * @remarks In ring-0 not all of the state needs to be synced in.
10993 */
10994VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
10995{
10996 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10997 Assert(cbReg <= 4 && cbReg != 3);
10998
10999 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11000 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_out, u16Port, cbReg,
11001 ((uint8_t)fImm << 7) | 0xf /** @todo never worked with intercepts */);
11002 Assert(!pVCpu->iem.s.cActiveMappings);
11003 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11004}
11005
11006
11007/**
11008 * Interface for rawmode to execute an IN (read) instruction.
11009 *
11010 * @returns Strict VBox status code.
11011 * @param pVCpu The cross context virtual CPU structure.
11012 * @param cbInstr The instruction length in bytes.
11013 * @param u16Port The port to read.
11014 * @param fImm Whether the port is specified using an immediate operand or
11015 * using the implicit DX.
11016 * @param cbReg The register size.
11017 */
11018VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
11019{
11020 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
11021 Assert(cbReg <= 4 && cbReg != 3);
11022
11023 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11024 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_in, u16Port, cbReg,
11025 ((uint8_t)fImm << 7) | 0xf /** @todo never worked with intercepts */);
11026 Assert(!pVCpu->iem.s.cActiveMappings);
11027 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11028}
11029
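/*
 * Usage sketch: handing already-decoded port I/O to IEM.  The handler shape
 * (how it learned the port, width and direction) is assumed; the
 * IEMExecDecodedOut/IEMExecDecodedIn calls follow the interfaces above.
 */
static VBOXSTRICTRC hmSketchHandleDecodedPortIo(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fWrite, uint8_t cbReg)
{
    /* fImm=false: the port number came from DX rather than an immediate operand. */
    if (fWrite)
        return IEMExecDecodedOut(pVCpu, cbInstr, u16Port, false /*fImm*/, cbReg);
    return IEMExecDecodedIn(pVCpu, cbInstr, u16Port, false /*fImm*/, cbReg);
}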
11030
11031/**
11032 * Interface for HM and EM to write to a CRx register.
11033 *
11034 * @returns Strict VBox status code.
11035 * @param pVCpu The cross context virtual CPU structure.
11036 * @param cbInstr The instruction length in bytes.
11037 * @param iCrReg The control register number (destination).
11038 * @param iGReg The general purpose register number (source).
11039 *
11040 * @remarks In ring-0 not all of the state needs to be synced in.
11041 */
11042VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
11043{
11044 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11045 Assert(iCrReg < 16);
11046 Assert(iGReg < 16);
11047
11048 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11049 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
11050 Assert(!pVCpu->iem.s.cActiveMappings);
11051 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11052}
11053
11054
11055/**
11056 * Interface for HM and EM to read from a CRx register.
11057 *
11058 * @returns Strict VBox status code.
11059 * @param pVCpu The cross context virtual CPU structure.
11060 * @param cbInstr The instruction length in bytes.
11061 * @param iGReg The general purpose register number (destination).
11062 * @param iCrReg The control register number (source).
11063 *
11064 * @remarks In ring-0 not all of the state needs to be synced in.
11065 */
11066VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
11067{
11068 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11069 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4
11070 | CPUMCTX_EXTRN_APIC_TPR);
11071 Assert(iCrReg < 16);
11072 Assert(iGReg < 16);
11073
11074 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11075 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
11076 Assert(!pVCpu->iem.s.cActiveMappings);
11077 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11078}
11079
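/*
 * Usage sketch: emulating an intercepted 'mov cr3, rax' / 'mov rax, cr3' pair.
 * The handler name and the fixed choice of CR3/RAX are example assumptions;
 * note the swapped argument order between the write and read interfaces above.
 */
static VBOXSTRICTRC hmSketchHandleMovCr3(PVMCPUCC pVCpu, uint8_t cbInstr, bool fWrite)
{
    if (fWrite) /* mov cr3, rax: CR3 is the destination, RAX the source. */
        return IEMExecDecodedMovCRxWrite(pVCpu, cbInstr, 3 /*iCrReg*/, X86_GREG_xAX /*iGReg*/);
    /* mov rax, cr3: RAX is the destination, CR3 the source. */
    return IEMExecDecodedMovCRxRead(pVCpu, cbInstr, X86_GREG_xAX /*iGReg*/, 3 /*iCrReg*/);
}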
11080
11081/**
11082 * Interface for HM and EM to write to a DRx register.
11083 *
11084 * @returns Strict VBox status code.
11085 * @param pVCpu The cross context virtual CPU structure.
11086 * @param cbInstr The instruction length in bytes.
11087 * @param iDrReg The debug register number (destination).
11088 * @param iGReg The general purpose register number (source).
11089 *
11090 * @remarks In ring-0 not all of the state needs to be synced in.
11091 */
11092VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovDRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iDrReg, uint8_t iGReg)
11093{
11094 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11095 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_DR7);
11096 Assert(iDrReg < 8);
11097 Assert(iGReg < 16);
11098
11099 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11100 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Dd_Rd, iDrReg, iGReg);
11101 Assert(!pVCpu->iem.s.cActiveMappings);
11102 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11103}
11104
11105
11106/**
11107 * Interface for HM and EM to read from a DRx register.
11108 *
11109 * @returns Strict VBox status code.
11110 * @param pVCpu The cross context virtual CPU structure.
11111 * @param cbInstr The instruction length in bytes.
11112 * @param iGReg The general purpose register number (destination).
11113 * @param iDrReg The debug register number (source).
11114 *
11115 * @remarks In ring-0 not all of the state needs to be synced in.
11116 */
11117VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovDRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iDrReg)
11118{
11119 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11120 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_DR7);
11121 Assert(iDrReg < 8);
11122 Assert(iGReg < 16);
11123
11124 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11125 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Dd, iGReg, iDrReg);
11126 Assert(!pVCpu->iem.s.cActiveMappings);
11127 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11128}
11129
11130
11131/**
11132 * Interface for HM and EM to clear the CR0[TS] bit.
11133 *
11134 * @returns Strict VBox status code.
11135 * @param pVCpu The cross context virtual CPU structure.
11136 * @param cbInstr The instruction length in bytes.
11137 *
11138 * @remarks In ring-0 not all of the state needs to be synced in.
11139 */
11140VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPUCC pVCpu, uint8_t cbInstr)
11141{
11142 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11143
11144 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11145 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
11146 Assert(!pVCpu->iem.s.cActiveMappings);
11147 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11148}
11149
11150
11151/**
11152 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
11153 *
11154 * @returns Strict VBox status code.
11155 * @param pVCpu The cross context virtual CPU structure.
11156 * @param cbInstr The instruction length in bytes.
11157 * @param uValue The value to load into CR0.
11158 * @param GCPtrEffDst The guest-linear address if the LMSW instruction has a
11159 * memory operand. Otherwise pass NIL_RTGCPTR.
11160 *
11161 * @remarks In ring-0 not all of the state needs to be synced in.
11162 */
11163VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t uValue, RTGCPTR GCPtrEffDst)
11164{
11165 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11166
11167 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11168 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_lmsw, uValue, GCPtrEffDst);
11169 Assert(!pVCpu->iem.s.cActiveMappings);
11170 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11171}
11172
11173
11174/**
11175 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
11176 *
11177 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
11178 *
11179 * @returns Strict VBox status code.
11180 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11181 * @param cbInstr The instruction length in bytes.
11182 * @remarks In ring-0 not all of the state needs to be synced in.
11183 * @thread EMT(pVCpu)
11184 */
11185VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPUCC pVCpu, uint8_t cbInstr)
11186{
11187 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11188
11189 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11190 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
11191 Assert(!pVCpu->iem.s.cActiveMappings);
11192 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11193}
11194
11195
11196/**
11197 * Interface for HM and EM to emulate the WBINVD instruction.
11198 *
11199 * @returns Strict VBox status code.
11200 * @param pVCpu The cross context virtual CPU structure.
11201 * @param cbInstr The instruction length in bytes.
11202 *
11203 * @remarks In ring-0 not all of the state needs to be synced in.
11204 */
11205VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWbinvd(PVMCPUCC pVCpu, uint8_t cbInstr)
11206{
11207 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11208
11209 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11210 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wbinvd);
11211 Assert(!pVCpu->iem.s.cActiveMappings);
11212 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11213}
11214
11215
11216/**
11217 * Interface for HM and EM to emulate the INVD instruction.
11218 *
11219 * @returns Strict VBox status code.
11220 * @param pVCpu The cross context virtual CPU structure.
11221 * @param cbInstr The instruction length in bytes.
11222 *
11223 * @remarks In ring-0 not all of the state needs to be synced in.
11224 */
11225VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvd(PVMCPUCC pVCpu, uint8_t cbInstr)
11226{
11227 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11228
11229 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11230 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invd);
11231 Assert(!pVCpu->iem.s.cActiveMappings);
11232 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11233}
11234
11235
11236/**
11237 * Interface for HM and EM to emulate the INVLPG instruction.
11238 *
11239 * @returns Strict VBox status code.
11240 * @retval VINF_PGM_SYNC_CR3
11241 *
11242 * @param pVCpu The cross context virtual CPU structure.
11243 * @param cbInstr The instruction length in bytes.
11244 * @param GCPtrPage The effective address of the page to invalidate.
11245 *
11246 * @remarks In ring-0 not all of the state needs to be synced in.
11247 */
11248VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpg(PVMCPUCC pVCpu, uint8_t cbInstr, RTGCPTR GCPtrPage)
11249{
11250 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11251
11252 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11253 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_invlpg, GCPtrPage);
11254 Assert(!pVCpu->iem.s.cActiveMappings);
11255 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11256}
11257
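/*
 * Usage sketch: forwarding an INVLPG intercept and honouring the special
 * VINF_PGM_SYNC_CR3 status documented above.  The handler name is an
 * assumption; the IEMExecDecodedInvlpg call follows the interface above.
 */
static VBOXSTRICTRC hmSketchHandleInvlpg(PVMCPUCC pVCpu, uint8_t cbInstr, RTGCPTR GCPtrPage)
{
    VBOXSTRICTRC rcStrict = IEMExecDecodedInvlpg(pVCpu, cbInstr, GCPtrPage);
    if (rcStrict == VINF_PGM_SYNC_CR3)
        /* PGM wants a full CR3 resync; a real caller would pass this status up to
           EM/PGM rather than resuming guest execution directly. */
        Log(("hmSketchHandleInvlpg: %RGv -> VINF_PGM_SYNC_CR3\n", GCPtrPage));
    return rcStrict;
}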
11258
11259/**
11260 * Interface for HM and EM to emulate the INVPCID instruction.
11261 *
11262 * @returns Strict VBox status code.
11263 * @retval VINF_PGM_SYNC_CR3
11264 *
11265 * @param pVCpu The cross context virtual CPU structure.
11266 * @param cbInstr The instruction length in bytes.
11267 * @param iEffSeg The effective segment register.
11268 * @param GCPtrDesc The effective address of the INVPCID descriptor.
11269 * @param uType The invalidation type.
11270 *
11271 * @remarks In ring-0 not all of the state needs to be synced in.
11272 */
11273VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvpcid(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPTR GCPtrDesc,
11274 uint64_t uType)
11275{
11276 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 4);
11277
11278 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11279 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_invpcid, iEffSeg, GCPtrDesc, uType);
11280 Assert(!pVCpu->iem.s.cActiveMappings);
11281 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11282}
11283
11284
11285/**
11286 * Interface for HM and EM to emulate the CPUID instruction.
11287 *
11288 * @returns Strict VBox status code.
11289 *
11290 * @param pVCpu The cross context virtual CPU structure.
11291 * @param cbInstr The instruction length in bytes.
11292 *
11293 * @remarks Not all of the state needs to be synced in; the usual set plus RAX and RCX.
11294 */
11295VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedCpuid(PVMCPUCC pVCpu, uint8_t cbInstr)
11296{
11297 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11298 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX);
11299
11300 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11301 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_cpuid);
11302 Assert(!pVCpu->iem.s.cActiveMappings);
11303 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11304}
11305
11306
11307/**
11308 * Interface for HM and EM to emulate the RDPMC instruction.
11309 *
11310 * @returns Strict VBox status code.
11311 *
11312 * @param pVCpu The cross context virtual CPU structure.
11313 * @param cbInstr The instruction length in bytes.
11314 *
11315 * @remarks Not all of the state needs to be synced in.
11316 */
11317VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdpmc(PVMCPUCC pVCpu, uint8_t cbInstr)
11318{
11319 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11320 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
11321
11322 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11323 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdpmc);
11324 Assert(!pVCpu->iem.s.cActiveMappings);
11325 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11326}
11327
11328
11329/**
11330 * Interface for HM and EM to emulate the RDTSC instruction.
11331 *
11332 * @returns Strict VBox status code.
11333 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
11334 *
11335 * @param pVCpu The cross context virtual CPU structure.
11336 * @param cbInstr The instruction length in bytes.
11337 *
11338 * @remarks Not all of the state needs to be synced in.
11339 */
11340VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtsc(PVMCPUCC pVCpu, uint8_t cbInstr)
11341{
11342 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11343 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
11344
11345 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11346 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtsc);
11347 Assert(!pVCpu->iem.s.cActiveMappings);
11348 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11349}
11350
11351
11352/**
11353 * Interface for HM and EM to emulate the RDTSCP instruction.
11354 *
11355 * @returns Strict VBox status code.
11356 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
11357 *
11358 * @param pVCpu The cross context virtual CPU structure.
11359 * @param cbInstr The instruction length in bytes.
11360 *
11361 * @remarks Not all of the state needs to be synced in. Recommended
11362 *          to include CPUMCTX_EXTRN_TSC_AUX to avoid an extra fetch call.
11363 */
11364VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtscp(PVMCPUCC pVCpu, uint8_t cbInstr)
11365{
11366 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11367 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_TSC_AUX);
11368
11369 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11370 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtscp);
11371 Assert(!pVCpu->iem.s.cActiveMappings);
11372 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11373}
11374
11375
11376/**
11377 * Interface for HM and EM to emulate the RDMSR instruction.
11378 *
11379 * @returns Strict VBox status code.
11380 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
11381 *
11382 * @param pVCpu The cross context virtual CPU structure.
11383 * @param cbInstr The instruction length in bytes.
11384 *
11385 * @remarks Not all of the state needs to be synced in. Requires RCX and
11386 * (currently) all MSRs.
11387 */
11388VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
11389{
11390 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11391 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_ALL_MSRS);
11392
11393 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11394 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdmsr);
11395 Assert(!pVCpu->iem.s.cActiveMappings);
11396 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11397}
11398
11399
11400/**
11401 * Interface for HM and EM to emulate the WRMSR instruction.
11402 *
11403 * @returns Strict VBox status code.
11404 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
11405 *
11406 * @param pVCpu The cross context virtual CPU structure.
11407 * @param cbInstr The instruction length in bytes.
11408 *
11409 * @remarks Not all of the state needs to be synced in. Requires RCX, RAX, RDX,
11410 * and (currently) all MSRs.
11411 */
11412VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWrmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
11413{
11414 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11415 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK
11416 | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_ALL_MSRS);
11417
11418 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11419 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wrmsr);
11420 Assert(!pVCpu->iem.s.cActiveMappings);
11421 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11422}
11423
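/*
 * Usage sketch: dispatching an MSR-access intercept to IEM.  The direction flag
 * coming from exit information is an assumption; the IEMExecDecodedRdmsr/Wrmsr
 * calls follow the interfaces above and rely on RCX (plus RAX/RDX for writes)
 * already being valid in the guest context.
 */
static VBOXSTRICTRC hmSketchHandleMsrAccess(PVMCPUCC pVCpu, uint8_t cbInstr, bool fWrite)
{
    return fWrite
         ? IEMExecDecodedWrmsr(pVCpu, cbInstr)
         : IEMExecDecodedRdmsr(pVCpu, cbInstr);
}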
11424
11425/**
11426 * Interface for HM and EM to emulate the MONITOR instruction.
11427 *
11428 * @returns Strict VBox status code.
11429 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
11430 *
11431 * @param pVCpu The cross context virtual CPU structure.
11432 * @param cbInstr The instruction length in bytes.
11433 *
11434 * @remarks Not all of the state needs to be synced in.
11435 * @remarks ASSUMES the default segment of DS and no segment override prefixes
11436 * are used.
11437 */
11438VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMonitor(PVMCPUCC pVCpu, uint8_t cbInstr)
11439{
11440 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11441 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);
11442
11443 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11444 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_monitor, X86_SREG_DS);
11445 Assert(!pVCpu->iem.s.cActiveMappings);
11446 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11447}
11448
11449
11450/**
11451 * Interface for HM and EM to emulate the MWAIT instruction.
11452 *
11453 * @returns Strict VBox status code.
11454 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
11455 *
11456 * @param pVCpu The cross context virtual CPU structure.
11457 * @param cbInstr The instruction length in bytes.
11458 *
11459 * @remarks Not all of the state needs to be synced in.
11460 */
11461VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMwait(PVMCPUCC pVCpu, uint8_t cbInstr)
11462{
11463 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11464 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX);
11465
11466 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11467 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_mwait);
11468 Assert(!pVCpu->iem.s.cActiveMappings);
11469 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11470}
11471
11472
11473/**
11474 * Interface for HM and EM to emulate the HLT instruction.
11475 *
11476 * @returns Strict VBox status code.
11477 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
11478 *
11479 * @param pVCpu The cross context virtual CPU structure.
11480 * @param cbInstr The instruction length in bytes.
11481 *
11482 * @remarks Not all of the state needs to be synced in.
11483 */
11484VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedHlt(PVMCPUCC pVCpu, uint8_t cbInstr)
11485{
11486 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
11487
11488 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11489 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_hlt);
11490 Assert(!pVCpu->iem.s.cActiveMappings);
11491 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11492}
11493
11494
11495/**
11496 * Checks if IEM is in the process of delivering an event (interrupt or
11497 * exception).
11498 *
11499 * @returns true if we're in the process of raising an interrupt or exception,
11500 * false otherwise.
11501 * @param pVCpu The cross context virtual CPU structure.
11502 * @param puVector Where to store the vector associated with the
11503 * currently delivered event, optional.
11504 * @param pfFlags Where to store the event delivery flags (see
11505 * IEM_XCPT_FLAGS_XXX), optional.
11506 * @param puErr Where to store the error code associated with the
11507 * event, optional.
11508 * @param puCr2 Where to store the CR2 associated with the event,
11509 * optional.
11510 * @remarks The caller should check the flags to determine if the error code and
11511 * CR2 are valid for the event.
11512 */
11513VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPUCC pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr, uint64_t *puCr2)
11514{
11515 bool const fRaisingXcpt = pVCpu->iem.s.cXcptRecursions > 0;
11516 if (fRaisingXcpt)
11517 {
11518 if (puVector)
11519 *puVector = pVCpu->iem.s.uCurXcpt;
11520 if (pfFlags)
11521 *pfFlags = pVCpu->iem.s.fCurXcpt;
11522 if (puErr)
11523 *puErr = pVCpu->iem.s.uCurXcptErr;
11524 if (puCr2)
11525 *puCr2 = pVCpu->iem.s.uCurXcptCr2;
11526 }
11527 return fRaisingXcpt;
11528}
11529
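/*
 * Usage sketch: checking whether IEM is currently delivering a page fault,
 * e.g. to detect a vectoring fault before queueing another event.  The helper
 * name is illustrative; the IEMGetCurrentXcpt call and the IEM_XCPT_FLAGS_CR2
 * check follow the interface above (CR2 is only valid when that flag is set).
 */
static bool iemSketchIsDeliveringPageFault(PVMCPUCC pVCpu, uint64_t *puCr2)
{
    uint8_t  uVector = 0;
    uint32_t fFlags  = 0;
    uint32_t uErr    = 0;
    return IEMGetCurrentXcpt(pVCpu, &uVector, &fFlags, &uErr, puCr2)
        && uVector == X86_XCPT_PF
        && (fFlags & IEM_XCPT_FLAGS_CR2);
}
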
11530#ifdef IN_RING3
11531
11532/**
11533 * Handles the unlikely and probably fatal merge cases.
11534 *
11535 * @returns Merged status code.
11536 * @param rcStrict Current EM status code.
11537 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
11538 * with @a rcStrict.
11539 * @param iMemMap The memory mapping index. For error reporting only.
11540 * @param pVCpu The cross context virtual CPU structure of the calling
11541 * thread, for error reporting only.
11542 */
11543DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
11544 unsigned iMemMap, PVMCPUCC pVCpu)
11545{
11546 if (RT_FAILURE_NP(rcStrict))
11547 return rcStrict;
11548
11549 if (RT_FAILURE_NP(rcStrictCommit))
11550 return rcStrictCommit;
11551
11552 if (rcStrict == rcStrictCommit)
11553 return rcStrictCommit;
11554
11555 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
11556 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
11557 pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
11558 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
11559 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
11560 return VERR_IOM_FF_STATUS_IPE;
11561}
11562
11563
11564/**
11565 * Helper for IOMR3ProcessForceFlag.
11566 *
11567 * @returns Merged status code.
11568 * @param rcStrict Current EM status code.
11569 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
11570 * with @a rcStrict.
11571 * @param iMemMap The memory mapping index. For error reporting only.
11572 * @param pVCpu The cross context virtual CPU structure of the calling
11573 * thread, for error reporting only.
11574 */
11575DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPUCC pVCpu)
11576{
11577 /* Simple. */
11578 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
11579 return rcStrictCommit;
11580
11581 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
11582 return rcStrict;
11583
11584 /* EM scheduling status codes. */
11585 if (RT_LIKELY( rcStrict >= VINF_EM_FIRST
11586 && rcStrict <= VINF_EM_LAST))
11587 {
11588 if (RT_LIKELY( rcStrictCommit >= VINF_EM_FIRST
11589 && rcStrictCommit <= VINF_EM_LAST))
11590 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
11591 }
11592
11593 /* Unlikely */
11594 return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
11595}
11596
11597
11598/**
11599 * Called by force-flag handling code when VMCPU_FF_IEM is set.
11600 *
11601 * @returns Merge between @a rcStrict and what the commit operation returned.
11602 * @param pVM The cross context VM structure.
11603 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11604 * @param rcStrict The status code returned by ring-0 or raw-mode.
11605 */
11606VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
11607{
11608 /*
11609 * Reset the pending commit.
11610 */
11611 AssertMsg( (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
11612 & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
11613 ("%#x %#x %#x\n",
11614 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
11615 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
11616
11617 /*
11618 * Commit the pending bounce buffers (usually just one).
11619 */
11620 unsigned cBufs = 0;
11621 unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
11622 while (iMemMap-- > 0)
11623 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
11624 {
11625 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
11626 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
11627 Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
11628
11629 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
11630 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
11631 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
11632
11633 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
11634 {
11635 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
11636 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
11637 pbBuf,
11638 cbFirst,
11639 PGMACCESSORIGIN_IEM);
11640 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
11641 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
11642 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
11643 VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
11644 }
11645
11646 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
11647 {
11648 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
11649 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
11650 pbBuf + cbFirst,
11651 cbSecond,
11652 PGMACCESSORIGIN_IEM);
11653 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
11654 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
11655 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
11656 VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
11657 }
11658 cBufs++;
11659 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
11660 }
11661
11662 AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
11663 ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
11664 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
11665 pVCpu->iem.s.cActiveMappings = 0;
11666 return rcStrict;
11667}
11668
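/*
 * Usage sketch: how a ring-3 run loop might react to VMCPU_FF_IEM after coming
 * back from ring-0.  The loop context is assumed; the force-flag test and the
 * IEMR3ProcessForceFlag call follow the interface above.
 */
static VBOXSTRICTRC emR3SketchHandleIemForceFlag(PVM pVM, PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
{
    /* A pending bounce buffer commit is signalled with VMCPU_FF_IEM; merge the
       commit status with whatever status ring-0/raw-mode already returned. */
    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
        rcStrict = IEMR3ProcessForceFlag(pVM, pVCpu, rcStrict);
    return rcStrict;
}
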
11669#endif /* IN_RING3 */
11670