
source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@100316

Last change on this file since 100316 was 100277, checked in by vboxsync, 20 months ago

VMM/IEM: Reworked the PC -> phys address translation in the recompiler. Generate CS.LIM checks if in the last page of a segment. (todo page crossing) bugref:10369

1/* $Id: IEMAll.cpp 100277 2023-06-24 02:48:28Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/** @page pg_iem IEM - Interpreted Execution Manager
30 *
31 * The interpreted execution manager (IEM) is for executing short guest code
32 * sequences that are causing too many exits / virtualization traps. It will
33 * also be used to interpret single instructions, thus replacing the selective
34 * interpreters in EM and IOM.
35 *
36 * Design goals:
37 * - Relatively small footprint, although we favour speed and correctness
38 * over size.
39 * - Reasonably fast.
40 * - Correctly handle lock prefixed instructions.
41 * - Complete instruction set - eventually.
42 * - Refactorable into a recompiler, maybe.
43 * - Replace EMInterpret*.
44 *
45 * Using the existing disassembler has been considered; however, this is thought
46 * to conflict with speed as the disassembler chews things a bit too much while
47 * leaving us with a somewhat complicated state to interpret afterwards.
48 *
49 *
50 * The current code is very much work in progress. You've been warned!
51 *
52 *
53 * @section sec_iem_fpu_instr FPU Instructions
54 *
55 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
56 * same or equivalent instructions on the host FPU. To make life easy, we also
57 * let the FPU prioritize the unmasked exceptions for us. This, however, only
58 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
59 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
60 * can trigger spurious FPU exceptions.
61 *
62 * The guest FPU state is not loaded into the host CPU and kept there till we
63 * leave IEM because the calling conventions have declared an all year open
64 * season on much of the FPU state. For instance an innocent looking call to
65 * memcpy might end up using a whole bunch of XMM or MM registers if the
66 * particular implementation finds it worthwhile.
67 *
68 *
69 * @section sec_iem_logging Logging
70 *
71 * The IEM code uses the \"IEM\" log group for the main logging. The different
72 * logging levels/flags are generally used for the following purposes:
73 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
74 * - Flow (LogFlow) : Basic enter/exit IEM state info.
75 * - Level 2 (Log2) : ?
76 * - Level 3 (Log3) : More detailed enter/exit IEM state info.
77 * - Level 4 (Log4) : Decoding mnemonics w/ EIP.
78 * - Level 5 (Log5) : Decoding details.
79 * - Level 6 (Log6) : Enables/disables the lockstep comparison with REM.
80 * - Level 7 (Log7) : iret++ execution logging.
81 * - Level 8 (Log8) : Memory writes.
82 * - Level 9 (Log9) : Memory reads.
83 * - Level 10 (Log10): TLBs.
84 * - Level 11 (Log11): Unmasked FPU exceptions.
85 *
86 * The SVM (AMD-V) and VMX (VT-x) code has the following assignments:
87 * - Level 1 (Log) : Errors and other major events.
88 * - Flow (LogFlow) : Misc flow stuff (cleanup?)
89 * - Level 2 (Log2) : VM exits.
90 */
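
/*
 * Illustrative note (added, not part of the upstream sources): the level
 * assignments above map directly onto the generic VBox logging macros, so a
 * decode-time mnemonic line and a memory write would be emitted roughly as
 * follows (the format strings here are made up for illustration only):
 *
 *      Log4(("decode %04x:%08RX64: xor eax, eax\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
 *      Log8(("IEM write %RGp LB %#x\n", GCPhysDst, cbToWrite));
 *
 * GCPhysDst and cbToWrite are hypothetical names; which levels are compiled
 * in and enabled is governed by the LOG_GROUP_IEM settings defined below.
 */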
91
92/* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
93#ifdef _MSC_VER
94# pragma warning(disable:4505)
95#endif
96
97
98/*********************************************************************************************************************************
99* Header Files *
100*********************************************************************************************************************************/
101#define LOG_GROUP LOG_GROUP_IEM
102#define VMCPU_INCL_CPUM_GST_CTX
103#include <VBox/vmm/iem.h>
104#include <VBox/vmm/cpum.h>
105#include <VBox/vmm/apic.h>
106#include <VBox/vmm/pdm.h>
107#include <VBox/vmm/pgm.h>
108#include <VBox/vmm/iom.h>
109#include <VBox/vmm/em.h>
110#include <VBox/vmm/hm.h>
111#include <VBox/vmm/nem.h>
112#include <VBox/vmm/gim.h>
113#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
114# include <VBox/vmm/em.h>
115# include <VBox/vmm/hm_svm.h>
116#endif
117#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
118# include <VBox/vmm/hmvmxinline.h>
119#endif
120#include <VBox/vmm/tm.h>
121#include <VBox/vmm/dbgf.h>
122#include <VBox/vmm/dbgftrace.h>
123#include "IEMInternal.h"
124#include <VBox/vmm/vmcc.h>
125#include <VBox/log.h>
126#include <VBox/err.h>
127#include <VBox/param.h>
128#include <VBox/dis.h>
129#include <iprt/asm-math.h>
130#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
131# include <iprt/asm-amd64-x86.h>
132#elif defined(RT_ARCH_ARM64) || defined(RT_ARCH_ARM32)
133# include <iprt/asm-arm.h>
134#endif
135#include <iprt/assert.h>
136#include <iprt/string.h>
137#include <iprt/x86.h>
138
139#include "IEMInline.h"
140
141
142/*********************************************************************************************************************************
143* Structures and Typedefs *
144*********************************************************************************************************************************/
145/**
146 * CPU exception classes.
147 */
148typedef enum IEMXCPTCLASS
149{
150 IEMXCPTCLASS_BENIGN,
151 IEMXCPTCLASS_CONTRIBUTORY,
152 IEMXCPTCLASS_PAGE_FAULT,
153 IEMXCPTCLASS_DOUBLE_FAULT
154} IEMXCPTCLASS;
155
156
157/*********************************************************************************************************************************
158* Global Variables *
159*********************************************************************************************************************************/
160#if defined(IEM_LOG_MEMORY_WRITES)
161/** What IEM just wrote. */
162uint8_t g_abIemWrote[256];
163/** How much IEM just wrote. */
164size_t g_cbIemWrote;
165#endif
166
167
168/*********************************************************************************************************************************
169* Internal Functions *
170*********************************************************************************************************************************/
171static VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,
172 uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT;
173
174
175/**
176 * Calculates IEM_F_BRK_PENDING_XXX (IEM_F_PENDING_BRK_MASK) flags, slow code
177 * path.
178 *
179 * @returns IEM_F_BRK_PENDING_XXX or zero.
180 * @param pVCpu The cross context virtual CPU structure of the
181 * calling thread.
182 *
183 * @note Don't call directly, use iemCalcExecDbgFlags instead.
184 */
185uint32_t iemCalcExecDbgFlagsSlow(PVMCPUCC pVCpu)
186{
187 uint32_t fExec = 0;
188
189 /*
190 * Process guest breakpoints.
191 */
192#define PROCESS_ONE_BP(a_fDr7, a_iBp) do { \
193 if (a_fDr7 & X86_DR7_L_G(a_iBp)) \
194 { \
195 switch (X86_DR7_GET_RW(a_fDr7, a_iBp)) \
196 { \
197 case X86_DR7_RW_EO: \
198 fExec |= IEM_F_PENDING_BRK_INSTR; \
199 break; \
200 case X86_DR7_RW_WO: \
201 case X86_DR7_RW_RW: \
202 fExec |= IEM_F_PENDING_BRK_DATA; \
203 break; \
204 case X86_DR7_RW_IO: \
205 fExec |= IEM_F_PENDING_BRK_X86_IO; \
206 break; \
207 } \
208 } \
209 } while (0)
210
211 uint32_t const fGstDr7 = (uint32_t)pVCpu->cpum.GstCtx.dr[7];
212 if (fGstDr7 & X86_DR7_ENABLED_MASK)
213 {
214 PROCESS_ONE_BP(fGstDr7, 0);
215 PROCESS_ONE_BP(fGstDr7, 1);
216 PROCESS_ONE_BP(fGstDr7, 2);
217 PROCESS_ONE_BP(fGstDr7, 3);
218 }
219
220 /*
221 * Process hypervisor breakpoints.
222 */
223 uint32_t const fHyperDr7 = DBGFBpGetDR7(pVCpu->CTX_SUFF(pVM));
224 if (fHyperDr7 & X86_DR7_ENABLED_MASK)
225 {
226 PROCESS_ONE_BP(fHyperDr7, 0);
227 PROCESS_ONE_BP(fHyperDr7, 1);
228 PROCESS_ONE_BP(fHyperDr7, 2);
229 PROCESS_ONE_BP(fHyperDr7, 3);
230 }
231
232 return fExec;
233}
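
/*
 * Usage sketch (added for illustration, not upstream code): a guest that arms
 * DR0 as an execution breakpoint sets the L0/G0 enable bits and leaves the
 * R/W0 field at 00b (execute only, assumed here), which the macro above maps
 * to IEM_F_PENDING_BRK_INSTR:
 *
 *      pVCpu->cpum.GstCtx.dr[7] |= X86_DR7_L_G(0);             // enable BP0; R/W0 assumed to be 00b
 *      uint32_t const fExec = iemCalcExecDbgFlagsSlow(pVCpu);
 *      Assert(fExec & IEM_F_PENDING_BRK_INSTR);
 *
 * Normal code should go through iemCalcExecDbgFlags instead of calling this
 * slow path directly (see the note in the function documentation).
 */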
234
235
236/**
237 * Initializes the decoder state.
238 *
239 * iemReInitDecoder is mostly a copy of this function.
240 *
241 * @param pVCpu The cross context virtual CPU structure of the
242 * calling thread.
243 * @param fExecOpts Optional execution flags:
244 * - IEM_F_BYPASS_HANDLERS
245 * - IEM_F_X86_DISREGARD_LOCK
246 */
247DECLINLINE(void) iemInitDecoder(PVMCPUCC pVCpu, uint32_t fExecOpts)
248{
249 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
250 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
251 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
252 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
253 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
254 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
255 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
256 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
257 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
258 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
259
260 /* Execution state: */
261 uint32_t fExec;
262 pVCpu->iem.s.fExec = fExec = iemCalcExecFlags(pVCpu) | fExecOpts;
263
264 /* Decoder state: */
265 pVCpu->iem.s.enmDefAddrMode = fExec & IEM_F_MODE_CPUMODE_MASK; /** @todo check if this is correct... */
266 pVCpu->iem.s.enmEffAddrMode = fExec & IEM_F_MODE_CPUMODE_MASK;
267 if ((fExec & IEM_F_MODE_CPUMODE_MASK) != IEMMODE_64BIT)
268 {
269 pVCpu->iem.s.enmDefOpSize = fExec & IEM_F_MODE_CPUMODE_MASK; /** @todo check if this is correct... */
270 pVCpu->iem.s.enmEffOpSize = fExec & IEM_F_MODE_CPUMODE_MASK;
271 }
272 else
273 {
274 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
275 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
276 }
277 pVCpu->iem.s.fPrefixes = 0;
278 pVCpu->iem.s.uRexReg = 0;
279 pVCpu->iem.s.uRexB = 0;
280 pVCpu->iem.s.uRexIndex = 0;
281 pVCpu->iem.s.idxPrefix = 0;
282 pVCpu->iem.s.uVex3rdReg = 0;
283 pVCpu->iem.s.uVexLength = 0;
284 pVCpu->iem.s.fEvexStuff = 0;
285 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
286#ifdef IEM_WITH_CODE_TLB
287 pVCpu->iem.s.pbInstrBuf = NULL;
288 pVCpu->iem.s.offInstrNextByte = 0;
289 pVCpu->iem.s.offCurInstrStart = 0;
290# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
291 pVCpu->iem.s.offOpcode = 0;
292# endif
293# ifdef VBOX_STRICT
294 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
295 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
296 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
297 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
298# endif
299#else
300 pVCpu->iem.s.offOpcode = 0;
301 pVCpu->iem.s.cbOpcode = 0;
302#endif
303 pVCpu->iem.s.offModRm = 0;
304 pVCpu->iem.s.cActiveMappings = 0;
305 pVCpu->iem.s.iNextMapping = 0;
306 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
307
308#ifdef DBGFTRACE_ENABLED
309 switch (IEM_GET_CPU_MODE(pVCpu))
310 {
311 case IEMMODE_64BIT:
312 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.rip);
313 break;
314 case IEMMODE_32BIT:
315 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
316 break;
317 case IEMMODE_16BIT:
318 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
319 break;
320 }
321#endif
322}
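
/*
 * Call sketch (added illustration, not upstream code): the optional flags
 * documented above are simply OR'ed together by the caller, e.g. a path that
 * needs to bypass access handlers and ignore LOCK prefixes would use:
 *
 *      iemInitDecoder(pVCpu, IEM_F_BYPASS_HANDLERS | IEM_F_X86_DISREGARD_LOCK);
 *
 * while the common case passes 0, leaving iemCalcExecFlags to determine fExec.
 */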
323
324
325/**
326 * Reinitializes the decoder state for the 2nd+ loop of IEMExecLots.
327 *
328 * This is mostly a copy of iemInitDecoder.
329 *
330 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
331 */
332DECLINLINE(void) iemReInitDecoder(PVMCPUCC pVCpu)
333{
334 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
335 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
336 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
337 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
338 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
339 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
340 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
341 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
342 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
343
344 /* ASSUMES: Anyone changing CPU state affecting the fExec bits will update them! */
345 AssertMsg((pVCpu->iem.s.fExec & ~IEM_F_USER_OPTS) == iemCalcExecFlags(pVCpu),
346 ("fExec=%#x iemCalcExecModeFlags=%#x\n", pVCpu->iem.s.fExec, iemCalcExecFlags(pVCpu)));
347
348 IEMMODE const enmMode = IEM_GET_CPU_MODE(pVCpu);
349 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
350 pVCpu->iem.s.enmEffAddrMode = enmMode;
351 if (enmMode != IEMMODE_64BIT)
352 {
353 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
354 pVCpu->iem.s.enmEffOpSize = enmMode;
355 }
356 else
357 {
358 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
359 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
360 }
361 pVCpu->iem.s.fPrefixes = 0;
362 pVCpu->iem.s.uRexReg = 0;
363 pVCpu->iem.s.uRexB = 0;
364 pVCpu->iem.s.uRexIndex = 0;
365 pVCpu->iem.s.idxPrefix = 0;
366 pVCpu->iem.s.uVex3rdReg = 0;
367 pVCpu->iem.s.uVexLength = 0;
368 pVCpu->iem.s.fEvexStuff = 0;
369 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
370#ifdef IEM_WITH_CODE_TLB
371 if (pVCpu->iem.s.pbInstrBuf)
372 {
373 uint64_t off = (enmMode == IEMMODE_64BIT
374 ? pVCpu->cpum.GstCtx.rip
375 : pVCpu->cpum.GstCtx.eip + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base)
376 - pVCpu->iem.s.uInstrBufPc;
377 if (off < pVCpu->iem.s.cbInstrBufTotal)
378 {
379 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
380 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
381 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
382 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
383 else
384 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
385 }
386 else
387 {
388 pVCpu->iem.s.pbInstrBuf = NULL;
389 pVCpu->iem.s.offInstrNextByte = 0;
390 pVCpu->iem.s.offCurInstrStart = 0;
391 pVCpu->iem.s.cbInstrBuf = 0;
392 pVCpu->iem.s.cbInstrBufTotal = 0;
393 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
394 }
395 }
396 else
397 {
398 pVCpu->iem.s.offInstrNextByte = 0;
399 pVCpu->iem.s.offCurInstrStart = 0;
400 pVCpu->iem.s.cbInstrBuf = 0;
401 pVCpu->iem.s.cbInstrBufTotal = 0;
402# ifdef VBOX_STRICT
403 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
404# endif
405 }
406# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
407 pVCpu->iem.s.offOpcode = 0;
408# endif
409#else /* !IEM_WITH_CODE_TLB */
410 pVCpu->iem.s.cbOpcode = 0;
411 pVCpu->iem.s.offOpcode = 0;
412#endif /* !IEM_WITH_CODE_TLB */
413 pVCpu->iem.s.offModRm = 0;
414 Assert(pVCpu->iem.s.cActiveMappings == 0);
415 pVCpu->iem.s.iNextMapping = 0;
416 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
417 Assert(!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS));
418
419#ifdef DBGFTRACE_ENABLED
420 switch (enmMode)
421 {
422 case IEMMODE_64BIT:
423 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.rip);
424 break;
425 case IEMMODE_32BIT:
426 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
427 break;
428 case IEMMODE_16BIT:
429 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
430 break;
431 }
432#endif
433}
434
435
436
437/**
438 * Prefetches opcodes the first time execution is started.
439 *
440 * @returns Strict VBox status code.
441 * @param pVCpu The cross context virtual CPU structure of the
442 * calling thread.
443 * @param fExecOpts Optional execution flags:
444 * - IEM_F_BYPASS_HANDLERS
445 * - IEM_F_X86_DISREGARD_LOCK
446 */
447static VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPUCC pVCpu, uint32_t fExecOpts) RT_NOEXCEPT
448{
449 iemInitDecoder(pVCpu, fExecOpts);
450
451#ifndef IEM_WITH_CODE_TLB
452 /*
453 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
454 *
455 * First translate CS:rIP to a physical address.
456 *
457 * Note! The iemOpcodeFetchMoreBytes code depends on this here code to fetch
458 * all relevant bytes from the first page, as it ASSUMES it's only ever
459 * called for dealing with CS.LIM, page crossing and instructions that
460 * are too long.
461 */
462 uint32_t cbToTryRead;
463 RTGCPTR GCPtrPC;
464 if (IEM_IS_64BIT_CODE(pVCpu))
465 {
466 cbToTryRead = GUEST_PAGE_SIZE;
467 GCPtrPC = pVCpu->cpum.GstCtx.rip;
468 if (IEM_IS_CANONICAL(GCPtrPC))
469 cbToTryRead = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
470 else
471 return iemRaiseGeneralProtectionFault0(pVCpu);
472 }
473 else
474 {
475 uint32_t GCPtrPC32 = pVCpu->cpum.GstCtx.eip;
476 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu), ("%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
477 if (GCPtrPC32 <= pVCpu->cpum.GstCtx.cs.u32Limit)
478 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrPC32 + 1;
479 else
480 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
481 if (cbToTryRead) { /* likely */ }
482 else /* overflowed */
483 {
484 Assert(GCPtrPC32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
485 cbToTryRead = UINT32_MAX;
486 }
487 GCPtrPC = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrPC32;
488 Assert(GCPtrPC <= UINT32_MAX);
489 }
490
491 PGMPTWALK Walk;
492 int rc = PGMGstGetPage(pVCpu, GCPtrPC, &Walk);
493 if (RT_SUCCESS(rc))
494 Assert(Walk.fSucceeded); /* probable. */
495 else
496 {
497 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
498# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
499 if (Walk.fFailed & PGM_WALKFAIL_EPT)
500 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
501# endif
502 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, rc);
503 }
504 if ((Walk.fEffective & X86_PTE_US) || IEM_GET_CPL(pVCpu) != 3) { /* likely */ }
505 else
506 {
507 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
508# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
509 if (Walk.fFailed & PGM_WALKFAIL_EPT)
510 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
511# endif
512 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
513 }
514 if (!(Walk.fEffective & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
515 else
516 {
517 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
518# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
519 if (Walk.fFailed & PGM_WALKFAIL_EPT)
520 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
521# endif
522 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
523 }
524 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
525 /** @todo Check reserved bits and such stuff. PGM is better at doing
526 * that, so do it when implementing the guest virtual address
527 * TLB... */
528
529 /*
530 * Read the bytes at this address.
531 */
532 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
533 if (cbToTryRead > cbLeftOnPage)
534 cbToTryRead = cbLeftOnPage;
535 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
536 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
537
538 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
539 {
540 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
541 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
542 { /* likely */ }
543 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
544 {
545 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
546 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
547 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
548 }
549 else
550 {
551 Log((RT_SUCCESS(rcStrict)
552 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
553 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
554 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
555 return rcStrict;
556 }
557 }
558 else
559 {
560 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
561 if (RT_SUCCESS(rc))
562 { /* likely */ }
563 else
564 {
565 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
566 GCPtrPC, GCPhys, cbToTryRead, rc));
567 return rc;
568 }
569 }
570 pVCpu->iem.s.cbOpcode = cbToTryRead;
571#endif /* !IEM_WITH_CODE_TLB */
572 return VINF_SUCCESS;
573}
574
575
576/**
577 * Invalidates the IEM TLBs.
578 *
579 * This is called internally as well as by PGM when moving GC mappings.
580 *
581 * @param pVCpu The cross context virtual CPU structure of the calling
582 * thread.
583 */
584VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPUCC pVCpu)
585{
586#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
587 Log10(("IEMTlbInvalidateAll\n"));
588# ifdef IEM_WITH_CODE_TLB
589 pVCpu->iem.s.cbInstrBufTotal = 0;
590 pVCpu->iem.s.CodeTlb.uTlbRevision += IEMTLB_REVISION_INCR;
591 if (pVCpu->iem.s.CodeTlb.uTlbRevision != 0)
592 { /* very likely */ }
593 else
594 {
595 pVCpu->iem.s.CodeTlb.uTlbRevision = IEMTLB_REVISION_INCR;
596 unsigned i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
597 while (i-- > 0)
598 pVCpu->iem.s.CodeTlb.aEntries[i].uTag = 0;
599 }
600# endif
601
602# ifdef IEM_WITH_DATA_TLB
603 pVCpu->iem.s.DataTlb.uTlbRevision += IEMTLB_REVISION_INCR;
604 if (pVCpu->iem.s.DataTlb.uTlbRevision != 0)
605 { /* very likely */ }
606 else
607 {
608 pVCpu->iem.s.DataTlb.uTlbRevision = IEMTLB_REVISION_INCR;
609 unsigned i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
610 while (i-- > 0)
611 pVCpu->iem.s.DataTlb.aEntries[i].uTag = 0;
612 }
613# endif
614#else
615 RT_NOREF(pVCpu);
616#endif
617}
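
/*
 * How the revision bump invalidates entries (added sketch, simplified from
 * the code-TLB lookup in iemOpcodeFetchBytesJmp below): every entry stores
 * the current revision OR'ed into its tag, so once uTlbRevision has been
 * incremented no existing tag can match and all entries are effectively
 * invalid without being touched:
 *
 *      uint64_t const uTag      = IEMTLB_CALC_TAG(&pVCpu->iem.s.CodeTlb, GCPtrFirst); // tag includes uTlbRevision
 *      PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.CodeTlb, uTag);
 *      bool const fHit          = pTlbe->uTag == uTag;                                // false for stale revisions
 *
 * Only on the rare revision rollover does the code above fall back to
 * clearing every uTag explicitly (the else branches).
 */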
618
619
620/**
621 * Invalidates a page in the TLBs.
622 *
623 * @param pVCpu The cross context virtual CPU structure of the calling
624 * thread.
625 * @param GCPtr The address of the page to invalidate
626 * @thread EMT(pVCpu)
627 */
628VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPUCC pVCpu, RTGCPTR GCPtr)
629{
630#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
631 Log10(("IEMTlbInvalidatePage: GCPtr=%RGv\n", GCPtr));
632 GCPtr = IEMTLB_CALC_TAG_NO_REV(GCPtr);
633 Assert(!(GCPtr >> (48 - X86_PAGE_SHIFT)));
634 uintptr_t const idx = IEMTLB_TAG_TO_INDEX(GCPtr);
635
636# ifdef IEM_WITH_CODE_TLB
637 if (pVCpu->iem.s.CodeTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.CodeTlb.uTlbRevision))
638 {
639 pVCpu->iem.s.CodeTlb.aEntries[idx].uTag = 0;
640 if (GCPtr == IEMTLB_CALC_TAG_NO_REV(pVCpu->iem.s.uInstrBufPc))
641 pVCpu->iem.s.cbInstrBufTotal = 0;
642 }
643# endif
644
645# ifdef IEM_WITH_DATA_TLB
646 if (pVCpu->iem.s.DataTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.DataTlb.uTlbRevision))
647 pVCpu->iem.s.DataTlb.aEntries[idx].uTag = 0;
648# endif
649#else
650 NOREF(pVCpu); NOREF(GCPtr);
651#endif
652}
653
654
655#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
656/**
657 * Invalidates both TLBs in a slow fashion following a rollover.
658 *
659 * Worker for IEMTlbInvalidateAllPhysical,
660 * IEMTlbInvalidateAllPhysicalAllCpus, iemOpcodeFetchBytesJmp, iemMemMap,
661 * iemMemMapJmp and others.
662 *
663 * @thread EMT(pVCpu)
664 */
665static void IEMTlbInvalidateAllPhysicalSlow(PVMCPUCC pVCpu)
666{
667 Log10(("IEMTlbInvalidateAllPhysicalSlow\n"));
668 ASMAtomicWriteU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
669 ASMAtomicWriteU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
670
671 unsigned i;
672# ifdef IEM_WITH_CODE_TLB
673 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
674 while (i-- > 0)
675 {
676 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
677 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~( IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
678 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
679 }
680# endif
681# ifdef IEM_WITH_DATA_TLB
682 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
683 while (i-- > 0)
684 {
685 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
686 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~( IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
687 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
688 }
689# endif
690
691}
692#endif
693
694
695/**
696 * Invalidates the host physical aspects of the IEM TLBs.
697 *
698 * This is called internally as well as by PGM when moving GC mappings.
699 *
700 * @param pVCpu The cross context virtual CPU structure of the calling
701 * thread.
702 * @note Currently not used.
703 */
704VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPUCC pVCpu)
705{
706#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
707 /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
708 Log10(("IEMTlbInvalidateAllPhysical\n"));
709
710# ifdef IEM_WITH_CODE_TLB
711 pVCpu->iem.s.cbInstrBufTotal = 0;
712# endif
713 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
714 if (RT_LIKELY(uTlbPhysRev > IEMTLB_PHYS_REV_INCR * 2))
715 {
716 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
717 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
718 }
719 else
720 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
721#else
722 NOREF(pVCpu);
723#endif
724}
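
/*
 * Consumer-side sketch (added illustration, mirroring the code-TLB lookup in
 * iemOpcodeFetchBytesJmp below): the physical revision lives in the
 * IEMTLBE_F_PHYS_REV bits of fFlagsAndPhysRev, so after the bump above the
 * cached mapping info of an entry no longer matches and must be re-queried:
 *
 *      if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
 *          fPhysInfoValid = true;      // pbMappingR3 and the PG_NO_xxx bits can be trusted
 *      else
 *          fPhysInfoValid = false;     // refresh via PGMPhysIemGCPhys2PtrNoLock (see below)
 *
 * IEMTlbInvalidateAllPhysicalSlow handles the (very rare) revision rollover.
 */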
725
726
727/**
728 * Invalidates the host physical aspects of the IEM TLBs.
729 *
730 * This is called internally as well as by PGM when moving GC mappings.
731 *
732 * @param pVM The cross context VM structure.
733 * @param idCpuCaller The ID of the calling EMT if available to the caller,
734 * otherwise NIL_VMCPUID.
735 *
736 * @remarks Caller holds the PGM lock.
737 */
738VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVMCC pVM, VMCPUID idCpuCaller)
739{
740#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
741 PVMCPUCC const pVCpuCaller = idCpuCaller >= pVM->cCpus ? VMMGetCpu(pVM) : VMMGetCpuById(pVM, idCpuCaller);
742 if (pVCpuCaller)
743 VMCPU_ASSERT_EMT(pVCpuCaller);
744 Log10(("IEMTlbInvalidateAllPhysicalAllCpus\n"));
745
746 VMCC_FOR_EACH_VMCPU(pVM)
747 {
748# ifdef IEM_WITH_CODE_TLB
749 if (pVCpuCaller == pVCpu)
750 pVCpu->iem.s.cbInstrBufTotal = 0;
751# endif
752
753 uint64_t const uTlbPhysRevPrev = ASMAtomicUoReadU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev);
754 uint64_t uTlbPhysRevNew = uTlbPhysRevPrev + IEMTLB_PHYS_REV_INCR;
755 if (RT_LIKELY(uTlbPhysRevNew > IEMTLB_PHYS_REV_INCR * 2))
756 { /* likely */}
757 else if (pVCpuCaller == pVCpu)
758 uTlbPhysRevNew = IEMTLB_PHYS_REV_INCR;
759 else
760 {
761 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
762 continue;
763 }
764 ASMAtomicCmpXchgU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev);
765 ASMAtomicCmpXchgU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev);
766 }
767 VMCC_FOR_EACH_VMCPU_END(pVM);
768
769#else
770 RT_NOREF(pVM, idCpuCaller);
771#endif
772}
773
774
775/**
776 * Flushes the prefetch buffer, light version.
777 */
778void iemOpcodeFlushLight(PVMCPUCC pVCpu, uint8_t cbInstr)
779{
780#ifndef IEM_WITH_CODE_TLB
781 pVCpu->iem.s.cbOpcode = cbInstr;
782#else
783 RT_NOREF(pVCpu, cbInstr);
784#endif
785}
786
787
788/**
789 * Flushes the prefetch buffer, heavy version.
790 */
791void iemOpcodeFlushHeavy(PVMCPUCC pVCpu, uint8_t cbInstr)
792{
793#ifndef IEM_WITH_CODE_TLB
794 pVCpu->iem.s.cbOpcode = cbInstr; /* Note! SVM and VT-x may set this to zero on exit, rather than the instruction length. */
795#elif 1
796 pVCpu->iem.s.pbInstrBuf = NULL;
797 RT_NOREF(cbInstr);
798#else
799 RT_NOREF(pVCpu, cbInstr);
800#endif
801}
802
803
804
805#ifdef IEM_WITH_CODE_TLB
806
807/**
808 * Tries to fetch @a cbDst opcode bytes, raises the appropriate exception on
809 * failure and jumps.
810 *
811 * We end up here for a number of reasons:
812 * - pbInstrBuf isn't yet initialized.
813 * - Advancing beyond the buffer boundary (e.g. cross page).
814 * - Advancing beyond the CS segment limit.
815 * - Fetching from a non-mappable page (e.g. MMIO).
816 *
817 * @param pVCpu The cross context virtual CPU structure of the
818 * calling thread.
819 * @param pvDst Where to return the bytes.
820 * @param cbDst Number of bytes to read. A value of zero is
821 * allowed for initializing pbInstrBuf (the
822 * recompiler does this). In this case it is best
823 * to set pbInstrBuf to NULL prior to the call.
824 */
825void iemOpcodeFetchBytesJmp(PVMCPUCC pVCpu, size_t cbDst, void *pvDst) IEM_NOEXCEPT_MAY_LONGJMP
826{
827# ifdef IN_RING3
828 for (;;)
829 {
830 Assert(cbDst <= 8);
831 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
832
833 /*
834 * We might have a partial buffer match, deal with that first to make the
835 * rest simpler. This is the first part of the cross page/buffer case.
836 */
837 if (pVCpu->iem.s.pbInstrBuf != NULL)
838 {
839 if (offBuf < pVCpu->iem.s.cbInstrBuf)
840 {
841 Assert(offBuf + cbDst > pVCpu->iem.s.cbInstrBuf);
842 uint32_t const cbCopy = pVCpu->iem.s.cbInstrBuf - pVCpu->iem.s.offInstrNextByte;
843 memcpy(pvDst, &pVCpu->iem.s.pbInstrBuf[offBuf], cbCopy);
844
845 cbDst -= cbCopy;
846 pvDst = (uint8_t *)pvDst + cbCopy;
847 offBuf += cbCopy;
848 pVCpu->iem.s.offInstrNextByte += offBuf;
849 }
850 }
851
852 /*
853 * Check segment limit, figuring how much we're allowed to access at this point.
854 *
855 * We will fault immediately if RIP is past the segment limit / in non-canonical
856 * territory. If we do continue, there are one or more bytes to read before we
857 * end up in trouble and we need to do that first before faulting.
858 */
859 RTGCPTR GCPtrFirst;
860 uint32_t cbMaxRead;
861 if (IEM_IS_64BIT_CODE(pVCpu))
862 {
863 GCPtrFirst = pVCpu->cpum.GstCtx.rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
864 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
865 { /* likely */ }
866 else
867 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
868 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
869 }
870 else
871 {
872 GCPtrFirst = pVCpu->cpum.GstCtx.eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
873 /* Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu)); - this is allowed */
874 if (RT_LIKELY((uint32_t)GCPtrFirst <= pVCpu->cpum.GstCtx.cs.u32Limit))
875 { /* likely */ }
876 else /** @todo For CPUs older than the 386, we should not necessarily generate \#GP here but wrap around! */
877 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
878 cbMaxRead = pVCpu->cpum.GstCtx.cs.u32Limit - (uint32_t)GCPtrFirst + 1;
879 if (cbMaxRead != 0)
880 { /* likely */ }
881 else
882 {
883 /* Overflowed because address is 0 and limit is max. */
884 Assert(GCPtrFirst == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
885 cbMaxRead = X86_PAGE_SIZE;
886 }
887 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base;
888 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
889 if (cbMaxRead2 < cbMaxRead)
890 cbMaxRead = cbMaxRead2;
891 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
892 }
893
894 /*
895 * Get the TLB entry for this piece of code.
896 */
897 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.CodeTlb, GCPtrFirst);
898 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.CodeTlb, uTag);
899 if (pTlbe->uTag == uTag)
900 {
901 /* likely when executing lots of code, otherwise unlikely */
902# ifdef VBOX_WITH_STATISTICS
903 pVCpu->iem.s.CodeTlb.cTlbHits++;
904# endif
905 }
906 else
907 {
908 pVCpu->iem.s.CodeTlb.cTlbMisses++;
909 PGMPTWALK Walk;
910 int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &Walk);
911 if (RT_FAILURE(rc))
912 {
913#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
914 /** @todo Nested VMX: Need to handle EPT violation/misconfig here? */
915 Assert(!(Walk.fFailed & PGM_WALKFAIL_EPT));
916#endif
917 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
918 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, rc);
919 }
920
921 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
922 Assert(Walk.fSucceeded);
923 pTlbe->uTag = uTag;
924 pTlbe->fFlagsAndPhysRev = (~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A))
925 | (Walk.fEffective >> X86_PTE_PAE_BIT_NX);
926 pTlbe->GCPhys = Walk.GCPhys;
927 pTlbe->pbMappingR3 = NULL;
928 }
929
930 /*
931 * Check TLB page table level access flags.
932 */
933 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
934 {
935 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && IEM_GET_CPL(pVCpu) == 3)
936 {
937 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
938 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
939 }
940 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
941 {
942 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrFirst));
943 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
944 }
945 }
946
947 /*
948 * Look up the physical page info if necessary.
949 */
950 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
951 { /* not necessary */ }
952 else
953 {
954 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
955 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
956 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
957 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
958 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
959 { /* likely */ }
960 else
961 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
962 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
963 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_UNASSIGNED);
964 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
965 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
966 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
967 }
968
969# if defined(IN_RING3) || defined(IN_RING0) /** @todo fixme */
970 /*
971 * Try to do a direct read using the pbMappingR3 pointer.
972 */
973 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
974 == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
975 {
976 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
977 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
978 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
979 {
980 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
981 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
982 }
983 else
984 {
985 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
986 if (cbInstr + (uint32_t)cbDst <= 15)
987 {
988 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
989 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
990 }
991 else
992 {
993 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0)\n",
994 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, cbDst));
995 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
996 }
997 }
998 if (cbDst <= cbMaxRead)
999 {
1000 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
1001 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1002 pVCpu->iem.s.GCPhysInstrBuf = pTlbe->GCPhys;
1003 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
1004 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
1005 return;
1006 }
1007 pVCpu->iem.s.pbInstrBuf = NULL;
1008
1009 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
1010 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
1011 }
1012# else
1013# error "refactor as needed"
1014 /*
1015 * If there is no special read handling, we can read a bit more and
1016 * put it in the prefetch buffer.
1017 */
1018 if ( cbDst < cbMaxRead
1019 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1020 {
1021 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
1022 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
1023 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1024 { /* likely */ }
1025 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1026 {
1027 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1028 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1029 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1030 AssertStmt(rcStrict == VINF_SUCCESS, IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict)));
1031 }
1032 else
1033 {
1034 Log((RT_SUCCESS(rcStrict)
1035 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1036 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1037 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1038 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1039 }
1040 }
1041# endif
1042 /*
1043 * Special read handling, so only read exactly what's needed.
1044 * This is a highly unlikely scenario.
1045 */
1046 else
1047 {
1048 pVCpu->iem.s.CodeTlb.cTlbSlowReadPath++;
1049
1050 /* Check instruction length. */
1051 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1052 if (RT_LIKELY(cbInstr + cbDst <= 15))
1053 { /* likely */ }
1054 else
1055 {
1056 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0) [slow]\n",
1057 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, cbDst));
1058 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1059 }
1060
1061 /* Do the reading. */
1062 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
1063 if (cbToRead > 0)
1064 {
1065 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
1066 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
1067 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1068 { /* likely */ }
1069 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1070 {
1071 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1072 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1073 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1074 AssertStmt(rcStrict == VINF_SUCCESS, IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict)));
1075 }
1076 else
1077 {
1078 Log((RT_SUCCESS(rcStrict)
1079 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1080 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1081 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1082 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1083 }
1084 }
1085
1086 /* Update the state and probably return. */
1087 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1088 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1089 pVCpu->iem.s.offInstrNextByte = offPg + cbInstr + cbToRead;
1090 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead + cbInstr) - cbToRead - cbInstr;
1091 pVCpu->iem.s.cbInstrBufTotal = X86_PAGE_SIZE;
1092 pVCpu->iem.s.GCPhysInstrBuf = pTlbe->GCPhys;
1093 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1094 pVCpu->iem.s.pbInstrBuf = NULL;
1095 if (cbToRead == cbDst)
1096 return;
1097 }
1098
1099 /*
1100 * More to read, loop.
1101 */
1102 cbDst -= cbMaxRead;
1103 pvDst = (uint8_t *)pvDst + cbMaxRead;
1104 }
1105# else /* !IN_RING3 */
1106 RT_NOREF(pvDst, cbDst);
1107 if (pvDst || cbDst)
1108 IEM_DO_LONGJMP(pVCpu, VERR_INTERNAL_ERROR);
1109# endif /* !IN_RING3 */
1110}
1111
1112#else /* !IEM_WITH_CODE_TLB */
1113
1114/**
1115 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
1116 * exception if it fails.
1117 *
1118 * @returns Strict VBox status code.
1119 * @param pVCpu The cross context virtual CPU structure of the
1120 * calling thread.
1121 * @param cbMin The minimum number of bytes relative to offOpcode
1122 * that must be read.
1123 */
1124VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPUCC pVCpu, size_t cbMin) RT_NOEXCEPT
1125{
1126 /*
1127 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1128 *
1129 * First translate CS:rIP to a physical address.
1130 */
1131 uint8_t const cbOpcode = pVCpu->iem.s.cbOpcode;
1132 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
1133 uint8_t const cbLeft = cbOpcode - offOpcode;
1134 Assert(cbLeft < cbMin);
1135 Assert(cbOpcode <= sizeof(pVCpu->iem.s.abOpcode));
1136
1137 uint32_t cbToTryRead;
1138 RTGCPTR GCPtrNext;
1139 if (IEM_IS_64BIT_CODE(pVCpu))
1140 {
1141 GCPtrNext = pVCpu->cpum.GstCtx.rip + cbOpcode;
1142 if (!IEM_IS_CANONICAL(GCPtrNext))
1143 return iemRaiseGeneralProtectionFault0(pVCpu);
1144 cbToTryRead = GUEST_PAGE_SIZE - (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1145 }
1146 else
1147 {
1148 uint32_t GCPtrNext32 = pVCpu->cpum.GstCtx.eip;
1149 /* Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu)); - this is allowed */
1150 GCPtrNext32 += cbOpcode;
1151 if (GCPtrNext32 > pVCpu->cpum.GstCtx.cs.u32Limit)
1152 /** @todo For CPUs older than the 386, we should not generate \#GP here but wrap around! */
1153 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1154 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrNext32 + 1;
1155 if (!cbToTryRead) /* overflowed */
1156 {
1157 Assert(GCPtrNext32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1158 cbToTryRead = UINT32_MAX;
1159 /** @todo check out wrapping around the code segment. */
1160 }
1161 if (cbToTryRead < cbMin - cbLeft)
1162 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1163 GCPtrNext = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrNext32;
1164
1165 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1166 if (cbToTryRead > cbLeftOnPage)
1167 cbToTryRead = cbLeftOnPage;
1168 }
1169
1170 /* Restrict to opcode buffer space.
1171
1172 We're making ASSUMPTIONS here based on work done previously in
1173 iemInitDecoderAndPrefetchOpcodes, where bytes from the first page will
1174 be fetched in case of an instruction crossing two pages. */
1175 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - cbOpcode)
1176 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - cbOpcode;
1177 if (RT_LIKELY(cbToTryRead + cbLeft >= cbMin))
1178 { /* likely */ }
1179 else
1180 {
1181 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0)\n",
1182 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, offOpcode, cbMin));
1183 return iemRaiseGeneralProtectionFault0(pVCpu);
1184 }
1185
1186 PGMPTWALK Walk;
1187 int rc = PGMGstGetPage(pVCpu, GCPtrNext, &Walk);
1188 if (RT_FAILURE(rc))
1189 {
1190 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
1191#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1192 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1193 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
1194#endif
1195 return iemRaisePageFault(pVCpu, GCPtrNext, 1, IEM_ACCESS_INSTRUCTION, rc);
1196 }
1197 if (!(Walk.fEffective & X86_PTE_US) && IEM_GET_CPL(pVCpu) == 3)
1198 {
1199 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
1200#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1201 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1202 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
1203#endif
1204 return iemRaisePageFault(pVCpu, GCPtrNext, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1205 }
1206 if ((Walk.fEffective & X86_PTE_PAE_NX) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
1207 {
1208 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
1209#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1210 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1211 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
1212#endif
1213 return iemRaisePageFault(pVCpu, GCPtrNext, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1214 }
1215 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1216 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, cbOpcode));
1217 /** @todo Check reserved bits and such stuff. PGM is better at doing
1218 * that, so do it when implementing the guest virtual address
1219 * TLB... */
1220
1221 /*
1222 * Read the bytes at this address.
1223 *
1224 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
1225 * and since PATM should only patch the start of an instruction there
1226 * should be no need to check again here.
1227 */
1228 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
1229 {
1230 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[cbOpcode],
1231 cbToTryRead, PGMACCESSORIGIN_IEM);
1232 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1233 { /* likely */ }
1234 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1235 {
1236 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1237 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1238 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1239 }
1240 else
1241 {
1242 Log((RT_SUCCESS(rcStrict)
1243 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1244 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1245 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1246 return rcStrict;
1247 }
1248 }
1249 else
1250 {
1251 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[cbOpcode], GCPhys, cbToTryRead);
1252 if (RT_SUCCESS(rc))
1253 { /* likely */ }
1254 else
1255 {
1256 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
1257 return rc;
1258 }
1259 }
1260 pVCpu->iem.s.cbOpcode = cbOpcode + cbToTryRead;
1261 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
1262
1263 return VINF_SUCCESS;
1264}
1265
1266#endif /* !IEM_WITH_CODE_TLB */
1267#ifndef IEM_WITH_SETJMP
1268
1269/**
1270 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
1271 *
1272 * @returns Strict VBox status code.
1273 * @param pVCpu The cross context virtual CPU structure of the
1274 * calling thread.
1275 * @param pb Where to return the opcode byte.
1276 */
1277VBOXSTRICTRC iemOpcodeGetNextU8Slow(PVMCPUCC pVCpu, uint8_t *pb) RT_NOEXCEPT
1278{
1279 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1280 if (rcStrict == VINF_SUCCESS)
1281 {
1282 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1283 *pb = pVCpu->iem.s.abOpcode[offOpcode];
1284 pVCpu->iem.s.offOpcode = offOpcode + 1;
1285 }
1286 else
1287 *pb = 0;
1288 return rcStrict;
1289}
1290
1291#else /* IEM_WITH_SETJMP */
1292
1293/**
1294 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
1295 *
1296 * @returns The opcode byte.
1297 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1298 */
1299uint8_t iemOpcodeGetNextU8SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1300{
1301# ifdef IEM_WITH_CODE_TLB
1302 uint8_t u8;
1303 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
1304 return u8;
1305# else
1306 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1307 if (rcStrict == VINF_SUCCESS)
1308 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
1309 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1310# endif
1311}
1312
1313#endif /* IEM_WITH_SETJMP */
1314
1315#ifndef IEM_WITH_SETJMP
1316
1317/**
1318 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
1319 *
1320 * @returns Strict VBox status code.
1321 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1322 * @param pu16 Where to return the opcode word (sign-extended byte).
1323 */
1324VBOXSTRICTRC iemOpcodeGetNextS8SxU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
1325{
1326 uint8_t u8;
1327 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1328 if (rcStrict == VINF_SUCCESS)
1329 *pu16 = (int8_t)u8;
1330 return rcStrict;
1331}
1332
1333
1334/**
1335 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
1336 *
1337 * @returns Strict VBox status code.
1338 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1339 * @param pu32 Where to return the opcode dword.
1340 */
1341VBOXSTRICTRC iemOpcodeGetNextS8SxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1342{
1343 uint8_t u8;
1344 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1345 if (rcStrict == VINF_SUCCESS)
1346 *pu32 = (int8_t)u8;
1347 return rcStrict;
1348}
1349
1350
1351/**
1352 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
1353 *
1354 * @returns Strict VBox status code.
1355 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1356 * @param pu64 Where to return the opcode qword.
1357 */
1358VBOXSTRICTRC iemOpcodeGetNextS8SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1359{
1360 uint8_t u8;
1361 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1362 if (rcStrict == VINF_SUCCESS)
1363 *pu64 = (int8_t)u8;
1364 return rcStrict;
1365}
1366
1367#endif /* !IEM_WITH_SETJMP */
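
/*
 * Note on the S8Sx getters above (added illustration): the sign extension is
 * produced entirely by the cast through int8_t before the wider assignment,
 * which is how a disp8/imm8 operand widens to the effective operand size:
 *
 *      uint8_t  const u8  = 0x80;
 *      uint16_t const u16 = (int8_t)u8;    // 0xff80
 *      uint32_t const u32 = (int8_t)u8;    // 0xffffff80
 *      uint64_t const u64 = (int8_t)u8;    // 0xffffffffffffff80
 */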
1368
1369
1370#ifndef IEM_WITH_SETJMP
1371
1372/**
1373 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
1374 *
1375 * @returns Strict VBox status code.
1376 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1377 * @param pu16 Where to return the opcode word.
1378 */
1379VBOXSTRICTRC iemOpcodeGetNextU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
1380{
1381 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1382 if (rcStrict == VINF_SUCCESS)
1383 {
1384 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1385# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1386 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1387# else
1388 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1389# endif
1390 pVCpu->iem.s.offOpcode = offOpcode + 2;
1391 }
1392 else
1393 *pu16 = 0;
1394 return rcStrict;
1395}
1396
1397#else /* IEM_WITH_SETJMP */
1398
1399/**
1400 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error.
1401 *
1402 * @returns The opcode word.
1403 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1404 */
1405uint16_t iemOpcodeGetNextU16SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1406{
1407# ifdef IEM_WITH_CODE_TLB
1408 uint16_t u16;
1409 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
1410 return u16;
1411# else
1412 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1413 if (rcStrict == VINF_SUCCESS)
1414 {
1415 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1416 pVCpu->iem.s.offOpcode += 2;
1417# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1418 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1419# else
1420 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1421# endif
1422 }
1423 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1424# endif
1425}
1426
1427#endif /* IEM_WITH_SETJMP */
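
/*
 * Byte-order note (added illustration): RT_MAKE_U16 and its wider siblings
 * assemble the opcode bytes lowest-address-first, matching x86's
 * little-endian immediates, so the byte sequence 0x34 0x12 decodes as 0x1234:
 *
 *      uint16_t const u16 = RT_MAKE_U16(0x34, 0x12);   // 0x1234
 *
 * The IEM_USE_UNALIGNED_DATA_ACCESS variant reads the same value straight
 * from abOpcode on little-endian hosts.
 */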
1428
1429#ifndef IEM_WITH_SETJMP
1430
1431/**
1432 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
1433 *
1434 * @returns Strict VBox status code.
1435 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1436 * @param pu32 Where to return the opcode double word.
1437 */
1438VBOXSTRICTRC iemOpcodeGetNextU16ZxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1439{
1440 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1441 if (rcStrict == VINF_SUCCESS)
1442 {
1443 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1444 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1445 pVCpu->iem.s.offOpcode = offOpcode + 2;
1446 }
1447 else
1448 *pu32 = 0;
1449 return rcStrict;
1450}
1451
1452
1453/**
1454 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
1455 *
1456 * @returns Strict VBox status code.
1457 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1458 * @param pu64 Where to return the opcode quad word.
1459 */
1460VBOXSTRICTRC iemOpcodeGetNextU16ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1461{
1462 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1463 if (rcStrict == VINF_SUCCESS)
1464 {
1465 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1466 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1467 pVCpu->iem.s.offOpcode = offOpcode + 2;
1468 }
1469 else
1470 *pu64 = 0;
1471 return rcStrict;
1472}
1473
1474#endif /* !IEM_WITH_SETJMP */
1475
1476#ifndef IEM_WITH_SETJMP
1477
1478/**
1479 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
1480 *
1481 * @returns Strict VBox status code.
1482 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1483 * @param pu32 Where to return the opcode dword.
1484 */
1485VBOXSTRICTRC iemOpcodeGetNextU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1486{
1487 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1488 if (rcStrict == VINF_SUCCESS)
1489 {
1490 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1491# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1492 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1493# else
1494 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1495 pVCpu->iem.s.abOpcode[offOpcode + 1],
1496 pVCpu->iem.s.abOpcode[offOpcode + 2],
1497 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1498# endif
1499 pVCpu->iem.s.offOpcode = offOpcode + 4;
1500 }
1501 else
1502 *pu32 = 0;
1503 return rcStrict;
1504}
1505
1506#else /* IEM_WITH_SETJMP */
1507
1508/**
1509 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
1510 *
1511 * @returns The opcode dword.
1512 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1513 */
1514uint32_t iemOpcodeGetNextU32SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1515{
1516# ifdef IEM_WITH_CODE_TLB
1517 uint32_t u32;
1518 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
1519 return u32;
1520# else
1521 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1522 if (rcStrict == VINF_SUCCESS)
1523 {
1524 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1525 pVCpu->iem.s.offOpcode = offOpcode + 4;
1526# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1527 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1528# else
1529 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1530 pVCpu->iem.s.abOpcode[offOpcode + 1],
1531 pVCpu->iem.s.abOpcode[offOpcode + 2],
1532 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1533# endif
1534 }
1535 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1536# endif
1537}
1538
1539#endif /* IEM_WITH_SETJMP */
1540
1541#ifndef IEM_WITH_SETJMP
1542
1543/**
1544 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
1545 *
1546 * @returns Strict VBox status code.
1547 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1548 * @param pu64 Where to return the opcode dword, zero extended to a qword.
1549 */
1550VBOXSTRICTRC iemOpcodeGetNextU32ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1551{
1552 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1553 if (rcStrict == VINF_SUCCESS)
1554 {
1555 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1556 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1557 pVCpu->iem.s.abOpcode[offOpcode + 1],
1558 pVCpu->iem.s.abOpcode[offOpcode + 2],
1559 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1560 pVCpu->iem.s.offOpcode = offOpcode + 4;
1561 }
1562 else
1563 *pu64 = 0;
1564 return rcStrict;
1565}
1566
1567
1568/**
1569 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
1570 *
1571 * @returns Strict VBox status code.
1572 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1573 * @param pu64 Where to return the opcode qword.
1574 */
1575VBOXSTRICTRC iemOpcodeGetNextS32SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1576{
1577 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1578 if (rcStrict == VINF_SUCCESS)
1579 {
1580 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1581 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1582 pVCpu->iem.s.abOpcode[offOpcode + 1],
1583 pVCpu->iem.s.abOpcode[offOpcode + 2],
1584 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1585 pVCpu->iem.s.offOpcode = offOpcode + 4;
1586 }
1587 else
1588 *pu64 = 0;
1589 return rcStrict;
1590}
1591
1592#endif /* !IEM_WITH_SETJMP */
1593
1594#ifndef IEM_WITH_SETJMP
1595
1596/**
1597 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
1598 *
1599 * @returns Strict VBox status code.
1600 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1601 * @param pu64 Where to return the opcode qword.
1602 */
1603VBOXSTRICTRC iemOpcodeGetNextU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1604{
1605 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
1606 if (rcStrict == VINF_SUCCESS)
1607 {
1608 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1609# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1610 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1611# else
1612 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1613 pVCpu->iem.s.abOpcode[offOpcode + 1],
1614 pVCpu->iem.s.abOpcode[offOpcode + 2],
1615 pVCpu->iem.s.abOpcode[offOpcode + 3],
1616 pVCpu->iem.s.abOpcode[offOpcode + 4],
1617 pVCpu->iem.s.abOpcode[offOpcode + 5],
1618 pVCpu->iem.s.abOpcode[offOpcode + 6],
1619 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1620# endif
1621 pVCpu->iem.s.offOpcode = offOpcode + 8;
1622 }
1623 else
1624 *pu64 = 0;
1625 return rcStrict;
1626}
1627
1628#else /* IEM_WITH_SETJMP */
1629
1630/**
1631 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
1632 *
1633 * @returns The opcode qword.
1634 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1635 */
1636uint64_t iemOpcodeGetNextU64SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1637{
1638# ifdef IEM_WITH_CODE_TLB
1639 uint64_t u64;
1640 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
1641 return u64;
1642# else
1643 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
1644 if (rcStrict == VINF_SUCCESS)
1645 {
1646 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1647 pVCpu->iem.s.offOpcode = offOpcode + 8;
1648# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1649 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1650# else
1651 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1652 pVCpu->iem.s.abOpcode[offOpcode + 1],
1653 pVCpu->iem.s.abOpcode[offOpcode + 2],
1654 pVCpu->iem.s.abOpcode[offOpcode + 3],
1655 pVCpu->iem.s.abOpcode[offOpcode + 4],
1656 pVCpu->iem.s.abOpcode[offOpcode + 5],
1657 pVCpu->iem.s.abOpcode[offOpcode + 6],
1658 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1659# endif
1660 }
1661 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1662# endif
1663}
1664
1665#endif /* IEM_WITH_SETJMP */
1666
1667
1668
1669/** @name Misc Worker Functions.
1670 * @{
1671 */
1672
1673/**
1674 * Gets the exception class for the specified exception vector.
1675 *
1676 * @returns The class of the specified exception.
1677 * @param uVector The exception vector.
1678 */
1679static IEMXCPTCLASS iemGetXcptClass(uint8_t uVector) RT_NOEXCEPT
1680{
1681 Assert(uVector <= X86_XCPT_LAST);
1682 switch (uVector)
1683 {
1684 case X86_XCPT_DE:
1685 case X86_XCPT_TS:
1686 case X86_XCPT_NP:
1687 case X86_XCPT_SS:
1688 case X86_XCPT_GP:
1689 case X86_XCPT_SX: /* AMD only */
1690 return IEMXCPTCLASS_CONTRIBUTORY;
1691
1692 case X86_XCPT_PF:
1693 case X86_XCPT_VE: /* Intel only */
1694 return IEMXCPTCLASS_PAGE_FAULT;
1695
1696 case X86_XCPT_DF:
1697 return IEMXCPTCLASS_DOUBLE_FAULT;
1698 }
1699 return IEMXCPTCLASS_BENIGN;
1700}
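
/*
 * Illustrative sketch, excluded from the build: how the classification above
 * maps a few common vectors.  The wrapper name iemExampleXcptClassSketch is
 * invented for this example; the constants and return values are the real ones
 * used by the switch above.
 */
#if 0
static void iemExampleXcptClassSketch(void)
{
    IEMXCPTCLASS enmClass;
    enmClass = iemGetXcptClass(X86_XCPT_DE); /* divide error -> IEMXCPTCLASS_CONTRIBUTORY */
    enmClass = iemGetXcptClass(X86_XCPT_PF); /* page fault   -> IEMXCPTCLASS_PAGE_FAULT   */
    enmClass = iemGetXcptClass(X86_XCPT_DF); /* double fault -> IEMXCPTCLASS_DOUBLE_FAULT */
    enmClass = iemGetXcptClass(X86_XCPT_DB); /* debug, not listed above -> IEMXCPTCLASS_BENIGN */
    RT_NOREF(enmClass);
}
#endif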
1701
1702
1703/**
1704 * Evaluates how to handle an exception caused during delivery of another event
1705 * (exception / interrupt).
1706 *
1707 * @returns How to handle the recursive exception.
1708 * @param pVCpu The cross context virtual CPU structure of the
1709 * calling thread.
1710 * @param fPrevFlags The flags of the previous event.
1711 * @param uPrevVector The vector of the previous event.
1712 * @param fCurFlags The flags of the current exception.
1713 * @param uCurVector The vector of the current exception.
1714 * @param pfXcptRaiseInfo Where to store additional information about the
1715 * exception condition. Optional.
1716 */
1717VMM_INT_DECL(IEMXCPTRAISE) IEMEvaluateRecursiveXcpt(PVMCPUCC pVCpu, uint32_t fPrevFlags, uint8_t uPrevVector, uint32_t fCurFlags,
1718 uint8_t uCurVector, PIEMXCPTRAISEINFO pfXcptRaiseInfo)
1719{
1720 /*
1721 * Only CPU exceptions can be raised while delivering other events; software interrupt
1722 * (INTn/INT3/INTO/ICEBP) generated exceptions cannot occur as the current (second) exception.
1723 */
1724 AssertReturn(fCurFlags & IEM_XCPT_FLAGS_T_CPU_XCPT, IEMXCPTRAISE_INVALID);
1725 Assert(pVCpu); RT_NOREF(pVCpu);
1726 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x\n", uPrevVector, uCurVector));
1727
1728 IEMXCPTRAISE enmRaise = IEMXCPTRAISE_CURRENT_XCPT;
1729 IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
1730 if (fPrevFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
1731 {
1732 IEMXCPTCLASS enmPrevXcptClass = iemGetXcptClass(uPrevVector);
1733 if (enmPrevXcptClass != IEMXCPTCLASS_BENIGN)
1734 {
1735 IEMXCPTCLASS enmCurXcptClass = iemGetXcptClass(uCurVector);
1736 if ( enmPrevXcptClass == IEMXCPTCLASS_PAGE_FAULT
1737 && ( enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT
1738 || enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY))
1739 {
1740 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
1741 fRaiseInfo = enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT ? IEMXCPTRAISEINFO_PF_PF
1742 : IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT;
1743 Log2(("IEMEvaluateRecursiveXcpt: Vectoring page fault. uPrevVector=%#x uCurVector=%#x uCr2=%#RX64\n", uPrevVector,
1744 uCurVector, pVCpu->cpum.GstCtx.cr2));
1745 }
1746 else if ( enmPrevXcptClass == IEMXCPTCLASS_CONTRIBUTORY
1747 && enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY)
1748 {
1749 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
1750 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x -> #DF\n", uPrevVector, uCurVector));
1751 }
1752 else if ( enmPrevXcptClass == IEMXCPTCLASS_DOUBLE_FAULT
1753 && ( enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY
1754 || enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT))
1755 {
1756 enmRaise = IEMXCPTRAISE_TRIPLE_FAULT;
1757 Log2(("IEMEvaluateRecursiveXcpt: #DF handler raised a %#x exception -> triple fault\n", uCurVector));
1758 }
1759 }
1760 else
1761 {
1762 if (uPrevVector == X86_XCPT_NMI)
1763 {
1764 fRaiseInfo = IEMXCPTRAISEINFO_NMI_XCPT;
1765 if (uCurVector == X86_XCPT_PF)
1766 {
1767 fRaiseInfo |= IEMXCPTRAISEINFO_NMI_PF;
1768 Log2(("IEMEvaluateRecursiveXcpt: NMI delivery caused a page fault\n"));
1769 }
1770 }
1771 else if ( uPrevVector == X86_XCPT_AC
1772 && uCurVector == X86_XCPT_AC)
1773 {
1774 enmRaise = IEMXCPTRAISE_CPU_HANG;
1775 fRaiseInfo = IEMXCPTRAISEINFO_AC_AC;
1776 Log2(("IEMEvaluateRecursiveXcpt: Recursive #AC - Bad guest\n"));
1777 }
1778 }
1779 }
1780 else if (fPrevFlags & IEM_XCPT_FLAGS_T_EXT_INT)
1781 {
1782 fRaiseInfo = IEMXCPTRAISEINFO_EXT_INT_XCPT;
1783 if (uCurVector == X86_XCPT_PF)
1784 fRaiseInfo |= IEMXCPTRAISEINFO_EXT_INT_PF;
1785 }
1786 else
1787 {
1788 Assert(fPrevFlags & IEM_XCPT_FLAGS_T_SOFT_INT);
1789 fRaiseInfo = IEMXCPTRAISEINFO_SOFT_INT_XCPT;
1790 }
1791
1792 if (pfXcptRaiseInfo)
1793 *pfXcptRaiseInfo = fRaiseInfo;
1794 return enmRaise;
1795}
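
/*
 * Minimal caller sketch, excluded from the build: consulting
 * IEMEvaluateRecursiveXcpt when a #GP is raised while a #PF was being
 * delivered.  The wrapper name and the empty reaction are invented; only the
 * API, flags and constants are taken from the code above.
 */
#if 0
static void iemExampleRecursiveXcptSketch(PVMCPUCC pVCpu)
{
    IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
    IEMXCPTRAISE const enmRaise = IEMEvaluateRecursiveXcpt(pVCpu,
                                                           IEM_XCPT_FLAGS_T_CPU_XCPT, X86_XCPT_PF, /* previous event */
                                                           IEM_XCPT_FLAGS_T_CPU_XCPT, X86_XCPT_GP, /* current exception */
                                                           &fRaiseInfo);
    if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
    {
        /* A page fault followed by a contributory exception escalates to #DF;
           fRaiseInfo is IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT here. */
    }
}
#endif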
1796
1797
1798/**
1799 * Enters the CPU shutdown state initiated by a triple fault or other
1800 * unrecoverable conditions.
1801 *
1802 * @returns Strict VBox status code.
1803 * @param pVCpu The cross context virtual CPU structure of the
1804 * calling thread.
1805 */
1806static VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPUCC pVCpu) RT_NOEXCEPT
1807{
1808 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
1809 IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(pVCpu, VMX_EXIT_TRIPLE_FAULT, 0 /* u64ExitQual */);
1810
1811 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
1812 {
1813 Log2(("shutdown: Guest intercept -> #VMEXIT\n"));
1814 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
1815 }
1816
1817 RT_NOREF(pVCpu);
1818 return VINF_EM_TRIPLE_FAULT;
1819}
1820
1821
1822/**
1823 * Validates a new SS segment.
1824 *
1825 * @returns VBox strict status code.
1826 * @param pVCpu The cross context virtual CPU structure of the
1827 * calling thread.
1828 * @param NewSS The new SS selector.
1829 * @param uCpl The CPL to load the stack for.
1830 * @param pDesc Where to return the descriptor.
1831 */
1832static VBOXSTRICTRC iemMiscValidateNewSS(PVMCPUCC pVCpu, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc) RT_NOEXCEPT
1833{
1834 /* Null selectors are not allowed (we're not called for dispatching
1835 interrupts with SS=0 in long mode). */
1836 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
1837 {
1838 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
1839 return iemRaiseTaskSwitchFault0(pVCpu);
1840 }
1841
1842 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
1843 if ((NewSS & X86_SEL_RPL) != uCpl)
1844 {
1845 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differ -> #TS\n", NewSS, uCpl));
1846 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1847 }
1848
1849 /*
1850 * Read the descriptor.
1851 */
1852 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
1853 if (rcStrict != VINF_SUCCESS)
1854 return rcStrict;
1855
1856 /*
1857 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
1858 */
1859 if (!pDesc->Legacy.Gen.u1DescType)
1860 {
1861 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
1862 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1863 }
1864
1865 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
1866 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
1867 {
1868 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
1869 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1870 }
1871 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
1872 {
1873 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differ -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
1874 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1875 }
1876
1877 /* Is it there? */
1878 /** @todo testcase: Is this checked before the canonical / limit check below? */
1879 if (!pDesc->Legacy.Gen.u1Present)
1880 {
1881 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
1882 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
1883 }
1884
1885 return VINF_SUCCESS;
1886}
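
/*
 * Minimal caller sketch, excluded from the build: what a successful call to
 * iemMiscValidateNewSS guarantees about the returned descriptor.  The wrapper
 * name is invented.
 */
#if 0
static VBOXSTRICTRC iemExampleValidateSsSketch(PVMCPUCC pVCpu, RTSEL SelSS, uint8_t uCpl)
{
    IEMSELDESC DescSS;
    VBOXSTRICTRC rcStrict = iemMiscValidateNewSS(pVCpu, SelSS, uCpl, &DescSS);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict; /* #TS or #NP has already been raised. */
    /* DescSS is now a present, writable data segment with RPL == DPL == uCpl. */
    return VINF_SUCCESS;
}
#endif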
1887
1888/** @} */
1889
1890
1891/** @name Raising Exceptions.
1892 *
1893 * @{
1894 */
1895
1896
1897/**
1898 * Loads the specified stack far pointer from the TSS.
1899 *
1900 * @returns VBox strict status code.
1901 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1902 * @param uCpl The CPL to load the stack for.
1903 * @param pSelSS Where to return the new stack segment.
1904 * @param puEsp Where to return the new stack pointer.
1905 */
1906static VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPUCC pVCpu, uint8_t uCpl, PRTSEL pSelSS, uint32_t *puEsp) RT_NOEXCEPT
1907{
1908 VBOXSTRICTRC rcStrict;
1909 Assert(uCpl < 4);
1910
1911 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
1912 switch (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type)
1913 {
1914 /*
1915 * 16-bit TSS (X86TSS16).
1916 */
1917 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
1918 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
1919 {
1920 uint32_t off = uCpl * 4 + 2;
1921 if (off + 4 <= pVCpu->cpum.GstCtx.tr.u32Limit)
1922 {
1923 /** @todo check actual access pattern here. */
1924 uint32_t u32Tmp = 0; /* gcc maybe... */
1925 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
1926 if (rcStrict == VINF_SUCCESS)
1927 {
1928 *puEsp = RT_LOWORD(u32Tmp);
1929 *pSelSS = RT_HIWORD(u32Tmp);
1930 return VINF_SUCCESS;
1931 }
1932 }
1933 else
1934 {
1935 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
1936 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
1937 }
1938 break;
1939 }
1940
1941 /*
1942 * 32-bit TSS (X86TSS32).
1943 */
1944 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
1945 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
1946 {
1947 uint32_t off = uCpl * 8 + 4;
1948 if (off + 7 <= pVCpu->cpum.GstCtx.tr.u32Limit)
1949 {
1950/** @todo check actual access pattern here. */
1951 uint64_t u64Tmp;
1952 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
1953 if (rcStrict == VINF_SUCCESS)
1954 {
1955 *puEsp = u64Tmp & UINT32_MAX;
1956 *pSelSS = (RTSEL)(u64Tmp >> 32);
1957 return VINF_SUCCESS;
1958 }
1959 }
1960 else
1961 {
1962 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
1963 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
1964 }
1965 break;
1966 }
1967
1968 default:
1969 AssertFailed();
1970 rcStrict = VERR_IEM_IPE_4;
1971 break;
1972 }
1973
1974 *puEsp = 0; /* make gcc happy */
1975 *pSelSS = 0; /* make gcc happy */
1976 return rcStrict;
1977}
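
/*
 * Worked example, excluded from the build: for uCpl=1 the lookup above reads a
 * dword at offset 1*4 + 2 = 0x06 of a 16-bit TSS (X86TSS16.sp1 in the low word,
 * X86TSS16.ss1 in the high word), or a qword at offset 1*8 + 4 = 0x0c of a
 * 32-bit TSS (X86TSS32.esp1 and X86TSS32.ss1).  The wrapper below is invented
 * and only restates the intended use when switching to an inner stack.
 */
#if 0
static VBOXSTRICTRC iemExampleInnerStackSketch(PVMCPUCC pVCpu, uint8_t uNewCpl)
{
    RTSEL    SelSS = 0;
    uint32_t uEsp  = 0;
    VBOXSTRICTRC rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, uNewCpl, &SelSS, &uEsp);
    if (rcStrict == VINF_SUCCESS)
    {
        /* SelSS:uEsp is the inner stack for uNewCpl; it still needs validating
           (e.g. via iemMiscValidateNewSS) before being loaded into SS:ESP. */
    }
    return rcStrict;
}
#endif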
1978
1979
1980/**
1981 * Loads the specified stack pointer from the 64-bit TSS.
1982 *
1983 * @returns VBox strict status code.
1984 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1985 * @param uCpl The CPL to load the stack for.
1986 * @param uIst The interrupt stack table index, 0 if to use uCpl.
1987 * @param puRsp Where to return the new stack pointer.
1988 */
1989static VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPUCC pVCpu, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp) RT_NOEXCEPT
1990{
1991 Assert(uCpl < 4);
1992 Assert(uIst < 8);
1993 *puRsp = 0; /* make gcc happy */
1994
1995 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
1996 AssertReturn(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
1997
1998 uint32_t off;
1999 if (uIst)
2000 off = (uIst - 1) * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, ist1);
2001 else
2002 off = uCpl * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, rsp0);
2003 if (off + sizeof(uint64_t) > pVCpu->cpum.GstCtx.tr.u32Limit)
2004 {
2005 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pVCpu->cpum.GstCtx.tr.u32Limit));
2006 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
2007 }
2008
2009 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
2010}
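
/*
 * Worked example, excluded from the build: uIst=0 with uCpl=2 selects
 * X86TSS64.rsp2 at offset 2*8 + 4 = 0x14, while uIst=3 selects X86TSS64.ist3 at
 * offset (3-1)*8 + 0x24 = 0x34.  The wrapper name below is invented.
 */
#if 0
static VBOXSTRICTRC iemExampleIstStackSketch(PVMCPUCC pVCpu, uint8_t uIst)
{
    uint64_t uNewRsp = 0;
    VBOXSTRICTRC rcStrict = iemRaiseLoadStackFromTss64(pVCpu, 0 /*uCpl*/, uIst, &uNewRsp);
    /* On success, uNewRsp is the stack pointer read from the 64-bit TSS; the
       interrupt dispatch code still aligns and adjusts it before use. */
    return rcStrict;
}
#endif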
2011
2012
2013/**
2014 * Adjust the CPU state according to the exception being raised.
2015 *
2016 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2017 * @param u8Vector The exception that has been raised.
2018 */
2019DECLINLINE(void) iemRaiseXcptAdjustState(PVMCPUCC pVCpu, uint8_t u8Vector)
2020{
2021 switch (u8Vector)
2022 {
2023 case X86_XCPT_DB:
2024 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);
2025 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
2026 break;
2027 /** @todo Read the AMD and Intel exception reference... */
2028 }
2029}
2030
2031
2032/**
2033 * Implements exceptions and interrupts for real mode.
2034 *
2035 * @returns VBox strict status code.
2036 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2037 * @param cbInstr The number of bytes to offset rIP by in the return
2038 * address.
2039 * @param u8Vector The interrupt / exception vector number.
2040 * @param fFlags The flags.
2041 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2042 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2043 */
2044static VBOXSTRICTRC
2045iemRaiseXcptOrIntInRealMode(PVMCPUCC pVCpu,
2046 uint8_t cbInstr,
2047 uint8_t u8Vector,
2048 uint32_t fFlags,
2049 uint16_t uErr,
2050 uint64_t uCr2) RT_NOEXCEPT
2051{
2052 NOREF(uErr); NOREF(uCr2);
2053 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2054
2055 /*
2056 * Read the IDT entry.
2057 */
2058 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
2059 {
2060 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
2061 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2062 }
2063 RTFAR16 Idte;
2064 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(4) * u8Vector);
2065 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2066 {
2067 Log(("iemRaiseXcptOrIntInRealMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
2068 return rcStrict;
2069 }
2070
2071 /*
2072 * Push the stack frame.
2073 */
2074 uint16_t *pu16Frame;
2075 uint64_t uNewRsp;
2076 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, 3, (void **)&pu16Frame, &uNewRsp);
2077 if (rcStrict != VINF_SUCCESS)
2078 return rcStrict;
2079
2080 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
2081#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
2082 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
2083 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
2084 fEfl |= UINT16_C(0xf000);
2085#endif
2086 pu16Frame[2] = (uint16_t)fEfl;
2087 pu16Frame[1] = (uint16_t)pVCpu->cpum.GstCtx.cs.Sel;
2088 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
2089 rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu16Frame, uNewRsp);
2090 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2091 return rcStrict;
2092
2093 /*
2094 * Load the vector address into cs:ip and make exception specific state
2095 * adjustments.
2096 */
2097 pVCpu->cpum.GstCtx.cs.Sel = Idte.sel;
2098 pVCpu->cpum.GstCtx.cs.ValidSel = Idte.sel;
2099 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
2100 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)Idte.sel << 4;
2101 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
2102 pVCpu->cpum.GstCtx.rip = Idte.off;
2103 fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC);
2104 IEMMISC_SET_EFL(pVCpu, fEfl);
2105
2106 /** @todo do we actually do this in real mode? */
2107 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
2108 iemRaiseXcptAdjustState(pVCpu, u8Vector);
2109
2110 /* The IEM_F_MODE_XXX and IEM_F_X86_CPL_MASK don't really change here,
2111 so best leave them alone in case we're in a weird kind of real mode... */
2112
2113 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
2114}
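
/*
 * Worked example, excluded from the build: for INT 10h with the usual real-mode
 * IDTR base of zero, the dispatch above fetches the 4 byte far pointer (RTFAR16:
 * offset word, then segment word) at linear address 0x10 * 4 = 0x40 and pushes a
 * 6 byte FLAGS/CS/IP frame.  The helper name below is invented; it just restates
 * the vector-to-entry arithmetic used above.
 */
#if 0
static RTGCPTR iemExampleRealModeIdtEntryAddr(PVMCPUCC pVCpu, uint8_t bVector)
{
    /* Real-mode IDT entry: a far pointer at IDT base + vector * 4. */
    return pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(4) * bVector;
}
#endif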
2115
2116
2117/**
2118 * Loads a NULL data selector into a segment register when coming from V8086 mode.
2119 *
2120 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2121 * @param pSReg Pointer to the segment register.
2122 */
2123DECLINLINE(void) iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPUCC pVCpu, PCPUMSELREG pSReg)
2124{
2125 pSReg->Sel = 0;
2126 pSReg->ValidSel = 0;
2127 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2128 {
2129 /* VT-x (Intel 3960x) doesn't change the base and limit; it clears and sets the following attributes. */
2130 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
2131 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
2132 }
2133 else
2134 {
2135 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2136 /** @todo check this on AMD-V */
2137 pSReg->u64Base = 0;
2138 pSReg->u32Limit = 0;
2139 }
2140}
2141
2142
2143/**
2144 * Loads a segment selector during a task switch in V8086 mode.
2145 *
2146 * @param pSReg Pointer to the segment register.
2147 * @param uSel The selector value to load.
2148 */
2149DECLINLINE(void) iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
2150{
2151 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
2152 pSReg->Sel = uSel;
2153 pSReg->ValidSel = uSel;
2154 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2155 pSReg->u64Base = uSel << 4;
2156 pSReg->u32Limit = 0xffff;
2157 pSReg->Attr.u = 0xf3;
2158}
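
/*
 * Worked example (informational comment only): loading selector 0x1234 with the
 * helper above yields u64Base = 0x12340 (selector << 4), u32Limit = 0xffff and
 * Attr.u = 0xf3, i.e. a present, accessed, read/write data segment with DPL 3,
 * which is the fixed shape every segment register has in virtual-8086 mode.
 */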
2159
2160
2161/**
2162 * Loads a segment selector during a task switch in protected mode.
2163 *
2164 * In this task switch scenario, we would throw \#TS exceptions rather than
2165 * \#GPs.
2166 *
2167 * @returns VBox strict status code.
2168 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2169 * @param pSReg Pointer to the segment register.
2170 * @param uSel The new selector value.
2171 *
2172 * @remarks This does _not_ handle CS or SS.
2173 * @remarks This expects IEM_GET_CPL(pVCpu) to return an up to date value.
2174 */
2175static VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPUCC pVCpu, PCPUMSELREG pSReg, uint16_t uSel) RT_NOEXCEPT
2176{
2177 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2178
2179 /* Null data selector. */
2180 if (!(uSel & X86_SEL_MASK_OFF_RPL))
2181 {
2182 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
2183 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
2184 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2185 return VINF_SUCCESS;
2186 }
2187
2188 /* Fetch the descriptor. */
2189 IEMSELDESC Desc;
2190 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
2191 if (rcStrict != VINF_SUCCESS)
2192 {
2193 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
2194 VBOXSTRICTRC_VAL(rcStrict)));
2195 return rcStrict;
2196 }
2197
2198 /* Must be a data segment or readable code segment. */
2199 if ( !Desc.Legacy.Gen.u1DescType
2200 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
2201 {
2202 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
2203 Desc.Legacy.Gen.u4Type));
2204 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2205 }
2206
2207 /* Check privileges for data segments and non-conforming code segments. */
2208 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2209 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2210 {
2211 /* The RPL and the new CPL must be less than or equal to the DPL. */
2212 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
2213 || (IEM_GET_CPL(pVCpu) > Desc.Legacy.Gen.u2Dpl))
2214 {
2215 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
2216 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
2217 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2218 }
2219 }
2220
2221 /* Is it there? */
2222 if (!Desc.Legacy.Gen.u1Present)
2223 {
2224 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
2225 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2226 }
2227
2228 /* The base and limit. */
2229 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
2230 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
2231
2232 /*
2233 * Ok, everything checked out fine. Now set the accessed bit before
2234 * committing the result into the registers.
2235 */
2236 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2237 {
2238 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
2239 if (rcStrict != VINF_SUCCESS)
2240 return rcStrict;
2241 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2242 }
2243
2244 /* Commit */
2245 pSReg->Sel = uSel;
2246 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
2247 pSReg->u32Limit = cbLimit;
2248 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
2249 pSReg->ValidSel = uSel;
2250 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2251 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2252 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
2253
2254 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
2255 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2256 return VINF_SUCCESS;
2257}
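
/*
 * Minimal caller sketch, excluded from the build: the task-switch code further
 * down uses this helper for ES/DS/FS/GS once CR3, SS and the CPL have been
 * switched; a null selector is accepted and simply loaded as null/unusable.
 * The wrapper name is invented.
 */
#if 0
static VBOXSTRICTRC iemExampleTaskSwitchDsLoadSketch(PVMCPUCC pVCpu, uint16_t uNewDS)
{
    return iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.ds, uNewDS);
}
#endif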
2258
2259
2260/**
2261 * Performs a task switch.
2262 *
2263 * If the task switch is the result of a JMP, CALL or IRET instruction, the
2264 * caller is responsible for performing the necessary checks (like DPL, TSS
2265 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
2266 * reference for JMP, CALL, IRET.
2267 *
2268 * If the task switch is due to a software interrupt or hardware exception,
2269 * the caller is responsible for validating the TSS selector and descriptor. See
2270 * Intel Instruction reference for INT n.
2271 *
2272 * @returns VBox strict status code.
2273 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2274 * @param enmTaskSwitch The cause of the task switch.
2275 * @param uNextEip The EIP effective after the task switch.
2276 * @param fFlags The flags, see IEM_XCPT_FLAGS_XXX.
2277 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2278 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2279 * @param SelTSS The TSS selector of the new task.
2280 * @param pNewDescTSS Pointer to the new TSS descriptor.
2281 */
2282VBOXSTRICTRC
2283iemTaskSwitch(PVMCPUCC pVCpu,
2284 IEMTASKSWITCH enmTaskSwitch,
2285 uint32_t uNextEip,
2286 uint32_t fFlags,
2287 uint16_t uErr,
2288 uint64_t uCr2,
2289 RTSEL SelTSS,
2290 PIEMSELDESC pNewDescTSS) RT_NOEXCEPT
2291{
2292 Assert(!IEM_IS_REAL_MODE(pVCpu));
2293 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2294 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2295
2296 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
2297 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2298 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2299 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2300 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2301
2302 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2303 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2304
2305 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTSS,
2306 fIsNewTSS386, pVCpu->cpum.GstCtx.eip, uNextEip));
2307
2308 /* Update CR2 in case it's a page-fault. */
2309 /** @todo This should probably be done much earlier in IEM/PGM. See
2310 * @bugref{5653#c49}. */
2311 if (fFlags & IEM_XCPT_FLAGS_CR2)
2312 pVCpu->cpum.GstCtx.cr2 = uCr2;
2313
2314 /*
2315 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
2316 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
2317 */
2318 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
2319 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
2320 if (uNewTSSLimit < uNewTSSLimitMin)
2321 {
2322 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
2323 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
2324 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
2325 }
2326
2327 /*
2328 * Task switches in VMX non-root mode always cause task-switch VM-exits.
2329 * The new TSS must have been read and validated (DPL, limits etc.) before a
2330 * task-switch VM-exit commences.
2331 *
2332 * See Intel spec. 25.4.2 "Treatment of Task Switches".
2333 */
2334 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
2335 {
2336 Log(("iemTaskSwitch: Guest intercept (source=%u, sel=%#x) -> VM-exit.\n", enmTaskSwitch, SelTSS));
2337 IEM_VMX_VMEXIT_TASK_SWITCH_RET(pVCpu, enmTaskSwitch, SelTSS, uNextEip - pVCpu->cpum.GstCtx.eip);
2338 }
2339
2340 /*
2341 * The SVM nested-guest intercept for task-switch takes priority over all exceptions
2342 * after validating the incoming (new) TSS, see AMD spec. 15.14.1 "Task Switch Intercept".
2343 */
2344 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TASK_SWITCH))
2345 {
2346 uint32_t const uExitInfo1 = SelTSS;
2347 uint32_t uExitInfo2 = uErr;
2348 switch (enmTaskSwitch)
2349 {
2350 case IEMTASKSWITCH_JUMP: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_JUMP; break;
2351 case IEMTASKSWITCH_IRET: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_IRET; break;
2352 default: break;
2353 }
2354 if (fFlags & IEM_XCPT_FLAGS_ERR)
2355 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_HAS_ERROR_CODE;
2356 if (pVCpu->cpum.GstCtx.eflags.Bits.u1RF)
2357 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_EFLAGS_RF;
2358
2359 Log(("iemTaskSwitch: Guest intercept -> #VMEXIT. uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uExitInfo1, uExitInfo2));
2360 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_TASK_SWITCH, uExitInfo1, uExitInfo2);
2361 RT_NOREF2(uExitInfo1, uExitInfo2);
2362 }
2363
2364 /*
2365 * Check the current TSS limit. The last write to the current TSS during the
2366 * task switch is 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
2367 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2368 *
2369 * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
2370 * end up with smaller than "legal" TSS limits.
2371 */
2372 uint32_t const uCurTSSLimit = pVCpu->cpum.GstCtx.tr.u32Limit;
2373 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
2374 if (uCurTSSLimit < uCurTSSLimitMin)
2375 {
2376 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
2377 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
2378 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
2379 }
2380
2381 /*
2382 * Verify that the new TSS can be accessed and map it. Map only the required contents
2383 * and not the entire TSS.
2384 */
2385 void *pvNewTSS;
2386 uint32_t const cbNewTSS = uNewTSSLimitMin + 1;
2387 RTGCPTR const GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
2388 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
2389 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
2390 * not perform correct translation if this happens. See Intel spec. 7.2.1
2391 * "Task-State Segment". */
2392 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW, 0);
2393 if (rcStrict != VINF_SUCCESS)
2394 {
2395 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
2396 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
2397 return rcStrict;
2398 }
2399
2400 /*
2401 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
2402 */
2403 uint32_t fEFlags = pVCpu->cpum.GstCtx.eflags.u;
2404 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
2405 || enmTaskSwitch == IEMTASKSWITCH_IRET)
2406 {
2407 PX86DESC pDescCurTSS;
2408 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
2409 pVCpu->cpum.GstCtx.gdtr.pGdt + (pVCpu->cpum.GstCtx.tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0);
2410 if (rcStrict != VINF_SUCCESS)
2411 {
2412 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2413 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2414 return rcStrict;
2415 }
2416
2417 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2418 rcStrict = iemMemCommitAndUnmap(pVCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
2419 if (rcStrict != VINF_SUCCESS)
2420 {
2421 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2422 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2423 return rcStrict;
2424 }
2425
2426 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
2427 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
2428 {
2429 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2430 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2431 fEFlags &= ~X86_EFL_NT;
2432 }
2433 }
2434
2435 /*
2436 * Save the CPU state into the current TSS.
2437 */
2438 RTGCPTR const GCPtrCurTSS = pVCpu->cpum.GstCtx.tr.u64Base;
2439 if (GCPtrNewTSS == GCPtrCurTSS)
2440 {
2441 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
2442 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
2443 pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.u, pVCpu->cpum.GstCtx.eax,
2444 pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel,
2445 pVCpu->cpum.GstCtx.ldtr.Sel));
2446 }
2447 if (fIsNewTSS386)
2448 {
2449 /*
2450 * Verify that the current TSS (32-bit) can be accessed; we only map the minimum required size.
2451 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2452 */
2453 void *pvCurTSS32;
2454 uint32_t const offCurTSS = RT_UOFFSETOF(X86TSS32, eip);
2455 uint32_t const cbCurTSS = RT_UOFFSETOF(X86TSS32, selLdt) - RT_UOFFSETOF(X86TSS32, eip);
2456 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
2457 rcStrict = iemMemMap(pVCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW, 0);
2458 if (rcStrict != VINF_SUCCESS)
2459 {
2460 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
2461 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
2462 return rcStrict;
2463 }
2464
2465 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS, offCurTSS + cbCurTSS). */
2466 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
2467 pCurTSS32->eip = uNextEip;
2468 pCurTSS32->eflags = fEFlags;
2469 pCurTSS32->eax = pVCpu->cpum.GstCtx.eax;
2470 pCurTSS32->ecx = pVCpu->cpum.GstCtx.ecx;
2471 pCurTSS32->edx = pVCpu->cpum.GstCtx.edx;
2472 pCurTSS32->ebx = pVCpu->cpum.GstCtx.ebx;
2473 pCurTSS32->esp = pVCpu->cpum.GstCtx.esp;
2474 pCurTSS32->ebp = pVCpu->cpum.GstCtx.ebp;
2475 pCurTSS32->esi = pVCpu->cpum.GstCtx.esi;
2476 pCurTSS32->edi = pVCpu->cpum.GstCtx.edi;
2477 pCurTSS32->es = pVCpu->cpum.GstCtx.es.Sel;
2478 pCurTSS32->cs = pVCpu->cpum.GstCtx.cs.Sel;
2479 pCurTSS32->ss = pVCpu->cpum.GstCtx.ss.Sel;
2480 pCurTSS32->ds = pVCpu->cpum.GstCtx.ds.Sel;
2481 pCurTSS32->fs = pVCpu->cpum.GstCtx.fs.Sel;
2482 pCurTSS32->gs = pVCpu->cpum.GstCtx.gs.Sel;
2483
2484 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
2485 if (rcStrict != VINF_SUCCESS)
2486 {
2487 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2488 VBOXSTRICTRC_VAL(rcStrict)));
2489 return rcStrict;
2490 }
2491 }
2492 else
2493 {
2494 /*
2495 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
2496 */
2497 void *pvCurTSS16;
2498 uint32_t const offCurTSS = RT_UOFFSETOF(X86TSS16, ip);
2499 uint32_t const cbCurTSS = RT_UOFFSETOF(X86TSS16, selLdt) - RT_UOFFSETOF(X86TSS16, ip);
2500 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
2501 rcStrict = iemMemMap(pVCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW, 0);
2502 if (rcStrict != VINF_SUCCESS)
2503 {
2504 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
2505 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
2506 return rcStrict;
2507 }
2508
2509 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS, offCurTSS + cbCurTSS). */
2510 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
2511 pCurTSS16->ip = uNextEip;
2512 pCurTSS16->flags = (uint16_t)fEFlags;
2513 pCurTSS16->ax = pVCpu->cpum.GstCtx.ax;
2514 pCurTSS16->cx = pVCpu->cpum.GstCtx.cx;
2515 pCurTSS16->dx = pVCpu->cpum.GstCtx.dx;
2516 pCurTSS16->bx = pVCpu->cpum.GstCtx.bx;
2517 pCurTSS16->sp = pVCpu->cpum.GstCtx.sp;
2518 pCurTSS16->bp = pVCpu->cpum.GstCtx.bp;
2519 pCurTSS16->si = pVCpu->cpum.GstCtx.si;
2520 pCurTSS16->di = pVCpu->cpum.GstCtx.di;
2521 pCurTSS16->es = pVCpu->cpum.GstCtx.es.Sel;
2522 pCurTSS16->cs = pVCpu->cpum.GstCtx.cs.Sel;
2523 pCurTSS16->ss = pVCpu->cpum.GstCtx.ss.Sel;
2524 pCurTSS16->ds = pVCpu->cpum.GstCtx.ds.Sel;
2525
2526 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
2527 if (rcStrict != VINF_SUCCESS)
2528 {
2529 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2530 VBOXSTRICTRC_VAL(rcStrict)));
2531 return rcStrict;
2532 }
2533 }
2534
2535 /*
2536 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
2537 */
2538 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2539 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2540 {
2541 /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */
2542 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
2543 pNewTSS->selPrev = pVCpu->cpum.GstCtx.tr.Sel;
2544 }
2545
2546 /*
2547 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky,
2548 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
2549 */
2550 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
2551 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
2552 bool fNewDebugTrap;
2553 if (fIsNewTSS386)
2554 {
2555 PCX86TSS32 pNewTSS32 = (PCX86TSS32)pvNewTSS;
2556 uNewCr3 = (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
2557 uNewEip = pNewTSS32->eip;
2558 uNewEflags = pNewTSS32->eflags;
2559 uNewEax = pNewTSS32->eax;
2560 uNewEcx = pNewTSS32->ecx;
2561 uNewEdx = pNewTSS32->edx;
2562 uNewEbx = pNewTSS32->ebx;
2563 uNewEsp = pNewTSS32->esp;
2564 uNewEbp = pNewTSS32->ebp;
2565 uNewEsi = pNewTSS32->esi;
2566 uNewEdi = pNewTSS32->edi;
2567 uNewES = pNewTSS32->es;
2568 uNewCS = pNewTSS32->cs;
2569 uNewSS = pNewTSS32->ss;
2570 uNewDS = pNewTSS32->ds;
2571 uNewFS = pNewTSS32->fs;
2572 uNewGS = pNewTSS32->gs;
2573 uNewLdt = pNewTSS32->selLdt;
2574 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
2575 }
2576 else
2577 {
2578 PCX86TSS16 pNewTSS16 = (PCX86TSS16)pvNewTSS;
2579 uNewCr3 = 0;
2580 uNewEip = pNewTSS16->ip;
2581 uNewEflags = pNewTSS16->flags;
2582 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax;
2583 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx;
2584 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx;
2585 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx;
2586 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp;
2587 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp;
2588 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si;
2589 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di;
2590 uNewES = pNewTSS16->es;
2591 uNewCS = pNewTSS16->cs;
2592 uNewSS = pNewTSS16->ss;
2593 uNewDS = pNewTSS16->ds;
2594 uNewFS = 0;
2595 uNewGS = 0;
2596 uNewLdt = pNewTSS16->selLdt;
2597 fNewDebugTrap = false;
2598 }
2599
2600 if (GCPtrNewTSS == GCPtrCurTSS)
2601 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
2602 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
2603
2604 /*
2605 * We're done accessing the new TSS.
2606 */
2607 rcStrict = iemMemCommitAndUnmap(pVCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
2608 if (rcStrict != VINF_SUCCESS)
2609 {
2610 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
2611 return rcStrict;
2612 }
2613
2614 /*
2615 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
2616 */
2617 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
2618 {
2619 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
2620 pVCpu->cpum.GstCtx.gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0);
2621 if (rcStrict != VINF_SUCCESS)
2622 {
2623 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2624 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2625 return rcStrict;
2626 }
2627
2628 /* Check that the descriptor indicates the new TSS is available (not busy). */
2629 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2630 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
2631 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
2632
2633 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2634 rcStrict = iemMemCommitAndUnmap(pVCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
2635 if (rcStrict != VINF_SUCCESS)
2636 {
2637 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2638 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2639 return rcStrict;
2640 }
2641 }
2642
2643 /*
2644 * From this point on, we're technically in the new task. Exceptions raised from
2645 * here on are delivered after the task switch completes but before any instruction in the new task executes.
2646 */
2647 pVCpu->cpum.GstCtx.tr.Sel = SelTSS;
2648 pVCpu->cpum.GstCtx.tr.ValidSel = SelTSS;
2649 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
2650 pVCpu->cpum.GstCtx.tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
2651 pVCpu->cpum.GstCtx.tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
2652 pVCpu->cpum.GstCtx.tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy);
2653 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
2654
2655 /* Set the busy bit in TR. */
2656 pVCpu->cpum.GstCtx.tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2657
2658 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
2659 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2660 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2661 {
2662 uNewEflags |= X86_EFL_NT;
2663 }
2664
2665 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
2666 pVCpu->cpum.GstCtx.cr0 |= X86_CR0_TS;
2667 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
2668
2669 pVCpu->cpum.GstCtx.eip = uNewEip;
2670 pVCpu->cpum.GstCtx.eax = uNewEax;
2671 pVCpu->cpum.GstCtx.ecx = uNewEcx;
2672 pVCpu->cpum.GstCtx.edx = uNewEdx;
2673 pVCpu->cpum.GstCtx.ebx = uNewEbx;
2674 pVCpu->cpum.GstCtx.esp = uNewEsp;
2675 pVCpu->cpum.GstCtx.ebp = uNewEbp;
2676 pVCpu->cpum.GstCtx.esi = uNewEsi;
2677 pVCpu->cpum.GstCtx.edi = uNewEdi;
2678
2679 uNewEflags &= X86_EFL_LIVE_MASK;
2680 uNewEflags |= X86_EFL_RA1_MASK;
2681 IEMMISC_SET_EFL(pVCpu, uNewEflags);
2682
2683 /*
2684 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
2685 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
2686 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
2687 */
2688 pVCpu->cpum.GstCtx.es.Sel = uNewES;
2689 pVCpu->cpum.GstCtx.es.Attr.u &= ~X86DESCATTR_P;
2690
2691 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
2692 pVCpu->cpum.GstCtx.cs.Attr.u &= ~X86DESCATTR_P;
2693
2694 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
2695 pVCpu->cpum.GstCtx.ss.Attr.u &= ~X86DESCATTR_P;
2696
2697 pVCpu->cpum.GstCtx.ds.Sel = uNewDS;
2698 pVCpu->cpum.GstCtx.ds.Attr.u &= ~X86DESCATTR_P;
2699
2700 pVCpu->cpum.GstCtx.fs.Sel = uNewFS;
2701 pVCpu->cpum.GstCtx.fs.Attr.u &= ~X86DESCATTR_P;
2702
2703 pVCpu->cpum.GstCtx.gs.Sel = uNewGS;
2704 pVCpu->cpum.GstCtx.gs.Attr.u &= ~X86DESCATTR_P;
2705 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2706
2707 pVCpu->cpum.GstCtx.ldtr.Sel = uNewLdt;
2708 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
2709 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_P;
2710 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
2711
2712 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2713 {
2714 pVCpu->cpum.GstCtx.es.Attr.u |= X86DESCATTR_UNUSABLE;
2715 pVCpu->cpum.GstCtx.cs.Attr.u |= X86DESCATTR_UNUSABLE;
2716 pVCpu->cpum.GstCtx.ss.Attr.u |= X86DESCATTR_UNUSABLE;
2717 pVCpu->cpum.GstCtx.ds.Attr.u |= X86DESCATTR_UNUSABLE;
2718 pVCpu->cpum.GstCtx.fs.Attr.u |= X86DESCATTR_UNUSABLE;
2719 pVCpu->cpum.GstCtx.gs.Attr.u |= X86DESCATTR_UNUSABLE;
2720 pVCpu->cpum.GstCtx.ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
2721 }
2722
2723 /*
2724 * Switch CR3 for the new task.
2725 */
2726 if ( fIsNewTSS386
2727 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG))
2728 {
2729 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
2730 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
2731 AssertRCSuccessReturn(rc, rc);
2732
2733 /* Inform PGM. */
2734 /** @todo Should we raise \#GP(0) here when PAE PDPEs are invalid? */
2735 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PGE));
2736 AssertRCReturn(rc, rc);
2737 /* ignore informational status codes */
2738
2739 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
2740 }
2741
2742 /*
2743 * Switch LDTR for the new task.
2744 */
2745 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
2746 iemHlpLoadNullDataSelectorProt(pVCpu, &pVCpu->cpum.GstCtx.ldtr, uNewLdt);
2747 else
2748 {
2749 Assert(!pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
2750
2751 IEMSELDESC DescNewLdt;
2752 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
2753 if (rcStrict != VINF_SUCCESS)
2754 {
2755 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
2756 uNewLdt, pVCpu->cpum.GstCtx.gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
2757 return rcStrict;
2758 }
2759 if ( !DescNewLdt.Legacy.Gen.u1Present
2760 || DescNewLdt.Legacy.Gen.u1DescType
2761 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
2762 {
2763 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
2764 uNewLdt, DescNewLdt.Legacy.u));
2765 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
2766 }
2767
2768 pVCpu->cpum.GstCtx.ldtr.ValidSel = uNewLdt;
2769 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
2770 pVCpu->cpum.GstCtx.ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
2771 pVCpu->cpum.GstCtx.ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
2772 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
2773 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2774 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
2775 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
2776 }
2777
2778 IEMSELDESC DescSS;
2779 if (IEM_IS_V86_MODE(pVCpu))
2780 {
2781 IEM_SET_CPL(pVCpu, 3);
2782 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.es, uNewES);
2783 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.cs, uNewCS);
2784 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ss, uNewSS);
2785 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ds, uNewDS);
2786 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.fs, uNewFS);
2787 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.gs, uNewGS);
2788
2789 /* Quick fix: fake DescSS. */ /** @todo fix the code further down? */
2790 DescSS.Legacy.u = 0;
2791 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u32Limit;
2792 DescSS.Legacy.Gen.u4LimitHigh = pVCpu->cpum.GstCtx.ss.u32Limit >> 16;
2793 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u64Base;
2794 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 16);
2795 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 24);
2796 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
2797 DescSS.Legacy.Gen.u2Dpl = 3;
2798 }
2799 else
2800 {
2801 uint8_t const uNewCpl = (uNewCS & X86_SEL_RPL);
2802
2803 /*
2804 * Load the stack segment for the new task.
2805 */
2806 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
2807 {
2808 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
2809 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2810 }
2811
2812 /* Fetch the descriptor. */
2813 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
2814 if (rcStrict != VINF_SUCCESS)
2815 {
2816 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
2817 VBOXSTRICTRC_VAL(rcStrict)));
2818 return rcStrict;
2819 }
2820
2821 /* SS must be a data segment and writable. */
2822 if ( !DescSS.Legacy.Gen.u1DescType
2823 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
2824 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
2825 {
2826 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
2827 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
2828 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2829 }
2830
2831 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
2832 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
2833 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
2834 {
2835 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
2836 uNewCpl));
2837 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2838 }
2839
2840 /* Is it there? */
2841 if (!DescSS.Legacy.Gen.u1Present)
2842 {
2843 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
2844 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2845 }
2846
2847 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
2848 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
2849
2850 /* Set the accessed bit before committing the result into SS. */
2851 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2852 {
2853 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
2854 if (rcStrict != VINF_SUCCESS)
2855 return rcStrict;
2856 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2857 }
2858
2859 /* Commit SS. */
2860 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
2861 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSS;
2862 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
2863 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimit;
2864 pVCpu->cpum.GstCtx.ss.u64Base = u64Base;
2865 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
2866 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
2867
2868 /* CPL has changed, update IEM before loading rest of segments. */
2869 IEM_SET_CPL(pVCpu, uNewCpl);
2870
2871 /*
2872 * Load the data segments for the new task.
2873 */
2874 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.es, uNewES);
2875 if (rcStrict != VINF_SUCCESS)
2876 return rcStrict;
2877 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.ds, uNewDS);
2878 if (rcStrict != VINF_SUCCESS)
2879 return rcStrict;
2880 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.fs, uNewFS);
2881 if (rcStrict != VINF_SUCCESS)
2882 return rcStrict;
2883 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.gs, uNewGS);
2884 if (rcStrict != VINF_SUCCESS)
2885 return rcStrict;
2886
2887 /*
2888 * Load the code segment for the new task.
2889 */
2890 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
2891 {
2892 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
2893 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2894 }
2895
2896 /* Fetch the descriptor. */
2897 IEMSELDESC DescCS;
2898 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
2899 if (rcStrict != VINF_SUCCESS)
2900 {
2901 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
2902 return rcStrict;
2903 }
2904
2905 /* CS must be a code segment. */
2906 if ( !DescCS.Legacy.Gen.u1DescType
2907 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
2908 {
2909 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
2910 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
2911 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2912 }
2913
2914 /* For conforming CS, DPL must be less than or equal to the RPL. */
2915 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2916 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
2917 {
2918 Log(("iemTaskSwitch: conforming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
2919 DescCS.Legacy.Gen.u2Dpl));
2920 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2921 }
2922
2923 /* For non-conforming CS, DPL must match RPL. */
2924 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2925 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
2926 {
2927 Log(("iemTaskSwitch: non-conforming CS DPL RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
2928 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
2929 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2930 }
2931
2932 /* Is it there? */
2933 if (!DescCS.Legacy.Gen.u1Present)
2934 {
2935 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
2936 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2937 }
2938
2939 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
2940 u64Base = X86DESC_BASE(&DescCS.Legacy);
2941
2942 /* Set the accessed bit before committing the result into CS. */
2943 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2944 {
2945 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
2946 if (rcStrict != VINF_SUCCESS)
2947 return rcStrict;
2948 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2949 }
2950
2951 /* Commit CS. */
2952 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
2953 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCS;
2954 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
2955 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
2956 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
2957 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
2958 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
2959 }
2960
2961 /* Make sure the CPU mode is correct. */
2962 uint32_t const fExecNew = iemCalcExecFlags(pVCpu) | (pVCpu->iem.s.fExec & IEM_F_USER_OPTS);
2963 if (fExecNew != pVCpu->iem.s.fExec)
2964 Log(("iemTaskSwitch: fExec %#x -> %#x (xor %#x)\n", pVCpu->iem.s.fExec, fExecNew, pVCpu->iem.s.fExec ^ fExecNew));
2965 pVCpu->iem.s.fExec = fExecNew;
2966
2967 /** @todo Debug trap. */
2968 if (fIsNewTSS386 && fNewDebugTrap)
2969 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
2970
2971 /*
2972 * Construct the error code masks based on what caused this task switch.
2973 * See Intel Instruction reference for INT.
2974 */
2975 uint16_t uExt;
2976 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
2977 && ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
2978 || (fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)))
2979 uExt = 1;
2980 else
2981 uExt = 0;
2982
2983 /*
2984 * Push any error code on to the new stack.
2985 */
2986 if (fFlags & IEM_XCPT_FLAGS_ERR)
2987 {
2988 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
2989 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
2990 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
2991
2992 /* Check that there is sufficient space on the stack. */
2993 /** @todo Factor out segment limit checking for normal/expand down segments
2994 * into a separate function. */
2995 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
2996 {
2997 if ( pVCpu->cpum.GstCtx.esp - 1 > cbLimitSS
2998 || pVCpu->cpum.GstCtx.esp < cbStackFrame)
2999 {
3000 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
3001 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
3002 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
3003 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
3004 }
3005 }
3006 else
3007 {
3008 if ( pVCpu->cpum.GstCtx.esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
3009 || pVCpu->cpum.GstCtx.esp - cbStackFrame < cbLimitSS + UINT32_C(1))
3010 {
3011 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
3012 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
3013 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
3014 }
3015 }
3016
3017
3018 if (fIsNewTSS386)
3019 rcStrict = iemMemStackPushU32(pVCpu, uErr);
3020 else
3021 rcStrict = iemMemStackPushU16(pVCpu, uErr);
3022 if (rcStrict != VINF_SUCCESS)
3023 {
3024 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
3025 fIsNewTSS386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
3026 return rcStrict;
3027 }
3028 }
3029
3030 /* Check the new EIP against the new CS limit. */
3031 if (pVCpu->cpum.GstCtx.eip > pVCpu->cpum.GstCtx.cs.u32Limit)
3032 {
3033 Log(("iemTaskSwitch: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
3034 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.cs.u32Limit));
3035 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
3036 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
3037 }
3038
3039 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip,
3040 pVCpu->cpum.GstCtx.ss.Sel));
3041 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3042}
3043
3044
3045/**
3046 * Implements exceptions and interrupts for protected mode.
3047 *
3048 * @returns VBox strict status code.
3049 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3050 * @param cbInstr The number of bytes to offset rIP by in the return
3051 * address.
3052 * @param u8Vector The interrupt / exception vector number.
3053 * @param fFlags The flags.
3054 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3055 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3056 */
3057static VBOXSTRICTRC
3058iemRaiseXcptOrIntInProtMode(PVMCPUCC pVCpu,
3059 uint8_t cbInstr,
3060 uint8_t u8Vector,
3061 uint32_t fFlags,
3062 uint16_t uErr,
3063 uint64_t uCr2) RT_NOEXCEPT
3064{
3065 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3066
3067 /*
3068 * Read the IDT entry.
3069 */
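/* Each protected mode IDT entry (gate descriptor) is 8 bytes, so the IDT limit
must cover offset 8 * vector + 7. IDT related faults use the IDT error code
format, i.e. (vector << 3) | X86_TRAP_ERR_IDT; e.g. an out-of-bounds vector
0x0e yields (0x0e << 3) | X86_TRAP_ERR_IDT = 0x72. */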
3070 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
3071 {
3072 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3073 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3074 }
3075 X86DESC Idte;
3076 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
3077 pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(8) * u8Vector);
3078 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3079 {
3080 Log(("iemRaiseXcptOrIntInProtMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3081 return rcStrict;
3082 }
3083 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
3084 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3085 Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
3086
3087 /*
3088 * Check the descriptor type, DPL and such.
3089 * ASSUMES this is done in the same order as described for call-gate calls.
3090 */
3091 if (Idte.Gate.u1DescType)
3092 {
3093 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3094 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3095 }
3096 bool fTaskGate = false;
3097 uint8_t f32BitGate = true;
3098 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
3099 switch (Idte.Gate.u4Type)
3100 {
3101 case X86_SEL_TYPE_SYS_UNDEFINED:
3102 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
3103 case X86_SEL_TYPE_SYS_LDT:
3104 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3105 case X86_SEL_TYPE_SYS_286_CALL_GATE:
3106 case X86_SEL_TYPE_SYS_UNDEFINED2:
3107 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
3108 case X86_SEL_TYPE_SYS_UNDEFINED3:
3109 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3110 case X86_SEL_TYPE_SYS_386_CALL_GATE:
3111 case X86_SEL_TYPE_SYS_UNDEFINED4:
3112 {
3113 /** @todo check what actually happens when the type is wrong...
3114 * esp. call gates. */
3115 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3116 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3117 }
3118
3119 case X86_SEL_TYPE_SYS_286_INT_GATE:
3120 f32BitGate = false;
3121 RT_FALL_THRU();
3122 case X86_SEL_TYPE_SYS_386_INT_GATE:
3123 fEflToClear |= X86_EFL_IF;
3124 break;
3125
3126 case X86_SEL_TYPE_SYS_TASK_GATE:
3127 fTaskGate = true;
3128#ifndef IEM_IMPLEMENTS_TASKSWITCH
3129 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
3130#endif
3131 break;
3132
3133 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
3134 f32BitGate = false;
RT_FALL_THRU();
3135 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
3136 break;
3137
3138 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3139 }
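/* Note! The practical difference between interrupt and trap gates is that
interrupt gates also clear EFLAGS.IF on entry (see fEflToClear above); both
kinds clear TF, NT, RF and VM. */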
3140
3141 /* Check DPL against CPL if applicable. */
3142 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
3143 {
3144 if (IEM_GET_CPL(pVCpu) > Idte.Gate.u2Dpl)
3145 {
3146 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, IEM_GET_CPL(pVCpu), Idte.Gate.u2Dpl));
3147 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3148 }
3149 }
3150
3151 /* Is it there? */
3152 if (!Idte.Gate.u1Present)
3153 {
3154 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
3155 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3156 }
3157
3158 /* Is it a task-gate? */
3159 if (fTaskGate)
3160 {
3161 /*
3162 * Construct the error code masks based on what caused this task switch.
3163 * See Intel Instruction reference for INT.
3164 */
3165 uint16_t const uExt = ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3166 && !(fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)) ? 0 : 1;
3167 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
3168 RTSEL SelTSS = Idte.Gate.u16Sel;
3169
3170 /*
3171 * Fetch the TSS descriptor in the GDT.
3172 */
3173 IEMSELDESC DescTSS;
3174 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
3175 if (rcStrict != VINF_SUCCESS)
3176 {
3177 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
3178 VBOXSTRICTRC_VAL(rcStrict)));
3179 return rcStrict;
3180 }
3181
3182 /* The TSS descriptor must be a system segment and be available (not busy). */
3183 if ( DescTSS.Legacy.Gen.u1DescType
3184 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
3185 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
3186 {
3187 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
3188 u8Vector, SelTSS, DescTSS.Legacy.au64));
3189 return iemRaiseGeneralProtectionFault(pVCpu, (SelTSS & uSelMask) | uExt);
3190 }
3191
3192 /* The TSS must be present. */
3193 if (!DescTSS.Legacy.Gen.u1Present)
3194 {
3195 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
3196 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTSS & uSelMask) | uExt);
3197 }
3198
3199 /* Do the actual task switch. */
3200 return iemTaskSwitch(pVCpu, IEMTASKSWITCH_INT_XCPT,
3201 (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip,
3202 fFlags, uErr, uCr2, SelTSS, &DescTSS);
3203 }
3204
3205 /* A null CS is bad. */
3206 RTSEL NewCS = Idte.Gate.u16Sel;
3207 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3208 {
3209 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3210 return iemRaiseGeneralProtectionFault0(pVCpu);
3211 }
3212
3213 /* Fetch the descriptor for the new CS. */
3214 IEMSELDESC DescCS;
3215 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
3216 if (rcStrict != VINF_SUCCESS)
3217 {
3218 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3219 return rcStrict;
3220 }
3221
3222 /* Must be a code segment. */
3223 if (!DescCS.Legacy.Gen.u1DescType)
3224 {
3225 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3226 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3227 }
3228 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3229 {
3230 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3231 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3232 }
3233
3234 /* Don't allow lowering the privilege level. */
3235 /** @todo Does the lowering of privileges apply to software interrupts
3236 * only? This has bearings on the more-privileged or
3237 * same-privilege stack behavior further down. A testcase would
3238 * be nice. */
3239 if (DescCS.Legacy.Gen.u2Dpl > IEM_GET_CPL(pVCpu))
3240 {
3241 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3242 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
3243 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3244 }
3245
3246 /* Make sure the selector is present. */
3247 if (!DescCS.Legacy.Gen.u1Present)
3248 {
3249 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3250 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
3251 }
3252
3253 /* Check the new EIP against the new CS limit. */
3254 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
3255 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
3256 ? Idte.Gate.u16OffsetLow
3257 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
3258 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
3259 if (uNewEip > cbLimitCS)
3260 {
3261 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
3262 u8Vector, uNewEip, cbLimitCS, NewCS));
3263 return iemRaiseGeneralProtectionFault(pVCpu, 0);
3264 }
3265 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
3266
3267 /* Calc the flag image to push. */
3268 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3269 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3270 fEfl &= ~X86_EFL_RF;
3271 else
3272 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3273
3274 /* From V8086 mode only go to CPL 0. */
3275 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3276 ? IEM_GET_CPL(pVCpu) : DescCS.Legacy.Gen.u2Dpl;
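/* Note! A conforming handler CS keeps the current CPL, while a non-conforming
one runs at its CS.DPL. Since V8086 code executes at CPL 3, only a
non-conforming CPL-0 handler is acceptable when EFLAGS.VM is set. */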
3277 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
3278 {
3279 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
3280 return iemRaiseGeneralProtectionFault(pVCpu, 0);
3281 }
3282
3283 /*
3284 * If the privilege level changes, we need to get a new stack from the TSS.
3285 * This in turn means validating the new SS and ESP...
3286 */
3287 if (uNewCpl != IEM_GET_CPL(pVCpu))
3288 {
3289 RTSEL NewSS;
3290 uint32_t uNewEsp;
3291 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, uNewCpl, &NewSS, &uNewEsp);
3292 if (rcStrict != VINF_SUCCESS)
3293 return rcStrict;
3294
3295 IEMSELDESC DescSS;
3296 rcStrict = iemMiscValidateNewSS(pVCpu, NewSS, uNewCpl, &DescSS);
3297 if (rcStrict != VINF_SUCCESS)
3298 return rcStrict;
3299 /* If the new SS is 16-bit, we are only going to use SP, not ESP. */
3300 if (!DescSS.Legacy.Gen.u1DefBig)
3301 {
3302 Log(("iemRaiseXcptOrIntInProtMode: Forcing ESP=%#x to 16 bits\n", uNewEsp));
3303 uNewEsp = (uint16_t)uNewEsp;
3304 }
3305
3306 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
3307
3308 /* Check that there is sufficient space for the stack frame. */
3309 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
3310 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
3311 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
3312 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
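/* The frame on the new stack is, from low to high addresses:
[error code] EIP CS EFLAGS ESP SS, plus ES DS FS GS when interrupting V8086
code; each entry is 2 or 4 bytes depending on the gate size, which is what
the calculation above encodes. */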
3313
3314 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3315 {
3316 if ( uNewEsp - 1 > cbLimitSS
3317 || uNewEsp < cbStackFrame)
3318 {
3319 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
3320 u8Vector, NewSS, uNewEsp, cbStackFrame));
3321 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
3322 }
3323 }
3324 else
3325 {
3326 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT16_MAX)
3327 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
3328 {
3329 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
3330 u8Vector, NewSS, uNewEsp, cbStackFrame));
3331 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
3332 }
3333 }
3334
3335 /*
3336 * Start making changes.
3337 */
3338
3339 /* Set the new CPL so that stack accesses use it. */
3340 uint8_t const uOldCpl = IEM_GET_CPL(pVCpu);
3341 IEM_SET_CPL(pVCpu, uNewCpl);
3342
3343 /* Create the stack frame. */
3344 RTPTRUNION uStackFrame;
3345 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
3346 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy),
3347 IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */
3348 if (rcStrict != VINF_SUCCESS)
3349 return rcStrict;
3350 void * const pvStackFrame = uStackFrame.pv;
3351 if (f32BitGate)
3352 {
3353 if (fFlags & IEM_XCPT_FLAGS_ERR)
3354 *uStackFrame.pu32++ = uErr;
3355 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3356 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
3357 uStackFrame.pu32[2] = fEfl;
3358 uStackFrame.pu32[3] = pVCpu->cpum.GstCtx.esp;
3359 uStackFrame.pu32[4] = pVCpu->cpum.GstCtx.ss.Sel;
3360 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
3361 if (fEfl & X86_EFL_VM)
3362 {
3363 uStackFrame.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel;
3364 uStackFrame.pu32[5] = pVCpu->cpum.GstCtx.es.Sel;
3365 uStackFrame.pu32[6] = pVCpu->cpum.GstCtx.ds.Sel;
3366 uStackFrame.pu32[7] = pVCpu->cpum.GstCtx.fs.Sel;
3367 uStackFrame.pu32[8] = pVCpu->cpum.GstCtx.gs.Sel;
3368 }
3369 }
3370 else
3371 {
3372 if (fFlags & IEM_XCPT_FLAGS_ERR)
3373 *uStackFrame.pu16++ = uErr;
3374 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
3375 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
3376 uStackFrame.pu16[2] = fEfl;
3377 uStackFrame.pu16[3] = pVCpu->cpum.GstCtx.sp;
3378 uStackFrame.pu16[4] = pVCpu->cpum.GstCtx.ss.Sel;
3379 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.sp));
3380 if (fEfl & X86_EFL_VM)
3381 {
3382 uStackFrame.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
3383 uStackFrame.pu16[5] = pVCpu->cpum.GstCtx.es.Sel;
3384 uStackFrame.pu16[6] = pVCpu->cpum.GstCtx.ds.Sel;
3385 uStackFrame.pu16[7] = pVCpu->cpum.GstCtx.fs.Sel;
3386 uStackFrame.pu16[8] = pVCpu->cpum.GstCtx.gs.Sel;
3387 }
3388 }
3389 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
3390 if (rcStrict != VINF_SUCCESS)
3391 return rcStrict;
3392
3393 /* Mark the selectors 'accessed' (hope this is the correct time). */
3394 /** @todo testcase: exactly _when_ are the accessed bits set - before or
3395 * after pushing the stack frame? (Write protect the gdt + stack to
3396 * find out.) */
3397 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3398 {
3399 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3400 if (rcStrict != VINF_SUCCESS)
3401 return rcStrict;
3402 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3403 }
3404
3405 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3406 {
3407 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
3408 if (rcStrict != VINF_SUCCESS)
3409 return rcStrict;
3410 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3411 }
3412
3413 /*
3414 * Start committing the register changes (joins with the DPL=CPL branch).
3415 */
3416 pVCpu->cpum.GstCtx.ss.Sel = NewSS;
3417 pVCpu->cpum.GstCtx.ss.ValidSel = NewSS;
3418 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3419 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimitSS;
3420 pVCpu->cpum.GstCtx.ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
3421 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3422 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
3423 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
3424 * SP is loaded).
3425 * Need to check the other combinations too:
3426 * - 16-bit TSS, 32-bit handler
3427 * - 32-bit TSS, 16-bit handler */
3428 if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
3429 pVCpu->cpum.GstCtx.sp = (uint16_t)(uNewEsp - cbStackFrame);
3430 else
3431 pVCpu->cpum.GstCtx.rsp = uNewEsp - cbStackFrame;
3432
3433 if (fEfl & X86_EFL_VM)
3434 {
3435 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.gs);
3436 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.fs);
3437 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.es);
3438 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.ds);
3439 }
3440 }
3441 /*
3442 * Same privilege, no stack change and smaller stack frame.
3443 */
3444 else
3445 {
3446 uint64_t uNewRsp;
3447 RTPTRUNION uStackFrame;
3448 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
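/* Same privilege frame, low to high addresses: [error code] EIP/IP CS FLAGS,
again in 2 or 4 byte entries depending on the gate size. */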
3449 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, f32BitGate ? 3 : 1, &uStackFrame.pv, &uNewRsp);
3450 if (rcStrict != VINF_SUCCESS)
3451 return rcStrict;
3452 void * const pvStackFrame = uStackFrame.pv;
3453
3454 if (f32BitGate)
3455 {
3456 if (fFlags & IEM_XCPT_FLAGS_ERR)
3457 *uStackFrame.pu32++ = uErr;
3458 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3459 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | IEM_GET_CPL(pVCpu);
3460 uStackFrame.pu32[2] = fEfl;
3461 }
3462 else
3463 {
3464 if (fFlags & IEM_XCPT_FLAGS_ERR)
3465 *uStackFrame.pu16++ = uErr;
3466 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3467 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | IEM_GET_CPL(pVCpu);
3468 uStackFrame.pu16[2] = fEfl;
3469 }
3470 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
3471 if (rcStrict != VINF_SUCCESS)
3472 return rcStrict;
3473
3474 /* Mark the CS selector as 'accessed'. */
3475 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3476 {
3477 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3478 if (rcStrict != VINF_SUCCESS)
3479 return rcStrict;
3480 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3481 }
3482
3483 /*
3484 * Start committing the register changes (joins with the other branch).
3485 */
3486 pVCpu->cpum.GstCtx.rsp = uNewRsp;
3487 }
3488
3489 /* ... register committing continues. */
3490 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3491 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3492 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3493 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCS;
3494 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3495 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3496
3497 pVCpu->cpum.GstCtx.rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
3498 fEfl &= ~fEflToClear;
3499 IEMMISC_SET_EFL(pVCpu, fEfl);
3500
3501 if (fFlags & IEM_XCPT_FLAGS_CR2)
3502 pVCpu->cpum.GstCtx.cr2 = uCr2;
3503
3504 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3505 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3506
3507 /* Make sure the execution flags are correct. */
3508 uint32_t const fExecNew = iemCalcExecFlags(pVCpu) | (pVCpu->iem.s.fExec & IEM_F_USER_OPTS);
3509 if (fExecNew != pVCpu->iem.s.fExec)
3510 Log(("iemRaiseXcptOrIntInProtMode: fExec %#x -> %#x (xor %#x)\n",
3511 pVCpu->iem.s.fExec, fExecNew, pVCpu->iem.s.fExec ^ fExecNew));
3512 pVCpu->iem.s.fExec = fExecNew;
3513 Assert(IEM_GET_CPL(pVCpu) == uNewCpl);
3514
3515 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3516}
3517
3518
3519/**
3520 * Implements exceptions and interrupts for long mode.
3521 *
3522 * @returns VBox strict status code.
3523 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3524 * @param cbInstr The number of bytes to offset rIP by in the return
3525 * address.
3526 * @param u8Vector The interrupt / exception vector number.
3527 * @param fFlags The flags.
3528 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3529 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3530 */
3531static VBOXSTRICTRC
3532iemRaiseXcptOrIntInLongMode(PVMCPUCC pVCpu,
3533 uint8_t cbInstr,
3534 uint8_t u8Vector,
3535 uint32_t fFlags,
3536 uint16_t uErr,
3537 uint64_t uCr2) RT_NOEXCEPT
3538{
3539 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3540
3541 /*
3542 * Read the IDT entry.
3543 */
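/* In long mode each IDT entry is 16 bytes (the offset field grows to 64 bits
and an IST field is added), hence the shift by 4 and the two 8-byte fetches
below. */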
3544 uint16_t offIdt = (uint16_t)u8Vector << 4;
3545 if (pVCpu->cpum.GstCtx.idtr.cbIdt < offIdt + 7)
3546 {
3547 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3548 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3549 }
3550 X86DESC64 Idte;
3551#ifdef _MSC_VER /* Shut up silly compiler warning. */
3552 Idte.au64[0] = 0;
3553 Idte.au64[1] = 0;
3554#endif
3555 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt);
3556 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3557 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt + 8);
3558 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3559 {
3560 Log(("iemRaiseXcptOrIntInLongMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3561 return rcStrict;
3562 }
3563 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
3564 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3565 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
3566
3567 /*
3568 * Check the descriptor type, DPL and such.
3569 * ASSUMES this is done in the same order as described for call-gate calls.
3570 */
3571 if (Idte.Gate.u1DescType)
3572 {
3573 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3574 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3575 }
3576 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
3577 switch (Idte.Gate.u4Type)
3578 {
3579 case AMD64_SEL_TYPE_SYS_INT_GATE:
3580 fEflToClear |= X86_EFL_IF;
3581 break;
3582 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
3583 break;
3584
3585 default:
3586 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3587 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3588 }
3589
3590 /* Check DPL against CPL if applicable. */
3591 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
3592 {
3593 if (IEM_GET_CPL(pVCpu) > Idte.Gate.u2Dpl)
3594 {
3595 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, IEM_GET_CPL(pVCpu), Idte.Gate.u2Dpl));
3596 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3597 }
3598 }
3599
3600 /* Is it there? */
3601 if (!Idte.Gate.u1Present)
3602 {
3603 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
3604 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3605 }
3606
3607 /* A null CS is bad. */
3608 RTSEL NewCS = Idte.Gate.u16Sel;
3609 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3610 {
3611 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3612 return iemRaiseGeneralProtectionFault0(pVCpu);
3613 }
3614
3615 /* Fetch the descriptor for the new CS. */
3616 IEMSELDESC DescCS;
3617 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
3618 if (rcStrict != VINF_SUCCESS)
3619 {
3620 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3621 return rcStrict;
3622 }
3623
3624 /* Must be a 64-bit code segment. */
3625 if (!DescCS.Long.Gen.u1DescType)
3626 {
3627 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3628 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3629 }
3630 if ( !DescCS.Long.Gen.u1Long
3631 || DescCS.Long.Gen.u1DefBig
3632 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
3633 {
3634 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
3635 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
3636 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3637 }
3638
3639 /* Don't allow lowering the privilege level. For non-conforming CS
3640 selectors, the CS.DPL sets the privilege level the trap/interrupt
3641 handler runs at. For conforming CS selectors, the CPL remains
3642 unchanged, but the CS.DPL must be <= CPL. */
3643 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
3644 * when CPU in Ring-0. Result \#GP? */
3645 if (DescCS.Legacy.Gen.u2Dpl > IEM_GET_CPL(pVCpu))
3646 {
3647 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3648 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
3649 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3650 }
3651
3652
3653 /* Make sure the selector is present. */
3654 if (!DescCS.Legacy.Gen.u1Present)
3655 {
3656 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3657 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
3658 }
3659
3660 /* Check that the new RIP is canonical. */
3661 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
3662 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
3663 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
3664 if (!IEM_IS_CANONICAL(uNewRip))
3665 {
3666 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
3667 return iemRaiseGeneralProtectionFault0(pVCpu);
3668 }
3669
3670 /*
3671 * If the privilege level changes or if the IST isn't zero, we need to get
3672 * a new stack from the TSS.
3673 */
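/* Note! A non-zero IST value selects one of the seven alternate stack pointers
in the 64-bit TSS and forces a stack switch even when the privilege level
stays the same. */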
3674 uint64_t uNewRsp;
3675 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3676 ? IEM_GET_CPL(pVCpu) : DescCS.Legacy.Gen.u2Dpl;
3677 if ( uNewCpl != IEM_GET_CPL(pVCpu)
3678 || Idte.Gate.u3IST != 0)
3679 {
3680 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
3681 if (rcStrict != VINF_SUCCESS)
3682 return rcStrict;
3683 }
3684 else
3685 uNewRsp = pVCpu->cpum.GstCtx.rsp;
3686 uNewRsp &= ~(uint64_t)0xf;
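/* The CPU aligns the stack pointer to a 16 byte boundary before pushing the
64-bit interrupt frame; the masking above mimics that. */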
3687
3688 /*
3689 * Calc the flag image to push.
3690 */
3691 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3692 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3693 fEfl &= ~X86_EFL_RF;
3694 else
3695 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3696
3697 /*
3698 * Start making changes.
3699 */
3700 /* Set the new CPL so that stack accesses use it. */
3701 uint8_t const uOldCpl = IEM_GET_CPL(pVCpu);
3702 IEM_SET_CPL(pVCpu, uNewCpl);
3703/** @todo Setting CPL this early seems wrong as it would affect any errors we
3704 * raise while accessing the stack and (?) GDT/LDT... */
3705
3706 /* Create the stack frame. */
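/* In 64-bit mode SS, RSP, RFLAGS, CS and RIP are always pushed as five 8-byte
entries, plus an optional error code: 40 or 48 bytes in total. */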
3707 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
3708 RTPTRUNION uStackFrame;
3709 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
3710 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */
3711 if (rcStrict != VINF_SUCCESS)
3712 return rcStrict;
3713 void * const pvStackFrame = uStackFrame.pv;
3714
3715 if (fFlags & IEM_XCPT_FLAGS_ERR)
3716 *uStackFrame.pu64++ = uErr;
3717 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.rip + cbInstr : pVCpu->cpum.GstCtx.rip;
3718 uStackFrame.pu64[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
3719 uStackFrame.pu64[2] = fEfl;
3720 uStackFrame.pu64[3] = pVCpu->cpum.GstCtx.rsp;
3721 uStackFrame.pu64[4] = pVCpu->cpum.GstCtx.ss.Sel;
3722 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
3723 if (rcStrict != VINF_SUCCESS)
3724 return rcStrict;
3725
3726 /* Mark the CS selector 'accessed' (hope this is the correct time). */
3727 /** @todo testcase: exactly _when_ are the accessed bits set - before or
3728 * after pushing the stack frame? (Write protect the gdt + stack to
3729 * find out.) */
3730 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3731 {
3732 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3733 if (rcStrict != VINF_SUCCESS)
3734 return rcStrict;
3735 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3736 }
3737
3738 /*
3739 * Start committing the register changes.
3740 */
3741 /** @todo research/testcase: Figure out what VT-x and AMD-V load into the
3742 * hidden registers when interrupting 32-bit or 16-bit code! */
3743 if (uNewCpl != uOldCpl)
3744 {
3745 pVCpu->cpum.GstCtx.ss.Sel = 0 | uNewCpl;
3746 pVCpu->cpum.GstCtx.ss.ValidSel = 0 | uNewCpl;
3747 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3748 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
3749 pVCpu->cpum.GstCtx.ss.u64Base = 0;
3750 pVCpu->cpum.GstCtx.ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
3751 }
3752 pVCpu->cpum.GstCtx.rsp = uNewRsp - cbStackFrame;
3753 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3754 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3755 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3756 pVCpu->cpum.GstCtx.cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
3757 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3758 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3759 pVCpu->cpum.GstCtx.rip = uNewRip;
3760
3761 fEfl &= ~fEflToClear;
3762 IEMMISC_SET_EFL(pVCpu, fEfl);
3763
3764 if (fFlags & IEM_XCPT_FLAGS_CR2)
3765 pVCpu->cpum.GstCtx.cr2 = uCr2;
3766
3767 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3768 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3769
3770 iemRecalcExecModeAndCplFlags(pVCpu);
3771
3772 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3773}
3774
3775
3776/**
3777 * Implements exceptions and interrupts.
3778 *
3779 * All exceptions and interrupts go through this function!
3780 *
3781 * @returns VBox strict status code.
3782 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3783 * @param cbInstr The number of bytes to offset rIP by in the return
3784 * address.
3785 * @param u8Vector The interrupt / exception vector number.
3786 * @param fFlags The flags.
3787 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3788 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3789 */
3790VBOXSTRICTRC
3791iemRaiseXcptOrInt(PVMCPUCC pVCpu,
3792 uint8_t cbInstr,
3793 uint8_t u8Vector,
3794 uint32_t fFlags,
3795 uint16_t uErr,
3796 uint64_t uCr2) RT_NOEXCEPT
3797{
3798 /*
3799 * Get all the state that we might need here.
3800 */
3801 IEM_CTX_IMPORT_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3802 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3803
3804#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
3805 /*
3806 * Flush prefetch buffer
3807 */
3808 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
3809#endif
3810
3811 /*
3812 * Perform the V8086 IOPL check and upgrade the fault without nesting.
3813 */
3814 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1VM
3815 && pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL != 3
3816 && (fFlags & ( IEM_XCPT_FLAGS_T_SOFT_INT
3817 | IEM_XCPT_FLAGS_BP_INSTR
3818 | IEM_XCPT_FLAGS_ICEBP_INSTR
3819 | IEM_XCPT_FLAGS_OF_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
3820 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) )
3821 {
3822 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
3823 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
3824 u8Vector = X86_XCPT_GP;
3825 uErr = 0;
3826 }
3827#ifdef DBGFTRACE_ENABLED
3828 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
3829 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
3830 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp);
3831#endif
3832
3833 /*
3834 * Evaluate whether NMI blocking should be in effect.
3835 * Normally, NMI blocking is in effect whenever we inject an NMI.
3836 */
3837 bool fBlockNmi = u8Vector == X86_XCPT_NMI
3838 && (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT);
3839
3840#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3841 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3842 {
3843 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEvent(pVCpu, u8Vector, fFlags, uErr, uCr2, cbInstr);
3844 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
3845 return rcStrict0;
3846
3847 /* If virtual-NMI blocking is in effect for the nested-guest, guest NMIs are not blocked. */
3848 if (pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking)
3849 {
3850 Assert(CPUMIsGuestVmxPinCtlsSet(&pVCpu->cpum.GstCtx, VMX_PIN_CTLS_VIRT_NMI));
3851 fBlockNmi = false;
3852 }
3853 }
3854#endif
3855
3856#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
3857 if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
3858 {
3859 /*
3860 * If the event is being injected as part of VMRUN, it isn't subject to event
3861 * intercepts in the nested-guest. However, secondary exceptions that occur
3862 * during injection of any event -are- subject to exception intercepts.
3863 *
3864 * See AMD spec. 15.20 "Event Injection".
3865 */
3866 if (!pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents)
3867 pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents = true;
3868 else
3869 {
3870 /*
3871 * Check and handle if the event being raised is intercepted.
3872 */
3873 VBOXSTRICTRC rcStrict0 = iemHandleSvmEventIntercept(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
3874 if (rcStrict0 != VINF_SVM_INTERCEPT_NOT_ACTIVE)
3875 return rcStrict0;
3876 }
3877 }
3878#endif
3879
3880 /*
3881 * Set NMI blocking if necessary.
3882 */
3883 if (fBlockNmi)
3884 CPUMSetInterruptInhibitingByNmi(&pVCpu->cpum.GstCtx);
3885
3886 /*
3887 * Do recursion accounting.
3888 */
3889 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
3890 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
3891 if (pVCpu->iem.s.cXcptRecursions == 0)
3892 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
3893 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2));
3894 else
3895 {
3896 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
3897 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt,
3898 pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
3899
3900 if (pVCpu->iem.s.cXcptRecursions >= 4)
3901 {
3902#ifdef DEBUG_bird
3903 AssertFailed();
3904#endif
3905 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
3906 }
3907
3908 /*
3909 * Evaluate the sequence of recurring events.
3910 */
3911 IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevXcpt, uPrevXcpt, fFlags, u8Vector,
3912 NULL /* pXcptRaiseInfo */);
3913 if (enmRaise == IEMXCPTRAISE_CURRENT_XCPT)
3914 { /* likely */ }
3915 else if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
3916 {
3917 Log2(("iemRaiseXcptOrInt: Raising double fault. uPrevXcpt=%#x\n", uPrevXcpt));
3918 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
3919 u8Vector = X86_XCPT_DF;
3920 uErr = 0;
3921#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3922 /* VMX nested-guest #DF intercept needs to be checked here. */
3923 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3924 {
3925 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEventDoubleFault(pVCpu);
3926 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
3927 return rcStrict0;
3928 }
3929#endif
3930 /* SVM nested-guest #DF intercepts need to be checked now. See AMD spec. 15.12 "Exception Intercepts". */
3931 if (IEM_SVM_IS_XCPT_INTERCEPT_SET(pVCpu, X86_XCPT_DF))
3932 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
3933 }
3934 else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)
3935 {
3936 Log2(("iemRaiseXcptOrInt: Raising triple fault. uPrevXcpt=%#x\n", uPrevXcpt));
3937 return iemInitiateCpuShutdown(pVCpu);
3938 }
3939 else if (enmRaise == IEMXCPTRAISE_CPU_HANG)
3940 {
3941 /* If a nested-guest enters an endless CPU loop condition, we'll emulate it; otherwise guru. */
3942 Log2(("iemRaiseXcptOrInt: CPU hang condition detected\n"));
3943 if ( !CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu))
3944 && !CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
3945 return VERR_EM_GUEST_CPU_HANG;
3946 }
3947 else
3948 {
3949 AssertMsgFailed(("Unexpected condition! enmRaise=%#x uPrevXcpt=%#x fPrevXcpt=%#x, u8Vector=%#x fFlags=%#x\n",
3950 enmRaise, uPrevXcpt, fPrevXcpt, u8Vector, fFlags));
3951 return VERR_IEM_IPE_9;
3952 }
3953
3954 /*
3955 * The 'EXT' bit is set when an exception occurs during delivery of an external
3956 * event (such as an interrupt or an earlier exception)[1]. The privileged software
3957 * exception (INT1/ICEBP) also sets the EXT bit[2]. For exceptions generated by
3958 * software interrupts and the INTO and INT3 instructions, the 'EXT' bit is not set[3].
3959 *
3960 * [1] - Intel spec. 6.13 "Error Code"
3961 * [2] - Intel spec. 26.5.1.1 "Details of Vectored-Event Injection".
3962 * [3] - Intel Instruction reference for INT n.
3963 */
3964 if ( (fPrevXcpt & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR))
3965 && (fFlags & IEM_XCPT_FLAGS_ERR)
3966 && u8Vector != X86_XCPT_PF
3967 && u8Vector != X86_XCPT_DF)
3968 {
3969 uErr |= X86_TRAP_ERR_EXTERNAL;
3970 }
3971 }
3972
3973 pVCpu->iem.s.cXcptRecursions++;
3974 pVCpu->iem.s.uCurXcpt = u8Vector;
3975 pVCpu->iem.s.fCurXcpt = fFlags;
3976 pVCpu->iem.s.uCurXcptErr = uErr;
3977 pVCpu->iem.s.uCurXcptCr2 = uCr2;
3978
3979 /*
3980 * Extensive logging.
3981 */
3982#if defined(LOG_ENABLED) && defined(IN_RING3)
3983 if (LogIs3Enabled())
3984 {
3985 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR_MASK);
3986 PVM pVM = pVCpu->CTX_SUFF(pVM);
3987 char szRegs[4096];
3988 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
3989 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
3990 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
3991 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
3992 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
3993 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
3994 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
3995 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
3996 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
3997 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
3998 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
3999 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4000 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4001 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4002 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4003 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4004 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4005 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4006 " efer=%016VR{efer}\n"
4007 " pat=%016VR{pat}\n"
4008 " sf_mask=%016VR{sf_mask}\n"
4009 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4010 " lstar=%016VR{lstar}\n"
4011 " star=%016VR{star} cstar=%016VR{cstar}\n"
4012 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4013 );
4014
4015 char szInstr[256];
4016 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4017 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4018 szInstr, sizeof(szInstr), NULL);
4019 Log3(("%s%s\n", szRegs, szInstr));
4020 }
4021#endif /* LOG_ENABLED */
4022
4023 /*
4024 * Stats.
4025 */
4026 if (!(fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT))
4027 STAM_REL_STATS({ pVCpu->iem.s.aStatInts[u8Vector] += 1; });
4028 else if (u8Vector <= X86_XCPT_LAST)
4029 {
4030 STAM_REL_COUNTER_INC(&pVCpu->iem.s.aStatXcpts[u8Vector]);
4031 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_XCPT, u8Vector),
4032 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base, ASMReadTSC());
4033 }
4034
4035 /*
4036 * A #PF implies an INVLPG for the CR2 value (see 4.10.1.1 in Intel SDM Vol 3)
4037 * to ensure that a stale TLB or paging cache entry will only cause one
4038 * spurious #PF.
4039 */
4040 if ( u8Vector == X86_XCPT_PF
4041 && (fFlags & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_CR2)) == (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_CR2))
4042 IEMTlbInvalidatePage(pVCpu, uCr2);
4043
4044 /*
4045 * Call the mode specific worker function.
4046 */
4047 VBOXSTRICTRC rcStrict;
4048 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
4049 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4050 else if (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LMA)
4051 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4052 else
4053 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4054
4055 /* Flush the prefetch buffer. */
4056#ifdef IEM_WITH_CODE_TLB
4057 pVCpu->iem.s.pbInstrBuf = NULL;
4058#else
4059 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4060#endif
4061
4062 /*
4063 * Unwind.
4064 */
4065 pVCpu->iem.s.cXcptRecursions--;
4066 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
4067 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
4068 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u depth=%d\n",
4069 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel,
4070 pVCpu->cpum.GstCtx.esp, IEM_GET_CPL(pVCpu), pVCpu->iem.s.cXcptRecursions + 1));
4071 return rcStrict;
4072}
4073
4074#ifdef IEM_WITH_SETJMP
4075/**
4076 * See iemRaiseXcptOrInt. Will not return.
4077 */
4078DECL_NO_RETURN(void)
4079iemRaiseXcptOrIntJmp(PVMCPUCC pVCpu,
4080 uint8_t cbInstr,
4081 uint8_t u8Vector,
4082 uint32_t fFlags,
4083 uint16_t uErr,
4084 uint64_t uCr2) IEM_NOEXCEPT_MAY_LONGJMP
4085{
4086 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4087 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
4088}
4089#endif
4090
4091
4092/** \#DE - 00. */
4093VBOXSTRICTRC iemRaiseDivideError(PVMCPUCC pVCpu) RT_NOEXCEPT
4094{
4095 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4096}
4097
4098
4099/** \#DB - 01.
4100 * @note This automatically clears DR7.GD. */
4101VBOXSTRICTRC iemRaiseDebugException(PVMCPUCC pVCpu) RT_NOEXCEPT
4102{
4103 /* This always clears RF (via IEM_XCPT_FLAGS_DRx_INSTR_BP). */
4104 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
4105 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_DRx_INSTR_BP, 0, 0);
4106}
4107
4108
4109/** \#BR - 05. */
4110VBOXSTRICTRC iemRaiseBoundRangeExceeded(PVMCPUCC pVCpu) RT_NOEXCEPT
4111{
4112 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_BR, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4113}
4114
4115
4116/** \#UD - 06. */
4117VBOXSTRICTRC iemRaiseUndefinedOpcode(PVMCPUCC pVCpu) RT_NOEXCEPT
4118{
4119 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4120}
4121
4122
4123/** \#NM - 07. */
4124VBOXSTRICTRC iemRaiseDeviceNotAvailable(PVMCPUCC pVCpu) RT_NOEXCEPT
4125{
4126 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4127}
4128
4129
4130/** \#TS(err) - 0a. */
4131VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4132{
4133 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4134}
4135
4136
4137/** \#TS(tr) - 0a. */
4138VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPUCC pVCpu) RT_NOEXCEPT
4139{
4140 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4141 pVCpu->cpum.GstCtx.tr.Sel, 0);
4142}
4143
4144
4145/** \#TS(0) - 0a. */
4146VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPUCC pVCpu) RT_NOEXCEPT
4147{
4148 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4149 0, 0);
4150}
4151
4152
4153/** \#TS(sel) - 0a. */
4154VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4155{
4156 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4157 uSel & X86_SEL_MASK_OFF_RPL, 0);
4158}
4159
4160
4161/** \#NP(err) - 0b. */
4162VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4163{
4164 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4165}
4166
4167
4168/** \#NP(sel) - 0b. */
4169VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4170{
4171 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4172 uSel & ~X86_SEL_RPL, 0);
4173}
4174
4175
4176/** \#SS(seg) - 0c. */
4177VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4178{
4179 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4180 uSel & ~X86_SEL_RPL, 0);
4181}
4182
4183
4184/** \#SS(err) - 0c. */
4185VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4186{
4187 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4188}
4189
4190
4191/** \#GP(n) - 0d. */
4192VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4193{
4194 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4195}
4196
4197
4198/** \#GP(0) - 0d. */
4199VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPUCC pVCpu) RT_NOEXCEPT
4200{
4201 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4202}
4203
4204#ifdef IEM_WITH_SETJMP
4205/** \#GP(0) - 0d. */
4206DECL_NO_RETURN(void) iemRaiseGeneralProtectionFault0Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4207{
4208 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4209}
4210#endif
4211
4212
4213/** \#GP(sel) - 0d. */
4214VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
4215{
4216 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4217 Sel & ~X86_SEL_RPL, 0);
4218}
4219
4220
4221/** \#GP(0) - 0d. */
4222VBOXSTRICTRC iemRaiseNotCanonical(PVMCPUCC pVCpu) RT_NOEXCEPT
4223{
4224 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4225}
4226
4227
4228/** \#GP(sel) - 0d. */
4229VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4230{
4231 NOREF(iSegReg); NOREF(fAccess);
4232 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4233 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4234}
4235
4236#ifdef IEM_WITH_SETJMP
4237/** \#GP(sel) - 0d, longjmp. */
4238DECL_NO_RETURN(void) iemRaiseSelectorBoundsJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP
4239{
4240 NOREF(iSegReg); NOREF(fAccess);
4241 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4242 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4243}
4244#endif
4245
4246/** \#GP(sel) - 0d. */
4247VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
4248{
4249 NOREF(Sel);
4250 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4251}
4252
4253#ifdef IEM_WITH_SETJMP
4254/** \#GP(sel) - 0d, longjmp. */
4255DECL_NO_RETURN(void) iemRaiseSelectorBoundsBySelectorJmp(PVMCPUCC pVCpu, RTSEL Sel) IEM_NOEXCEPT_MAY_LONGJMP
4256{
4257 NOREF(Sel);
4258 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4259}
4260#endif
4261
4262
4263/** \#GP(sel) - 0d. */
4264VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4265{
4266 NOREF(iSegReg); NOREF(fAccess);
4267 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4268}
4269
4270#ifdef IEM_WITH_SETJMP
4271/** \#GP(sel) - 0d, longjmp. */
4272DECL_NO_RETURN(void) iemRaiseSelectorInvalidAccessJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP
4273{
4274 NOREF(iSegReg); NOREF(fAccess);
4275 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4276}
4277#endif
4278
4279
4280/** \#PF(n) - 0e. */
4281VBOXSTRICTRC iemRaisePageFault(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess, uint32_t fAccess, int rc) RT_NOEXCEPT
4282{
4283 uint16_t uErr;
4284 switch (rc)
4285 {
4286 case VERR_PAGE_NOT_PRESENT:
4287 case VERR_PAGE_TABLE_NOT_PRESENT:
4288 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
4289 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
4290 uErr = 0;
4291 break;
4292
4293 default:
4294 AssertMsgFailed(("%Rrc\n", rc));
4295 RT_FALL_THRU();
4296 case VERR_ACCESS_DENIED:
4297 uErr = X86_TRAP_PF_P;
4298 break;
4299
4300 /** @todo reserved */
4301 }
4302
4303 if (IEM_GET_CPL(pVCpu) == 3)
4304 uErr |= X86_TRAP_PF_US;
4305
4306 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
4307 && ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
4308 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) ) )
4309 uErr |= X86_TRAP_PF_ID;
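/* Page fault error code bits used here: P (bit 0) = protection violation
rather than not-present, W/R (bit 1) = write access, U/S (bit 2) = CPL 3
access, I/D (bit 4) = instruction fetch (only reported with PAE + NXE as
checked above). */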
4310
4311#if 0 /* This is so much non-sense, really. Why was it done like that? */
4312 /* Note! RW access callers reporting a WRITE protection fault, will clear
4313 the READ flag before calling. So, read-modify-write accesses (RW)
4314 can safely be reported as READ faults. */
4315 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
4316 uErr |= X86_TRAP_PF_RW;
4317#else
4318 if (fAccess & IEM_ACCESS_TYPE_WRITE)
4319 {
4320 /// @todo r=bird: bs3-cpu-basic-2 wants X86_TRAP_PF_RW for xchg and cmpxchg
4321 /// (regardless of outcome of the comparison in the latter case).
4322 //if (!(fAccess & IEM_ACCESS_TYPE_READ))
4323 uErr |= X86_TRAP_PF_RW;
4324 }
4325#endif
4326
4327 /* For FXSAVE and FRSTOR the #PF is typically reported at the max address
4328 of the memory operand rather than at the start of it. (Not sure what
4329 happens if it crosses a page boundary.) The current heuristic is to
4330 report the #PF for the last byte if the access is larger than 64 bytes.
4331 This is probably not correct, but we can work that out later; the main
4332 objective now is to get FXSAVE to work like on real hardware and to
4333 make bs3-cpu-basic2 work. */
4334 if (cbAccess <= 64)
4335 { /* likely*/ }
4336 else
4337 GCPtrWhere += cbAccess - 1;
4338
4339 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
4340 uErr, GCPtrWhere);
4341}
4342
4343#ifdef IEM_WITH_SETJMP
4344/** \#PF(n) - 0e, longjmp. */
4345DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess,
4346 uint32_t fAccess, int rc) IEM_NOEXCEPT_MAY_LONGJMP
4347{
4348 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, cbAccess, fAccess, rc)));
4349}
4350#endif
4351
4352
4353/** \#MF(0) - 10. */
4354VBOXSTRICTRC iemRaiseMathFault(PVMCPUCC pVCpu) RT_NOEXCEPT
4355{
4356 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_NE)
4357 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4358
4359 /* Convert a #MF into a FERR -> IRQ 13. See @bugref{6117}. */
4360 PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13 /* u8Irq */, 1 /* u8Level */, 0 /* uTagSrc */);
4361 return iemRegUpdateRipAndFinishClearingRF(pVCpu);
4362}
4363
4364
4365/** \#AC(0) - 11. */
4366VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPUCC pVCpu) RT_NOEXCEPT
4367{
4368 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4369}
4370
4371#ifdef IEM_WITH_SETJMP
4372/** \#AC(0) - 11, longjmp. */
4373DECL_NO_RETURN(void) iemRaiseAlignmentCheckExceptionJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4374{
4375 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaiseAlignmentCheckException(pVCpu)));
4376}
4377#endif
4378
4379
4380/** \#XF(0)/\#XM(0) - 19. */
4381VBOXSTRICTRC iemRaiseSimdFpException(PVMCPUCC pVCpu) RT_NOEXCEPT
4382{
4383 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_XF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4384}
4385
4386
4387/** Accessed via IEMOP_RAISE_DIVIDE_ERROR. */
4388IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
4389{
4390 NOREF(cbInstr);
4391 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4392}
4393
4394
4395/** Accessed via IEMOP_RAISE_INVALID_LOCK_PREFIX. */
4396IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
4397{
4398 NOREF(cbInstr);
4399 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4400}
4401
4402
4403/** Accessed via IEMOP_RAISE_INVALID_OPCODE. */
4404IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
4405{
4406 NOREF(cbInstr);
4407 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4408}
4409
4410
4411/** @} */
4412
4413/** @name Common opcode decoders.
4414 * @{
4415 */
4416//#include <iprt/mem.h>
4417
4418/**
4419 * Used to add extra details about a stub case.
4420 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4421 */
4422void iemOpStubMsg2(PVMCPUCC pVCpu) RT_NOEXCEPT
4423{
4424#if defined(LOG_ENABLED) && defined(IN_RING3)
4425 PVM pVM = pVCpu->CTX_SUFF(pVM);
4426 char szRegs[4096];
4427 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
4428 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
4429 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
4430 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
4431 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
4432 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
4433 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
4434 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
4435 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
4436 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
4437 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
4438 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4439 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4440 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4441 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4442 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4443 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4444 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4445 " efer=%016VR{efer}\n"
4446 " pat=%016VR{pat}\n"
4447 " sf_mask=%016VR{sf_mask}\n"
4448 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4449 " lstar=%016VR{lstar}\n"
4450 " star=%016VR{star} cstar=%016VR{cstar}\n"
4451 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4452 );
4453
4454 char szInstr[256];
4455 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4456 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4457 szInstr, sizeof(szInstr), NULL);
4458
4459 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
4460#else
4461 RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip);
4462#endif
4463}
4464
4465/** @} */
4466
4467
4468
4469/** @name Register Access.
4470 * @{
4471 */
4472
4473/**
4474 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
4475 *
4476 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4477 * segment limit.
4478 *
4479 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4480 * @param cbInstr Instruction size.
4481 * @param offNextInstr The offset of the next instruction.
4482 * @param enmEffOpSize Effective operand size.
4483 */
4484VBOXSTRICTRC iemRegRipRelativeJumpS8AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int8_t offNextInstr,
4485 IEMMODE enmEffOpSize) RT_NOEXCEPT
4486{
4487 switch (enmEffOpSize)
4488 {
4489 case IEMMODE_16BIT:
4490 {
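            /* Note: the uint16_t arithmetic wraps at 64K, matching the 16-bit operand-size behaviour of IP. */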
4491 uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + (int16_t)offNextInstr;
4492 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
4493 || IEM_IS_64BIT_CODE(pVCpu) /* no CS limit checks in 64-bit mode */))
4494 pVCpu->cpum.GstCtx.rip = uNewIp;
4495 else
4496 return iemRaiseGeneralProtectionFault0(pVCpu);
4497 break;
4498 }
4499
4500 case IEMMODE_32BIT:
4501 {
4502 Assert(!IEM_IS_64BIT_CODE(pVCpu));
4503 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
4504
4505 uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + (int32_t)offNextInstr;
4506 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
4507 pVCpu->cpum.GstCtx.rip = uNewEip;
4508 else
4509 return iemRaiseGeneralProtectionFault0(pVCpu);
4510 break;
4511 }
4512
4513 case IEMMODE_64BIT:
4514 {
4515 Assert(IEM_IS_64BIT_CODE(pVCpu));
4516
4517 uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
4518 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
4519 pVCpu->cpum.GstCtx.rip = uNewRip;
4520 else
4521 return iemRaiseGeneralProtectionFault0(pVCpu);
4522 break;
4523 }
4524
4525 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4526 }
4527
4528#ifndef IEM_WITH_CODE_TLB
4529 /* Flush the prefetch buffer. */
4530 pVCpu->iem.s.cbOpcode = cbInstr;
4531#endif
4532
4533 /*
4534 * Clear RF and finish the instruction (maybe raise #DB).
4535 */
4536 return iemRegFinishClearingRF(pVCpu);
4537}
4538
4539
4540/**
4541 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
4542 *
4543 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4544 * segment limit.
4545 *
4546 * @returns Strict VBox status code.
4547 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4548 * @param cbInstr Instruction size.
4549 * @param offNextInstr The offset of the next instruction.
4550 */
4551VBOXSTRICTRC iemRegRipRelativeJumpS16AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int16_t offNextInstr) RT_NOEXCEPT
4552{
4553 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
4554
4555 uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + offNextInstr;
4556 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
4557 || IEM_IS_64BIT_CODE(pVCpu) /* no limit checking in 64-bit mode */))
4558 pVCpu->cpum.GstCtx.rip = uNewIp;
4559 else
4560 return iemRaiseGeneralProtectionFault0(pVCpu);
4561
4562#ifndef IEM_WITH_CODE_TLB
4563 /* Flush the prefetch buffer. */
4564 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4565#endif
4566
4567 /*
4568 * Clear RF and finish the instruction (maybe raise #DB).
4569 */
4570 return iemRegFinishClearingRF(pVCpu);
4571}
4572
4573
4574/**
4575 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
4576 *
4577 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4578 * segment limit.
4579 *
4580 * @returns Strict VBox status code.
4581 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4582 * @param cbInstr Instruction size.
4583 * @param offNextInstr The offset of the next instruction.
4584 * @param enmEffOpSize Effective operand size.
4585 */
4586VBOXSTRICTRC iemRegRipRelativeJumpS32AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int32_t offNextInstr,
4587 IEMMODE enmEffOpSize) RT_NOEXCEPT
4588{
4589 if (enmEffOpSize == IEMMODE_32BIT)
4590 {
4591 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX); Assert(!IEM_IS_64BIT_CODE(pVCpu));
4592
4593 uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + offNextInstr;
4594 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
4595 pVCpu->cpum.GstCtx.rip = uNewEip;
4596 else
4597 return iemRaiseGeneralProtectionFault0(pVCpu);
4598 }
4599 else
4600 {
4601 Assert(enmEffOpSize == IEMMODE_64BIT);
4602
4603 uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
4604 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
4605 pVCpu->cpum.GstCtx.rip = uNewRip;
4606 else
4607 return iemRaiseGeneralProtectionFault0(pVCpu);
4608 }
4609
4610#ifndef IEM_WITH_CODE_TLB
4611 /* Flush the prefetch buffer. */
4612 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4613#endif
4614
4615 /*
4616 * Clear RF and finish the instruction (maybe raise #DB).
4617 */
4618 return iemRegFinishClearingRF(pVCpu);
4619}
4620
4621
4622/**
4623 * Performs a near jump to the specified address.
4624 *
4625 * May raise a \#GP(0) if the new IP is outside the code segment limit.
4626 *
4627 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4628 * @param uNewIp The new IP value.
4629 */
4630VBOXSTRICTRC iemRegRipJumpU16AndFinishClearningRF(PVMCPUCC pVCpu, uint16_t uNewIp) RT_NOEXCEPT
4631{
4632 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
4633 || IEM_IS_64BIT_CODE(pVCpu) /* no limit checks in 64-bit mode */))
4634 pVCpu->cpum.GstCtx.rip = uNewIp;
4635 else
4636 return iemRaiseGeneralProtectionFault0(pVCpu);
4637 /** @todo Test 16-bit jump in 64-bit mode. */
4638
4639#ifndef IEM_WITH_CODE_TLB
4640 /* Flush the prefetch buffer. */
4641 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4642#endif
4643
4644 /*
4645 * Clear RF and finish the instruction (maybe raise #DB).
4646 */
4647 return iemRegFinishClearingRF(pVCpu);
4648}
4649
4650
4651/**
4652 * Performs a near jump to the specified address.
4653 *
4654 * May raise a \#GP(0) if the new RIP is outside the code segment limit.
4655 *
4656 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4657 * @param uNewEip The new EIP value.
4658 */
4659VBOXSTRICTRC iemRegRipJumpU32AndFinishClearningRF(PVMCPUCC pVCpu, uint32_t uNewEip) RT_NOEXCEPT
4660{
4661 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
4662 Assert(!IEM_IS_64BIT_CODE(pVCpu));
4663
4664 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
4665 pVCpu->cpum.GstCtx.rip = uNewEip;
4666 else
4667 return iemRaiseGeneralProtectionFault0(pVCpu);
4668
4669#ifndef IEM_WITH_CODE_TLB
4670 /* Flush the prefetch buffer. */
4671 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4672#endif
4673
4674 /*
4675 * Clear RF and finish the instruction (maybe raise #DB).
4676 */
4677 return iemRegFinishClearingRF(pVCpu);
4678}
4679
4680
4681/**
4682 * Performs a near jump to the specified address.
4683 *
4684 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4685 * segment limit.
4686 *
4687 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4688 * @param uNewRip The new RIP value.
4689 */
4690VBOXSTRICTRC iemRegRipJumpU64AndFinishClearningRF(PVMCPUCC pVCpu, uint64_t uNewRip) RT_NOEXCEPT
4691{
4692 Assert(IEM_IS_64BIT_CODE(pVCpu));
4693
4694 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
4695 pVCpu->cpum.GstCtx.rip = uNewRip;
4696 else
4697 return iemRaiseGeneralProtectionFault0(pVCpu);
4698
4699#ifndef IEM_WITH_CODE_TLB
4700 /* Flush the prefetch buffer. */
4701 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4702#endif
4703
4704 /*
4705 * Clear RF and finish the instruction (maybe raise #DB).
4706 */
4707 return iemRegFinishClearingRF(pVCpu);
4708}
4709
4710/** @} */
4711
4712
4713/** @name FPU access and helpers.
4714 *
4715 * @{
4716 */
4717
4718/**
4719 * Updates the x87.DS and FPUDP registers.
4720 *
4721 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4722 * @param pFpuCtx The FPU context.
4723 * @param iEffSeg The effective segment register.
4724 * @param GCPtrEff The effective address relative to @a iEffSeg.
4725 */
4726DECLINLINE(void) iemFpuUpdateDP(PVMCPUCC pVCpu, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
4727{
4728 RTSEL sel;
4729 switch (iEffSeg)
4730 {
4731 case X86_SREG_DS: sel = pVCpu->cpum.GstCtx.ds.Sel; break;
4732 case X86_SREG_SS: sel = pVCpu->cpum.GstCtx.ss.Sel; break;
4733 case X86_SREG_CS: sel = pVCpu->cpum.GstCtx.cs.Sel; break;
4734 case X86_SREG_ES: sel = pVCpu->cpum.GstCtx.es.Sel; break;
4735 case X86_SREG_FS: sel = pVCpu->cpum.GstCtx.fs.Sel; break;
4736 case X86_SREG_GS: sel = pVCpu->cpum.GstCtx.gs.Sel; break;
4737 default:
4738 AssertMsgFailed(("%d\n", iEffSeg));
4739 sel = pVCpu->cpum.GstCtx.ds.Sel;
4740 }
4741 /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
4742 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
4743 {
4744 pFpuCtx->DS = 0;
4745 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
4746 }
4747 else if (!IEM_IS_LONG_MODE(pVCpu)) /** @todo this is weird. explain. */
4748 {
4749 pFpuCtx->DS = sel;
4750 pFpuCtx->FPUDP = GCPtrEff;
4751 }
4752 else
4753 *(uint64_t *)&pFpuCtx->FPUDP = GCPtrEff;
4754}
4755
4756
4757/**
4758 * Rotates the stack registers in the push direction.
4759 *
4760 * @param pFpuCtx The FPU context.
4761 * @remarks This is a complete waste of time, but fxsave stores the registers in
4762 * stack order.
4763 */
4764DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
4765{
4766 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
4767 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
4768 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
4769 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
4770 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
4771 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
4772 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
4773 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
4774 pFpuCtx->aRegs[0].r80 = r80Tmp;
4775}
4776
4777
4778/**
4779 * Rotates the stack registers in the pop direction.
4780 *
4781 * @param pFpuCtx The FPU context.
4782 * @remarks This is a complete waste of time, but fxsave stores the registers in
4783 * stack order.
4784 */
4785DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
4786{
4787 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
4788 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
4789 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
4790 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
4791 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
4792 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
4793 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
4794 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
4795 pFpuCtx->aRegs[7].r80 = r80Tmp;
4796}
4797
4798
4799/**
4800 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
4801 * exception prevents it.
4802 *
4803 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4804 * @param pResult The FPU operation result to push.
4805 * @param pFpuCtx The FPU context.
4806 */
4807static void iemFpuMaybePushResult(PVMCPU pVCpu, PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx) RT_NOEXCEPT
4808{
4809 /* Update FSW and bail if there are pending exceptions afterwards. */
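    /* The IE/DE/ZE exception flags in FSW occupy the same bit positions as the
       IM/DM/ZM mask bits in FCW, so ANDing the flags with the inverted mask
       leaves only the unmasked exceptions. */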
4810 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
4811 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
4812 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4813 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4814 {
4815 if ((fFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4816 Log11(("iemFpuMaybePushResult: %04x:%08RX64: FSW %#x -> %#x\n",
4817 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fFsw));
4818 pFpuCtx->FSW = fFsw;
4819 return;
4820 }
4821
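    /* Adding 7 is the same as subtracting 1 modulo 8, i.e. the push decrements TOP. */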
4822 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
4823 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
4824 {
4825 /* All is fine, push the actual value. */
4826 pFpuCtx->FTW |= RT_BIT(iNewTop);
4827 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
4828 }
4829 else if (pFpuCtx->FCW & X86_FCW_IM)
4830 {
4831 /* Masked stack overflow, push QNaN. */
4832 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
4833 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
4834 }
4835 else
4836 {
4837 /* Raise stack overflow, don't push anything. */
4838 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
4839 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
4840 Log11(("iemFpuMaybePushResult: %04x:%08RX64: stack overflow (FSW=%#x)\n",
4841 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
4842 return;
4843 }
4844
4845 fFsw &= ~X86_FSW_TOP_MASK;
4846 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
4847 pFpuCtx->FSW = fFsw;
4848
4849 iemFpuRotateStackPush(pFpuCtx);
4850 RT_NOREF(pVCpu);
4851}
4852
4853
4854/**
4855 * Stores a result in a FPU register and updates the FSW and FTW.
4856 *
4857 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4858 * @param pFpuCtx The FPU context.
4859 * @param pResult The result to store.
4860 * @param iStReg Which FPU register to store it in.
4861 */
4862static void iemFpuStoreResultOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg) RT_NOEXCEPT
4863{
4864 Assert(iStReg < 8);
4865 uint16_t fNewFsw = pFpuCtx->FSW;
4866 uint16_t const iReg = (X86_FSW_TOP_GET(fNewFsw) + iStReg) & X86_FSW_TOP_SMASK;
4867 fNewFsw &= ~X86_FSW_C_MASK;
4868 fNewFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
4869 if ((fNewFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4870 Log11(("iemFpuStoreResultOnly: %04x:%08RX64: FSW %#x -> %#x\n",
4871 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fNewFsw));
4872 pFpuCtx->FSW = fNewFsw;
4873 pFpuCtx->FTW |= RT_BIT(iReg);
4874 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
4875 RT_NOREF(pVCpu);
4876}
4877
4878
4879/**
4880 * Only updates the FPU status word (FSW) with the result of the current
4881 * instruction.
4882 *
4883 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4884 * @param pFpuCtx The FPU context.
4885 * @param u16FSW The FSW output of the current instruction.
4886 */
4887static void iemFpuUpdateFSWOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint16_t u16FSW) RT_NOEXCEPT
4888{
4889 uint16_t fNewFsw = pFpuCtx->FSW;
4890 fNewFsw &= ~X86_FSW_C_MASK;
4891 fNewFsw |= u16FSW & ~X86_FSW_TOP_MASK;
4892 if ((fNewFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4893 Log11(("iemFpuUpdateFSWOnly: %04x:%08RX64: FSW %#x -> %#x\n",
4894 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fNewFsw));
4895 pFpuCtx->FSW = fNewFsw;
4896 RT_NOREF(pVCpu);
4897}
4898
4899
4900/**
4901 * Pops one item off the FPU stack if no pending exception prevents it.
4902 *
4903 * @param pFpuCtx The FPU context.
4904 */
4905static void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx) RT_NOEXCEPT
4906{
4907 /* Check pending exceptions. */
4908 uint16_t uFSW = pFpuCtx->FSW;
4909 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4910 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4911 return;
4912
4913 /* TOP++ (pop one entry): adding 9 is the same as adding 1 modulo 8. */
4914 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
4915 uFSW &= ~X86_FSW_TOP_MASK;
4916 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
4917 pFpuCtx->FSW = uFSW;
4918
4919 /* Mark the previous ST0 as empty. */
4920 iOldTop >>= X86_FSW_TOP_SHIFT;
4921 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
4922
4923 /* Rotate the registers. */
4924 iemFpuRotateStackPop(pFpuCtx);
4925}
4926
4927
4928/**
4929 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
4930 *
4931 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4932 * @param pResult The FPU operation result to push.
4933 */
4934void iemFpuPushResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult) RT_NOEXCEPT
4935{
4936 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4937 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4938 iemFpuMaybePushResult(pVCpu, pResult, pFpuCtx);
4939}
4940
4941
4942/**
4943 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
4944 * and sets FPUDP and FPUDS.
4945 *
4946 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4947 * @param pResult The FPU operation result to push.
4948 * @param iEffSeg The effective segment register.
4949 * @param GCPtrEff The effective address relative to @a iEffSeg.
4950 */
4951void iemFpuPushResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
4952{
4953 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4954 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
4955 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4956 iemFpuMaybePushResult(pVCpu, pResult, pFpuCtx);
4957}
4958
4959
4960/**
4961 * Replaces ST0 with the first value and pushes the second onto the FPU stack,
4962 * unless a pending exception prevents it.
4963 *
4964 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4965 * @param pResult The FPU operation result to store and push.
4966 */
4967void iemFpuPushResultTwo(PVMCPUCC pVCpu, PIEMFPURESULTTWO pResult) RT_NOEXCEPT
4968{
4969 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4970 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4971
4972 /* Update FSW and bail if there are pending exceptions afterwards. */
4973 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
4974 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
4975 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4976 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4977 {
4978 if ((fFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4979 Log11(("iemFpuPushResultTwo: %04x:%08RX64: FSW %#x -> %#x\n",
4980 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fFsw));
4981 pFpuCtx->FSW = fFsw;
4982 return;
4983 }
4984
4985 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
4986 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
4987 {
4988 /* All is fine, push the actual value. */
4989 pFpuCtx->FTW |= RT_BIT(iNewTop);
4990 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
4991 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
4992 }
4993 else if (pFpuCtx->FCW & X86_FCW_IM)
4994 {
4995 /* Masked stack overflow, push QNaN. */
4996 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
4997 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
4998 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
4999 }
5000 else
5001 {
5002 /* Raise stack overflow, don't push anything. */
5003 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
5004 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
5005 Log11(("iemFpuPushResultTwo: %04x:%08RX64: stack overflow (FSW=%#x)\n",
5006 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5007 return;
5008 }
5009
5010 fFsw &= ~X86_FSW_TOP_MASK;
5011 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
5012 pFpuCtx->FSW = fFsw;
5013
5014 iemFpuRotateStackPush(pFpuCtx);
5015}
5016
5017
5018/**
5019 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
5020 * FOP.
5021 *
5022 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5023 * @param pResult The result to store.
5024 * @param iStReg Which FPU register to store it in.
5025 */
5026void iemFpuStoreResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg) RT_NOEXCEPT
5027{
5028 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5029 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5030 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5031}
5032
5033
5034/**
5035 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
5036 * FOP, and then pops the stack.
5037 *
5038 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5039 * @param pResult The result to store.
5040 * @param iStReg Which FPU register to store it in.
5041 */
5042void iemFpuStoreResultThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg) RT_NOEXCEPT
5043{
5044 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5045 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5046 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5047 iemFpuMaybePopOne(pFpuCtx);
5048}
5049
5050
5051/**
5052 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
5053 * FPUDP, and FPUDS.
5054 *
5055 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5056 * @param pResult The result to store.
5057 * @param iStReg Which FPU register to store it in.
5058 * @param iEffSeg The effective memory operand selector register.
5059 * @param GCPtrEff The effective memory operand offset.
5060 */
5061void iemFpuStoreResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
5062 uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
5063{
5064 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5065 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5066 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5067 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5068}
5069
5070
5071/**
5072 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
5073 * FPUDP, and FPUDS, and then pops the stack.
5074 *
5075 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5076 * @param pResult The result to store.
5077 * @param iStReg Which FPU register to store it in.
5078 * @param iEffSeg The effective memory operand selector register.
5079 * @param GCPtrEff The effective memory operand offset.
5080 */
5081void iemFpuStoreResultWithMemOpThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult,
5082 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
5083{
5084 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5085 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5086 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5087 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5088 iemFpuMaybePopOne(pFpuCtx);
5089}
5090
5091
5092/**
5093 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
5094 *
5095 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5096 */
5097void iemFpuUpdateOpcodeAndIp(PVMCPUCC pVCpu) RT_NOEXCEPT
5098{
5099 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5100 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5101}
5102
5103
5104/**
5105 * Updates the FSW, FOP, FPUIP, and FPUCS.
5106 *
5107 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5108 * @param u16FSW The FSW from the current instruction.
5109 */
5110void iemFpuUpdateFSW(PVMCPUCC pVCpu, uint16_t u16FSW) RT_NOEXCEPT
5111{
5112 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5113 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5114 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5115}
5116
5117
5118/**
5119 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
5120 *
5121 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5122 * @param u16FSW The FSW from the current instruction.
5123 */
5124void iemFpuUpdateFSWThenPop(PVMCPUCC pVCpu, uint16_t u16FSW) RT_NOEXCEPT
5125{
5126 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5127 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5128 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5129 iemFpuMaybePopOne(pFpuCtx);
5130}
5131
5132
5133/**
5134 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
5135 *
5136 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5137 * @param u16FSW The FSW from the current instruction.
5138 * @param iEffSeg The effective memory operand selector register.
5139 * @param GCPtrEff The effective memory operand offset.
5140 */
5141void iemFpuUpdateFSWWithMemOp(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
5142{
5143 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5144 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5145 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5146 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5147}
5148
5149
5150/**
5151 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
5152 *
5153 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5154 * @param u16FSW The FSW from the current instruction.
5155 */
5156void iemFpuUpdateFSWThenPopPop(PVMCPUCC pVCpu, uint16_t u16FSW) RT_NOEXCEPT
5157{
5158 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5159 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5160 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5161 iemFpuMaybePopOne(pFpuCtx);
5162 iemFpuMaybePopOne(pFpuCtx);
5163}
5164
5165
5166/**
5167 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
5168 *
5169 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5170 * @param u16FSW The FSW from the current instruction.
5171 * @param iEffSeg The effective memory operand selector register.
5172 * @param GCPtrEff The effective memory operand offset.
5173 */
5174void iemFpuUpdateFSWWithMemOpThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
5175{
5176 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5177 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5178 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5179 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5180 iemFpuMaybePopOne(pFpuCtx);
5181}
5182
5183
5184/**
5185 * Worker routine for raising an FPU stack underflow exception.
5186 *
5187 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5188 * @param pFpuCtx The FPU context.
5189 * @param iStReg The stack register being accessed.
5190 */
5191static void iemFpuStackUnderflowOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint8_t iStReg)
5192{
5193 Assert(iStReg < 8 || iStReg == UINT8_MAX);
5194 if (pFpuCtx->FCW & X86_FCW_IM)
5195 {
5196 /* Masked underflow. */
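            /* The masked response sets IE and SF with C1 clear (indicating underflow)
               and loads a QNaN into the destination register, if one was specified. */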
5197 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5198 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5199 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5200 if (iStReg != UINT8_MAX)
5201 {
5202 pFpuCtx->FTW |= RT_BIT(iReg);
5203 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
5204 }
5205 }
5206 else
5207 {
5208 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5209 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5210 Log11(("iemFpuStackUnderflowOnly: %04x:%08RX64: underflow (FSW=%#x)\n",
5211 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5212 }
5213 RT_NOREF(pVCpu);
5214}
5215
5216
5217/**
5218 * Raises a FPU stack underflow exception.
5219 *
5220 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5221 * @param iStReg The destination register that should be loaded
5222 * with QNaN if \#IS is not masked. Specify
5223 * UINT8_MAX if none (like for fcom).
5224 */
5225void iemFpuStackUnderflow(PVMCPUCC pVCpu, uint8_t iStReg) RT_NOEXCEPT
5226{
5227 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5228 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5229 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5230}
5231
5232
5233void iemFpuStackUnderflowWithMemOp(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
5234{
5235 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5236 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5237 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5238 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5239}
5240
5241
5242void iemFpuStackUnderflowThenPop(PVMCPUCC pVCpu, uint8_t iStReg) RT_NOEXCEPT
5243{
5244 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5245 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5246 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5247 iemFpuMaybePopOne(pFpuCtx);
5248}
5249
5250
5251void iemFpuStackUnderflowWithMemOpThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
5252{
5253 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5254 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5255 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5256 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5257 iemFpuMaybePopOne(pFpuCtx);
5258}
5259
5260
5261void iemFpuStackUnderflowThenPopPop(PVMCPUCC pVCpu) RT_NOEXCEPT
5262{
5263 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5264 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5265 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, UINT8_MAX);
5266 iemFpuMaybePopOne(pFpuCtx);
5267 iemFpuMaybePopOne(pFpuCtx);
5268}
5269
5270
5271void iemFpuStackPushUnderflow(PVMCPUCC pVCpu) RT_NOEXCEPT
5272{
5273 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5274 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5275
5276 if (pFpuCtx->FCW & X86_FCW_IM)
5277 {
5278 /* Masked underflow - Push QNaN. */
5279 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5280 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5281 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5282 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5283 pFpuCtx->FTW |= RT_BIT(iNewTop);
5284 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5285 iemFpuRotateStackPush(pFpuCtx);
5286 }
5287 else
5288 {
5289 /* Exception pending - don't change TOP or the register stack. */
5290 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5291 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5292 Log11(("iemFpuStackPushUnderflow: %04x:%08RX64: underflow (FSW=%#x)\n",
5293 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5294 }
5295}
5296
5297
5298void iemFpuStackPushUnderflowTwo(PVMCPUCC pVCpu) RT_NOEXCEPT
5299{
5300 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5301 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5302
5303 if (pFpuCtx->FCW & X86_FCW_IM)
5304 {
5305 /* Masked underflow - Push QNaN. */
5306 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5307 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5308 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5309 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5310 pFpuCtx->FTW |= RT_BIT(iNewTop);
5311 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
5312 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5313 iemFpuRotateStackPush(pFpuCtx);
5314 }
5315 else
5316 {
5317 /* Exception pending - don't change TOP or the register stack. */
5318 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5319 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5320 Log11(("iemFpuStackPushUnderflowTwo: %04x:%08RX64: underflow (FSW=%#x)\n",
5321 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5322 }
5323}
5324
5325
5326/**
5327 * Worker routine for raising an FPU stack overflow exception on a push.
5328 *
5329 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5330 * @param pFpuCtx The FPU context.
5331 */
5332static void iemFpuStackPushOverflowOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx) RT_NOEXCEPT
5333{
5334 if (pFpuCtx->FCW & X86_FCW_IM)
5335 {
5336 /* Masked overflow. */
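        /* C1 is set to indicate that the stack fault is an overflow (it is clear for underflow). */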
5337 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5338 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5339 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
5340 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5341 pFpuCtx->FTW |= RT_BIT(iNewTop);
5342 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5343 iemFpuRotateStackPush(pFpuCtx);
5344 }
5345 else
5346 {
5347 /* Exception pending - don't change TOP or the register stack. */
5348 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5349 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5350 Log11(("iemFpuStackPushOverflowOnly: %04x:%08RX64: overflow (FSW=%#x)\n",
5351 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5352 }
5353 RT_NOREF(pVCpu);
5354}
5355
5356
5357/**
5358 * Raises a FPU stack overflow exception on a push.
5359 *
5360 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5361 */
5362void iemFpuStackPushOverflow(PVMCPUCC pVCpu) RT_NOEXCEPT
5363{
5364 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5365 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5366 iemFpuStackPushOverflowOnly(pVCpu, pFpuCtx);
5367}
5368
5369
5370/**
5371 * Raises a FPU stack overflow exception on a push with a memory operand.
5372 *
5373 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5374 * @param iEffSeg The effective memory operand selector register.
5375 * @param GCPtrEff The effective memory operand offset.
5376 */
5377void iemFpuStackPushOverflowWithMemOp(PVMCPUCC pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
5378{
5379 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5380 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5381 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5382 iemFpuStackPushOverflowOnly(pVCpu, pFpuCtx);
5383}
5384
5385/** @} */
5386
5387
5388/** @name SSE+AVX SIMD access and helpers.
5389 *
5390 * @{
5391 */
5392/**
5393 * Stores a result in a SIMD XMM register, updates the MXCSR.
5394 *
5395 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5396 * @param pResult The result to store.
5397 * @param iXmmReg Which SIMD XMM register to store the result in.
5398 */
5399void iemSseStoreResult(PVMCPUCC pVCpu, PCIEMSSERESULT pResult, uint8_t iXmmReg) RT_NOEXCEPT
5400{
5401 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5402 pFpuCtx->MXCSR |= pResult->MXCSR & X86_MXCSR_XCPT_FLAGS;
5403
5404 /* The result is only updated if there is no unmasked exception pending. */
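    /* The exception mask bits sit X86_MXCSR_XCPT_MASK_SHIFT bits above the corresponding
       flag bits; shifting the mask down aligns the two, so any flag that survives the
       AND below is an unmasked, pending exception. */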
5405 if (( ~((pFpuCtx->MXCSR & X86_MXCSR_XCPT_MASK) >> X86_MXCSR_XCPT_MASK_SHIFT)
5406 & (pFpuCtx->MXCSR & X86_MXCSR_XCPT_FLAGS)) == 0)
5407 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXmmReg] = pResult->uResult;
5408}
5409
5410
5411/**
5412 * Updates the MXCSR.
5413 *
5414 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5415 * @param fMxcsr The new MXCSR value.
5416 */
5417void iemSseUpdateMxcsr(PVMCPUCC pVCpu, uint32_t fMxcsr) RT_NOEXCEPT
5418{
5419 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5420 pFpuCtx->MXCSR |= fMxcsr & X86_MXCSR_XCPT_FLAGS;
5421}
5422/** @} */
5423
5424
5425/** @name Memory access.
5426 *
5427 * @{
5428 */
5429
5430
5431/**
5432 * Updates the IEMCPU::cbWritten counter if applicable.
5433 *
5434 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5435 * @param fAccess The access being accounted for.
5436 * @param cbMem The access size.
5437 */
5438DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPUCC pVCpu, uint32_t fAccess, size_t cbMem)
5439{
5440 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
5441 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
5442 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
5443}
5444
5445
5446/**
5447 * Applies the segment limit, base and attributes.
5448 *
5449 * This may raise a \#GP or \#SS.
5450 *
5451 * @returns VBox strict status code.
5452 *
5453 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5454 * @param fAccess The kind of access which is being performed.
5455 * @param iSegReg The index of the segment register to apply.
5456 * This is UINT8_MAX if none (for IDT, GDT, LDT,
5457 * TSS, ++).
5458 * @param cbMem The access size.
5459 * @param pGCPtrMem Pointer to the guest memory address to apply
5460 * segmentation to. Input and output parameter.
5461 */
5462VBOXSTRICTRC iemMemApplySegment(PVMCPUCC pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem) RT_NOEXCEPT
5463{
5464 if (iSegReg == UINT8_MAX)
5465 return VINF_SUCCESS;
5466
5467 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
5468 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
5469 switch (IEM_GET_CPU_MODE(pVCpu))
5470 {
5471 case IEMMODE_16BIT:
5472 case IEMMODE_32BIT:
5473 {
5474 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
5475 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
5476
5477 if ( pSel->Attr.n.u1Present
5478 && !pSel->Attr.n.u1Unusable)
5479 {
5480 Assert(pSel->Attr.n.u1DescType);
5481 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
5482 {
5483 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5484 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
5485 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
5486
5487 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
5488 {
5489 /** @todo CPL check. */
5490 }
5491
5492 /*
5493 * There are two kinds of data selectors, normal and expand down.
5494 */
5495 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
5496 {
5497 if ( GCPtrFirst32 > pSel->u32Limit
5498 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
5499 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5500 }
5501 else
5502 {
5503 /*
5504 * The upper boundary is defined by the B bit, not the G bit!
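 * Valid offsets for an expand-down segment are thus (limit, B ? 0xffffffff : 0xffff].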
5505 */
5506 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
5507 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
5508 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5509 }
5510 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
5511 }
5512 else
5513 {
5514 /*
5515 * A code selector can usually be used to read through it; writing is
5516 * only permitted in real and V8086 mode.
5517 */
5518 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5519 || ( (fAccess & IEM_ACCESS_TYPE_READ)
5520 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
5521 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
5522 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
5523
5524 if ( GCPtrFirst32 > pSel->u32Limit
5525 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
5526 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5527
5528 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
5529 {
5530 /** @todo CPL check. */
5531 }
5532
5533 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
5534 }
5535 }
5536 else
5537 return iemRaiseGeneralProtectionFault0(pVCpu);
5538 return VINF_SUCCESS;
5539 }
5540
5541 case IEMMODE_64BIT:
5542 {
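            /* In 64-bit mode only FS and GS have their base applied; the CS, DS, ES and SS bases are treated as zero. */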
5543 RTGCPTR GCPtrMem = *pGCPtrMem;
5544 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
5545 *pGCPtrMem = GCPtrMem + pSel->u64Base;
5546
5547 Assert(cbMem >= 1);
5548 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
5549 return VINF_SUCCESS;
5550 /** @todo We should probably raise \#SS(0) here if segment is SS; see AMD spec.
5551 * 4.12.2 "Data Limit Checks in 64-bit Mode". */
5552 return iemRaiseGeneralProtectionFault0(pVCpu);
5553 }
5554
5555 default:
5556 AssertFailedReturn(VERR_IEM_IPE_7);
5557 }
5558}
5559
5560
5561/**
5562 * Translates a virtual address to a physical address and checks if we
5563 * can access the page as specified.
5564 *
5565 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5566 * @param GCPtrMem The virtual address.
5567 * @param cbAccess The access size, for raising \#PF correctly for
5568 * FXSAVE and such.
5569 * @param fAccess The intended access.
5570 * @param pGCPhysMem Where to return the physical address.
5571 */
5572VBOXSTRICTRC iemMemPageTranslateAndCheckAccess(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint32_t cbAccess,
5573 uint32_t fAccess, PRTGCPHYS pGCPhysMem) RT_NOEXCEPT
5574{
5575 /** @todo Need a different PGM interface here. We're currently using
5576 * generic / REM interfaces. This won't cut it for R0. */
5577 /** @todo If/when PGM handles paged real-mode, we can remove the hack in
5578 * iemSvmWorldSwitch/iemVmxWorldSwitch to work around raising a page-fault
5579 * here. */
5580 PGMPTWALK Walk;
5581 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
5582 if (RT_FAILURE(rc))
5583 {
5584 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
5585 /** @todo Check unassigned memory in unpaged mode. */
5586 /** @todo Reserved bits in page tables. Requires new PGM interface. */
5587#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5588 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5589 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
5590#endif
5591 *pGCPhysMem = NIL_RTGCPHYS;
5592 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess, rc);
5593 }
5594
5595 /* If the page is writable and does not have the no-exec bit set, all
5596 access is allowed. Otherwise we'll have to check more carefully... */
5597 if ((Walk.fEffective & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
5598 {
5599 /* Write to read only memory? */
5600 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5601 && !(Walk.fEffective & X86_PTE_RW)
5602 && ( ( IEM_GET_CPL(pVCpu) == 3
5603 && !(fAccess & IEM_ACCESS_WHAT_SYS))
5604 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
5605 {
5606 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
5607 *pGCPhysMem = NIL_RTGCPHYS;
5608#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5609 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5610 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5611#endif
5612 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
5613 }
5614
5615 /* Kernel memory accessed by userland? */
5616 if ( !(Walk.fEffective & X86_PTE_US)
5617 && IEM_GET_CPL(pVCpu) == 3
5618 && !(fAccess & IEM_ACCESS_WHAT_SYS))
5619 {
5620 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
5621 *pGCPhysMem = NIL_RTGCPHYS;
5622#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5623 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5624 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5625#endif
5626 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess, VERR_ACCESS_DENIED);
5627 }
5628
5629 /* Executing non-executable memory? */
5630 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
5631 && (Walk.fEffective & X86_PTE_PAE_NX)
5632 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) )
5633 {
5634 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
5635 *pGCPhysMem = NIL_RTGCPHYS;
5636#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5637 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5638 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5639#endif
5640 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
5641 VERR_ACCESS_DENIED);
5642 }
5643 }
5644
5645 /*
5646 * Set the dirty / access flags.
5647 * ASSUMES this is set when the address is translated rather than on commit...
5648 */
5649 /** @todo testcase: check when A and D bits are actually set by the CPU. */
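    /* A write access needs both the Accessed and Dirty bits; reads and instruction fetches only need Accessed. */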
5650 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
5651 if ((Walk.fEffective & fAccessedDirty) != fAccessedDirty)
5652 {
5653 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
5654 AssertRC(rc2);
5655 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
5656 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
5657 }
5658
5659 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
5660 *pGCPhysMem = GCPhys;
5661 return VINF_SUCCESS;
5662}
5663
5664
5665/**
5666 * Looks up a memory mapping entry.
5667 *
5668 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
5669 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5670 * @param pvMem The memory address.
5671 * @param fAccess The access flags to match (only the type and what bits are compared).
5672 */
5673DECLINLINE(int) iemMapLookup(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess)
5674{
5675 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
5676 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
5677 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
5678 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5679 return 0;
5680 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
5681 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5682 return 1;
5683 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
5684 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5685 return 2;
5686 return VERR_NOT_FOUND;
5687}
5688
5689
5690/**
5691 * Finds a free memmap entry when using iNextMapping doesn't work.
5692 *
5693 * @returns Memory mapping index, 1024 on failure.
5694 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5695 */
5696static unsigned iemMemMapFindFree(PVMCPUCC pVCpu)
5697{
5698 /*
5699 * The easy case.
5700 */
5701 if (pVCpu->iem.s.cActiveMappings == 0)
5702 {
5703 pVCpu->iem.s.iNextMapping = 1;
5704 return 0;
5705 }
5706
5707 /* There should be enough mappings for all instructions. */
5708 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
5709
5710 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
5711 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
5712 return i;
5713
5714 AssertFailedReturn(1024);
5715}
5716
5717
5718/**
5719 * Commits a bounce buffer that needs writing back and unmaps it.
5720 *
5721 * @returns Strict VBox status code.
5722 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5723 * @param iMemMap The index of the buffer to commit.
5724 * @param fPostponeFail Whether we can postpone writer failures to ring-3.
5725 * Always false in ring-3, obviously.
5726 */
5727static VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPUCC pVCpu, unsigned iMemMap, bool fPostponeFail)
5728{
5729 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
5730 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
5731#ifdef IN_RING3
5732 Assert(!fPostponeFail);
5733 RT_NOREF_PV(fPostponeFail);
5734#endif
5735
5736 /*
5737 * Do the writing.
5738 */
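    /* The buffered write may span two physical pages: the first cbFirst bytes go to
       GCPhysFirst and the remaining cbSecond bytes, if any, to GCPhysSecond. */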
5739 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
5740 if (!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned)
5741 {
5742 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
5743 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
5744 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
5745 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
5746 {
5747 /*
5748 * Carefully and efficiently dealing with access handler return
5749 * codes makes this a little bloated.
5750 */
5751 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
5752 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
5753 pbBuf,
5754 cbFirst,
5755 PGMACCESSORIGIN_IEM);
5756 if (rcStrict == VINF_SUCCESS)
5757 {
5758 if (cbSecond)
5759 {
5760 rcStrict = PGMPhysWrite(pVM,
5761 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5762 pbBuf + cbFirst,
5763 cbSecond,
5764 PGMACCESSORIGIN_IEM);
5765 if (rcStrict == VINF_SUCCESS)
5766 { /* nothing */ }
5767 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5768 {
5769 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
5770 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5771 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5772 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5773 }
5774#ifndef IN_RING3
5775 else if (fPostponeFail)
5776 {
5777 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5778 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5779 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5780 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
5781 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5782 return iemSetPassUpStatus(pVCpu, rcStrict);
5783 }
5784#endif
5785 else
5786 {
5787 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5788 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5789 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5790 return rcStrict;
5791 }
5792 }
5793 }
5794 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5795 {
5796 if (!cbSecond)
5797 {
5798 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
5799 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
5800 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5801 }
5802 else
5803 {
5804 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
5805 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5806 pbBuf + cbFirst,
5807 cbSecond,
5808 PGMACCESSORIGIN_IEM);
5809 if (rcStrict2 == VINF_SUCCESS)
5810 {
5811 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
5812 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5813 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5814 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5815 }
5816 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
5817 {
5818 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
5819 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5820 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
5821 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
5822 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5823 }
5824#ifndef IN_RING3
5825 else if (fPostponeFail)
5826 {
5827 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5828 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5829 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5830 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
5831 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5832 return iemSetPassUpStatus(pVCpu, rcStrict);
5833 }
5834#endif
5835 else
5836 {
5837 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5838 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5839 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
5840 return rcStrict2;
5841 }
5842 }
5843 }
5844#ifndef IN_RING3
5845 else if (fPostponeFail)
5846 {
5847 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5848 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5849 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5850 if (!cbSecond)
5851 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
5852 else
5853 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
5854 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5855 return iemSetPassUpStatus(pVCpu, rcStrict);
5856 }
5857#endif
5858 else
5859 {
5860 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
5861 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5862 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5863 return rcStrict;
5864 }
5865 }
5866 else
5867 {
5868 /*
5869 * No access handlers, much simpler.
5870 */
5871 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
5872 if (RT_SUCCESS(rc))
5873 {
5874 if (cbSecond)
5875 {
5876 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
5877 if (RT_SUCCESS(rc))
5878 { /* likely */ }
5879 else
5880 {
5881 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5882 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5883 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
5884 return rc;
5885 }
5886 }
5887 }
5888 else
5889 {
5890 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
5891 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
5892 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5893 return rc;
5894 }
5895 }
5896 }
5897
5898#if defined(IEM_LOG_MEMORY_WRITES)
5899 Log(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
5900 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
5901 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
5902 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5903 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
5904 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
5905
5906 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
5907 g_cbIemWrote = cbWrote;
5908 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
5909#endif
5910
5911 /*
5912 * Free the mapping entry.
5913 */
5914 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
5915 Assert(pVCpu->iem.s.cActiveMappings != 0);
5916 pVCpu->iem.s.cActiveMappings--;
5917 return VINF_SUCCESS;
5918}
5919
5920
5921/**
5922 * iemMemMap worker that deals with a request crossing pages.
5923 */
5924static VBOXSTRICTRC
5925iemMemBounceBufferMapCrossPage(PVMCPUCC pVCpu, int iMemMap, void **ppvMem, size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
5926{
5927 Assert(cbMem <= GUEST_PAGE_SIZE);
5928
5929 /*
5930 * Do the address translations.
5931 */
5932 uint32_t const cbFirstPage = GUEST_PAGE_SIZE - (uint32_t)(GCPtrFirst & GUEST_PAGE_OFFSET_MASK);
5933 RTGCPHYS GCPhysFirst;
5934 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, cbFirstPage, fAccess, &GCPhysFirst);
5935 if (rcStrict != VINF_SUCCESS)
5936 return rcStrict;
5937 Assert((GCPhysFirst & GUEST_PAGE_OFFSET_MASK) == (GCPtrFirst & GUEST_PAGE_OFFSET_MASK));
5938
5939 uint32_t const cbSecondPage = (uint32_t)cbMem - cbFirstPage;
5940 RTGCPHYS GCPhysSecond;
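    /* The second page starts where the page containing the last accessed byte begins. */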
5941 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK,
5942 cbSecondPage, fAccess, &GCPhysSecond);
5943 if (rcStrict != VINF_SUCCESS)
5944 return rcStrict;
5945 Assert((GCPhysSecond & GUEST_PAGE_OFFSET_MASK) == 0);
5946 GCPhysSecond &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK; /** @todo why? */
5947
5948 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
5949
5950 /*
5951 * Read in the current memory content if it's a read, execute or partial
5952 * write access.
5953 */
5954 uint8_t * const pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
5955
5956 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
5957 {
5958 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
5959 {
5960 /*
5961 * Must carefully deal with access handler status codes here,
5962 * which makes the code a bit bloated.
5963 */
5964 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
5965 if (rcStrict == VINF_SUCCESS)
5966 {
5967 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
5968 if (rcStrict == VINF_SUCCESS)
5969 { /*likely */ }
5970 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5971 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5972 else
5973 {
5974 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (!!)\n",
5975 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5976 return rcStrict;
5977 }
5978 }
5979 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5980 {
5981 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
5982 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
5983 {
5984 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
5985 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5986 }
5987 else
5988 {
5989 Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
5990 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
5991 return rcStrict2;
5992 }
5993 }
5994 else
5995 {
5996 Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
5997 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
5998 return rcStrict;
5999 }
6000 }
6001 else
6002 {
6003 /*
6004 * No informational status codes here, much more straightforward.
6005 */
6006 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
6007 if (RT_SUCCESS(rc))
6008 {
6009 Assert(rc == VINF_SUCCESS);
6010 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
6011 if (RT_SUCCESS(rc))
6012 Assert(rc == VINF_SUCCESS);
6013 else
6014 {
6015 Log(("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
6016 return rc;
6017 }
6018 }
6019 else
6020 {
6021 Log(("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
6022 return rc;
6023 }
6024 }
6025 }
6026#ifdef VBOX_STRICT
6027 else
6028 memset(pbBuf, 0xcc, cbMem);
6029 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
6030 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
6031#endif
6032 AssertCompileMemberAlignment(VMCPU, iem.s.aBounceBuffers, 64);
6033
6034 /*
6035 * Commit the bounce buffer entry.
6036 */
6037 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
6038 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
6039 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
6040 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
6041 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
6042 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
6043 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
6044 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6045 pVCpu->iem.s.cActiveMappings++;
6046
6047 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6048 *ppvMem = pbBuf;
6049 return VINF_SUCCESS;
6050}
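/*
 * Worked example of the split above, assuming the usual 4 KiB guest page
 * size (GUEST_PAGE_SIZE = 0x1000):
 *
 *      GCPtrFirst   = 0x00007ffe (page offset 0xffe), cbMem = 4
 *      cbFirstPage  = 0x1000 - 0xffe = 2  -> accessed via GCPhysFirst
 *      cbSecondPage = 4 - 2          = 2  -> accessed via GCPhysSecond, which is
 *                                            translated from
 *                                            (GCPtrFirst + cbMem - 1) & ~0xfff = 0x00008000
 *
 * Both halves are placed back to back in aBounceBuffers[iMemMap].ab[], so the
 * caller sees one contiguous buffer of cbMem bytes.
 */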
6051
6052
6053/**
6054 * iemMemMap worker that deals with iemMemPageMap failures.
6055 */
6056static VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPUCC pVCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
6057 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
6058{
6059 /*
6060 * Filter out conditions we can handle and the ones which shouldn't happen.
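 * VERR_PGM_PHYS_TLB_CATCH_WRITE and VERR_PGM_PHYS_TLB_CATCH_ALL indicate that
 * the page must be accessed thru access handlers (e.g. write monitored RAM or
 * MMIO), while VERR_PGM_PHYS_TLB_UNASSIGNED means nothing backs the address,
 * so reads return all bits set (see the 0xff memset below).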
6061 */
6062 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
6063 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
6064 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
6065 {
6066 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
6067 return rcMap;
6068 }
6069 pVCpu->iem.s.cPotentialExits++;
6070
6071 /*
6072 * Read in the current memory content if it's a read, execute or partial
6073 * write access.
6074 */
6075 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
6076 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
6077 {
6078 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
6079 memset(pbBuf, 0xff, cbMem);
6080 else
6081 {
6082 int rc;
6083 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
6084 {
6085 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
6086 if (rcStrict == VINF_SUCCESS)
6087 { /* nothing */ }
6088 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6089 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6090 else
6091 {
6092 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6093 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6094 return rcStrict;
6095 }
6096 }
6097 else
6098 {
6099 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
6100 if (RT_SUCCESS(rc))
6101 { /* likely */ }
6102 else
6103 {
6104 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n",
6105 GCPhysFirst, rc));
6106 return rc;
6107 }
6108 }
6109 }
6110 }
6111#ifdef VBOX_STRICT
6112 else
6113 memset(pbBuf, 0xcc, cbMem);
6114#endif
6115#ifdef VBOX_STRICT
6116 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
6117 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
6118#endif
6119
6120 /*
6121 * Commit the bounce buffer entry.
6122 */
6123 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
6124 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
6125 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
6126 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
6127 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
6128 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
6129 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
6130 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6131 pVCpu->iem.s.cActiveMappings++;
6132
6133 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6134 *ppvMem = pbBuf;
6135 return VINF_SUCCESS;
6136}
6137
6138
6139
6140/**
6141 * Maps the specified guest memory for the given kind of access.
6142 *
6143 * This may be using bounce buffering of the memory if it's crossing a page
6144 * boundary or if there is an access handler installed for any of it. Because
6145 * of lock prefix guarantees, we're in for some extra clutter when this
6146 * happens.
6147 *
6148 * This may raise a \#GP, \#SS, \#PF or \#AC.
6149 *
6150 * @returns VBox strict status code.
6151 *
6152 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6153 * @param ppvMem Where to return the pointer to the mapped memory.
6154 * @param cbMem The number of bytes to map. This is usually 1, 2, 4, 6,
6155 * 8, 12, 16, 32 or 512. When used by string operations
6156 * it can be up to a page.
6157 * @param iSegReg The index of the segment register to use for this
6158 * access. The base and limits are checked. Use UINT8_MAX
6159 * to indicate that no segmentation is required (for IDT,
6160 * GDT and LDT accesses).
6161 * @param GCPtrMem The address of the guest memory.
6162 * @param fAccess How the memory is being accessed. The
6163 * IEM_ACCESS_TYPE_XXX bit is used to figure out how to map
6164 * the memory, while the IEM_ACCESS_WHAT_XXX bit is used
6165 * when raising exceptions.
6166 * @param uAlignCtl Alignment control:
6167 * - Bits 15:0 is the alignment mask.
6168 * - Bits 31:16 for flags like IEM_MEMMAP_F_ALIGN_GP,
6169 * IEM_MEMMAP_F_ALIGN_SSE, and
6170 * IEM_MEMMAP_F_ALIGN_GP_OR_AC.
6171 * Pass zero to skip alignment.
6172 */
6173VBOXSTRICTRC iemMemMap(PVMCPUCC pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem,
6174 uint32_t fAccess, uint32_t uAlignCtl) RT_NOEXCEPT
6175{
6176 /*
6177 * Check the input and figure out which mapping entry to use.
6178 */
6179 Assert(cbMem <= sizeof(pVCpu->iem.s.aBounceBuffers[0]));
6180 Assert( cbMem <= 64 || cbMem == 512 || cbMem == 256 || cbMem == 108 || cbMem == 104 || cbMem == 102 || cbMem == 94
6181 || (iSegReg == UINT8_MAX && uAlignCtl == 0 && fAccess == IEM_ACCESS_DATA_R /* for the CPUID logging interface */) );
6182 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
6183 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
6184
6185 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
6186 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6187 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
6188 {
6189 iMemMap = iemMemMapFindFree(pVCpu);
6190 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
6191 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
6192 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
6193 pVCpu->iem.s.aMemMappings[2].fAccess),
6194 VERR_IEM_IPE_9);
6195 }
6196
6197 /*
6198 * Map the memory, checking that we can actually access it. If something
6199 * slightly complicated happens, fall back on bounce buffering.
6200 */
6201 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
6202 if (rcStrict == VINF_SUCCESS)
6203 { /* likely */ }
6204 else
6205 return rcStrict;
6206
6207 if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE) /* Crossing a page boundary? */
6208 { /* likely */ }
6209 else
6210 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
6211
6212 /*
6213 * Alignment check.
6214 */
6215 if ( (GCPtrMem & (uAlignCtl & UINT16_MAX)) == 0 )
6216 { /* likelyish */ }
6217 else
6218 {
6219 /* Misaligned access. */
6220 if ((fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6221 {
6222 if ( !(uAlignCtl & IEM_MEMMAP_F_ALIGN_GP)
6223 || ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_SSE)
6224 && (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) )
6225 {
6226 AssertCompile(X86_CR0_AM == X86_EFL_AC);
6227
6228 if (iemMemAreAlignmentChecksEnabled(pVCpu))
6229 return iemRaiseAlignmentCheckException(pVCpu);
6230 }
6231 else if ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_GP_OR_AC)
6232 && (GCPtrMem & 3) /* The value 4 matches 10980xe's FXSAVE and helps make bs3-cpu-basic2 work. */
6233 /** @todo may only apply to 2, 4 or 8 byte misalignments depending on the CPU
6234 * implementation. See FXSAVE/FRSTOR/XSAVE/XRSTOR/++. Using 4 for now as
6235 * that's what FXSAVE does on a 10980xe. */
6236 && iemMemAreAlignmentChecksEnabled(pVCpu))
6237 return iemRaiseAlignmentCheckException(pVCpu);
6238 else
6239 return iemRaiseGeneralProtectionFault0(pVCpu);
6240 }
6241 }
6242
6243#ifdef IEM_WITH_DATA_TLB
6244 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
6245
6246 /*
6247 * Get the TLB entry for this page.
6248 */
6249 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
6250 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
6251 if (pTlbe->uTag == uTag)
6252 {
6253# ifdef VBOX_WITH_STATISTICS
6254 pVCpu->iem.s.DataTlb.cTlbHits++;
6255# endif
6256 }
6257 else
6258 {
6259 pVCpu->iem.s.DataTlb.cTlbMisses++;
6260 PGMPTWALK Walk;
6261 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
6262 if (RT_FAILURE(rc))
6263 {
6264 Log(("iemMemMap: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
6265# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6266 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6267 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
6268# endif
6269 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, rc);
6270 }
6271
6272 Assert(Walk.fSucceeded);
6273 pTlbe->uTag = uTag;
6274 pTlbe->fFlagsAndPhysRev = ~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A); /* skipping NX */
6275 pTlbe->GCPhys = Walk.GCPhys;
6276 pTlbe->pbMappingR3 = NULL;
6277 }
6278
6279 /*
6280 * Check TLB page table level access flags.
6281 */
6282 /* If the page is either supervisor only or non-writable, we need to do
6283 more careful access checks. */
6284 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_WRITE))
6285 {
6286 /* Write to read only memory? */
6287 if ( (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_WRITE)
6288 && (fAccess & IEM_ACCESS_TYPE_WRITE)
6289 && ( ( IEM_GET_CPL(pVCpu) == 3
6290 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6291 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
6292 {
6293 Log(("iemMemMap: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
6294# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6295 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6296 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6297# endif
6298 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
6299 }
6300
6301 /* Kernel memory accessed by userland? */
6302 if ( (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER)
6303 && IEM_GET_CPL(pVCpu) == 3
6304 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6305 {
6306 Log(("iemMemMap: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
6307# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6308 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6309 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6310# endif
6311 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, VERR_ACCESS_DENIED);
6312 }
6313 }
6314
6315 /*
6316 * Set the dirty / access flags.
6317 * ASSUMES this is set when the address is translated rather than on commit...
6318 */
6319 /** @todo testcase: check when A and D bits are actually set by the CPU. */
6320 uint64_t const fTlbAccessedDirty = (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PT_NO_DIRTY : 0) | IEMTLBE_F_PT_NO_ACCESSED;
6321 if (pTlbe->fFlagsAndPhysRev & fTlbAccessedDirty)
6322 {
6323 uint32_t const fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
6324 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
6325 AssertRC(rc2);
6326 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
6327 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
6328 pTlbe->fFlagsAndPhysRev &= ~fTlbAccessedDirty;
6329 }
6330
6331 /*
6332 * Look up the physical page info if necessary.
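 * The physical revision bits kept in fFlagsAndPhysRev are compared against the
 * TLB's current uTlbPhysRev; on a mismatch the cached GCPhys -> pointer info
 * (pbMappingR3 and the IEMTLBE_F_PG_XXX / IEMTLBE_F_NO_MAPPINGR3 bits) is
 * considered stale and is refreshed via PGMPhysIemGCPhys2PtrNoLock below.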
6333 */
6334 uint8_t *pbMem = NULL;
6335 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6336# ifdef IN_RING3
6337 pbMem = pTlbe->pbMappingR3;
6338# else
6339 pbMem = NULL;
6340# endif
6341 else
6342 {
6343 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
6344 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
6345 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
6346 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
6347 if (RT_LIKELY(pVCpu->iem.s.DataTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
6348 { /* likely */ }
6349 else
6350 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
6351 pTlbe->pbMappingR3 = NULL;
6352 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
6353 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_UNASSIGNED);
6354 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
6355 &pbMem, &pTlbe->fFlagsAndPhysRev);
6356 AssertRCReturn(rc, rc);
6357# ifdef IN_RING3
6358 pTlbe->pbMappingR3 = pbMem;
6359# endif
6360 }
6361
6362 /*
6363 * Check the physical page level access and mapping.
6364 */
6365 if ( !(pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ))
6366 || !(pTlbe->fFlagsAndPhysRev & ( (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PG_NO_WRITE : 0)
6367 | (fAccess & IEM_ACCESS_TYPE_READ ? IEMTLBE_F_PG_NO_READ : 0))) )
6368 { /* probably likely */ }
6369 else
6370 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem,
6371 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,
6372 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED
6373 : pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? VERR_PGM_PHYS_TLB_CATCH_ALL
6374 : VERR_PGM_PHYS_TLB_CATCH_WRITE);
6375 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3)); /* ASSUMPTIONS about PGMPhysIemGCPhys2PtrNoLock behaviour. */
6376
6377 if (pbMem)
6378 {
6379 Assert(!((uintptr_t)pbMem & GUEST_PAGE_OFFSET_MASK));
6380 pbMem = pbMem + (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6381 fAccess |= IEM_ACCESS_NOT_LOCKED;
6382 }
6383 else
6384 {
6385 Assert(!(fAccess & IEM_ACCESS_NOT_LOCKED));
6386 RTGCPHYS const GCPhysFirst = pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6387 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6388 if (rcStrict != VINF_SUCCESS)
6389 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
6390 }
6391
6392 void * const pvMem = pbMem;
6393
6394 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6395 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6396 if (fAccess & IEM_ACCESS_TYPE_READ)
6397 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6398
6399#else /* !IEM_WITH_DATA_TLB */
6400
6401 RTGCPHYS GCPhysFirst;
6402 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, &GCPhysFirst);
6403 if (rcStrict != VINF_SUCCESS)
6404 return rcStrict;
6405
6406 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6407 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6408 if (fAccess & IEM_ACCESS_TYPE_READ)
6409 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6410
6411 void *pvMem;
6412 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6413 if (rcStrict != VINF_SUCCESS)
6414 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
6415
6416#endif /* !IEM_WITH_DATA_TLB */
6417
6418 /*
6419 * Fill in the mapping table entry.
6420 */
6421 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
6422 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
6423 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6424 pVCpu->iem.s.cActiveMappings += 1;
6425
6426 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6427 *ppvMem = pvMem;
6428
6429 return VINF_SUCCESS;
6430}
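
/*
 * Typical (non-longjmp) usage sketch, mirroring the data fetch helpers further
 * down in this file: map, access, then always pair the mapping with
 * iemMemCommitAndUnmap using the same fAccess value.  The alignment control
 * here is just the natural alignment mask of the access:
 *
 *      uint16_t const *pu16Src;
 *      VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem,
 *                                        IEM_ACCESS_DATA_R, sizeof(*pu16Src) - 1);
 *      if (rcStrict == VINF_SUCCESS)
 *      {
 *          uint16_t const uValue = *pu16Src;
 *          rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
 *      }
 */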
6431
6432
6433/**
6434 * Commits the guest memory if bounce buffered and unmaps it.
6435 *
6436 * @returns Strict VBox status code.
6437 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6438 * @param pvMem The mapping.
6439 * @param fAccess The kind of access.
6440 */
6441VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess) RT_NOEXCEPT
6442{
6443 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
6444 AssertReturn(iMemMap >= 0, iMemMap);
6445
6446 /* If it's bounce buffered, we may need to write back the buffer. */
6447 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6448 {
6449 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6450 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
6451 }
6452 /* Otherwise unlock it. */
6453 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6454 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6455
6456 /* Free the entry. */
6457 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6458 Assert(pVCpu->iem.s.cActiveMappings != 0);
6459 pVCpu->iem.s.cActiveMappings--;
6460 return VINF_SUCCESS;
6461}
6462
6463#ifdef IEM_WITH_SETJMP
6464
6465/**
6466 * Maps the specified guest memory for the given kind of access, longjmp on
6467 * error.
6468 *
6469 * This may be using bounce buffering of the memory if it's crossing a page
6470 * boundary or if there is an access handler installed for any of it. Because
6471 * of lock prefix guarantees, we're in for some extra clutter when this
6472 * happens.
6473 *
6474 * This may raise a \#GP, \#SS, \#PF or \#AC.
6475 *
6476 * @returns Pointer to the mapped memory.
6477 *
6478 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6479 * @param cbMem The number of bytes to map. This is usually 1,
6480 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
6481 * string operations it can be up to a page.
6482 * @param iSegReg The index of the segment register to use for
6483 * this access. The base and limits are checked.
6484 * Use UINT8_MAX to indicate that no segmentation
6485 * is required (for IDT, GDT and LDT accesses).
6486 * @param GCPtrMem The address of the guest memory.
6487 * @param fAccess How the memory is being accessed. The
6488 * IEM_ACCESS_TYPE_XXX bit is used to figure out
6489 * how to map the memory, while the
6490 * IEM_ACCESS_WHAT_XXX bit is used when raising
6491 * exceptions.
6492 * @param uAlignCtl Alignment control:
6493 * - Bits 15:0 is the alignment mask.
6494 * - Bits 31:16 for flags like IEM_MEMMAP_F_ALIGN_GP,
6495 * IEM_MEMMAP_F_ALIGN_SSE, and
6496 * IEM_MEMMAP_F_ALIGN_GP_OR_AC.
6497 * Pass zero to skip alignment.
6498 */
6499void *iemMemMapJmp(PVMCPUCC pVCpu, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess,
6500 uint32_t uAlignCtl) IEM_NOEXCEPT_MAY_LONGJMP
6501{
6502 /*
6503 * Check the input, check segment access and adjust address
6504 * with segment base.
6505 */
6506 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
6507 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
6508 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
6509
6510 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
6511 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
6512 else IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6513
6514 /*
6515 * Alignment check.
6516 */
6517 if ( (GCPtrMem & (uAlignCtl & UINT16_MAX)) == 0 )
6518 { /* likelyish */ }
6519 else
6520 {
6521 /* Misaligned access. */
6522 if ((fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6523 {
6524 if ( !(uAlignCtl & IEM_MEMMAP_F_ALIGN_GP)
6525 || ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_SSE)
6526 && (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) )
6527 {
6528 AssertCompile(X86_CR0_AM == X86_EFL_AC);
6529
6530 if (iemMemAreAlignmentChecksEnabled(pVCpu))
6531 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
6532 }
6533 else if ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_GP_OR_AC)
6534 && (GCPtrMem & 3) /* The value 4 matches 10980xe's FXSAVE and helps make bs3-cpu-basic2 work. */
6535 /** @todo may only apply to 2, 4 or 8 byte misalignments depending on the CPU
6536 * implementation. See FXSAVE/FRSTOR/XSAVE/XRSTOR/++. Using 4 for now as
6537 * that's what FXSAVE does on a 10980xe. */
6538 && iemMemAreAlignmentChecksEnabled(pVCpu))
6539 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
6540 else
6541 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
6542 }
6543 }
6544
6545 /*
6546 * Figure out which mapping entry to use.
6547 */
6548 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
6549 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6550 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
6551 {
6552 iMemMap = iemMemMapFindFree(pVCpu);
6553 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
6554 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
6555 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
6556 pVCpu->iem.s.aMemMappings[2].fAccess),
6557 IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_9));
6558 }
6559
6560 /*
6561 * Crossing a page boundary?
6562 */
6563 if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE)
6564 { /* No (likely). */ }
6565 else
6566 {
6567 void *pvMem;
6568 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, cbMem, GCPtrMem, fAccess);
6569 if (rcStrict == VINF_SUCCESS)
6570 return pvMem;
6571 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6572 }
6573
6574#ifdef IEM_WITH_DATA_TLB
6575 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
6576
6577 /*
6578 * Get the TLB entry for this page.
6579 */
6580 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
6581 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
6582 if (pTlbe->uTag == uTag)
6583 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
6584 else
6585 {
6586 pVCpu->iem.s.DataTlb.cTlbMisses++;
6587 PGMPTWALK Walk;
6588 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
6589 if (RT_FAILURE(rc))
6590 {
6591 Log(("iemMemMap: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
6592# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6593 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6594 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
6595# endif
6596 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, rc);
6597 }
6598
6599 Assert(Walk.fSucceeded);
6600 pTlbe->uTag = uTag;
6601 pTlbe->fFlagsAndPhysRev = ~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A); /* skipping NX */
6602 pTlbe->GCPhys = Walk.GCPhys;
6603 pTlbe->pbMappingR3 = NULL;
6604 }
6605
6606 /*
6607 * Check the flags and physical revision.
6608 */
6609 /** @todo make the caller pass these in with fAccess. */
6610 uint64_t const fNoUser = (fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS && IEM_GET_CPL(pVCpu) == 3
6611 ? IEMTLBE_F_PT_NO_USER : 0;
6612 uint64_t const fNoWriteNoDirty = fAccess & IEM_ACCESS_TYPE_WRITE
6613 ? IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PT_NO_DIRTY
6614 | ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)
6615 || (IEM_GET_CPL(pVCpu) == 3 && (fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6616 ? IEMTLBE_F_PT_NO_WRITE : 0)
6617 : 0;
6618 uint64_t const fNoRead = fAccess & IEM_ACCESS_TYPE_READ ? IEMTLBE_F_PG_NO_READ : 0;
6619 uint8_t *pbMem = NULL;
6620 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PT_NO_ACCESSED | fNoRead | fNoWriteNoDirty | fNoUser))
6621 == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6622# ifdef IN_RING3
6623 pbMem = pTlbe->pbMappingR3;
6624# else
6625 pbMem = NULL;
6626# endif
6627 else
6628 {
6629 /*
6630 * Okay, something isn't quite right or needs refreshing.
6631 */
6632 /* Write to read only memory? */
6633 if (pTlbe->fFlagsAndPhysRev & fNoWriteNoDirty & IEMTLBE_F_PT_NO_WRITE)
6634 {
6635 Log(("iemMemMapJmp: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
6636# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6637 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6638 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6639# endif
6640 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
6641 }
6642
6643 /* Kernel memory accessed by userland? */
6644 if (pTlbe->fFlagsAndPhysRev & fNoUser & IEMTLBE_F_PT_NO_USER)
6645 {
6646 Log(("iemMemMapJmp: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
6647# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6648 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6649 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6650# endif
6651 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, VERR_ACCESS_DENIED);
6652 }
6653
6654 /* Set the dirty / access flags.
6655 ASSUMES this is set when the address is translated rather than on commit... */
6656 /** @todo testcase: check when A and D bits are actually set by the CPU. */
6657 if (pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY) | IEMTLBE_F_PT_NO_ACCESSED))
6658 {
6659 uint32_t const fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
6660 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
6661 AssertRC(rc2);
6662 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
6663 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
6664 pTlbe->fFlagsAndPhysRev &= ~((fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY) | IEMTLBE_F_PT_NO_ACCESSED);
6665 }
6666
6667 /*
6668 * Check if the physical page info needs updating.
6669 */
6670 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6671# ifdef IN_RING3
6672 pbMem = pTlbe->pbMappingR3;
6673# else
6674 pbMem = NULL;
6675# endif
6676 else
6677 {
6678 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
6679 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
6680 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
6681 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
6682 pTlbe->pbMappingR3 = NULL;
6683 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
6684 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_UNASSIGNED);
6685 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
6686 &pbMem, &pTlbe->fFlagsAndPhysRev);
6687 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
6688# ifdef IN_RING3
6689 pTlbe->pbMappingR3 = pbMem;
6690# endif
6691 }
6692
6693 /*
6694 * Check the physical page level access and mapping.
6695 */
6696 if (!(pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty | fNoRead) & (IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ))))
6697 { /* probably likely */ }
6698 else
6699 {
6700 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, (void **)&pbMem, cbMem,
6701 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,
6702 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED
6703 : pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? VERR_PGM_PHYS_TLB_CATCH_ALL
6704 : VERR_PGM_PHYS_TLB_CATCH_WRITE);
6705 if (rcStrict == VINF_SUCCESS)
6706 return pbMem;
6707 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6708 }
6709 }
6710 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3)); /* ASSUMPTIONS about PGMPhysIemGCPhys2PtrNoLock behaviour. */
6711
6712 if (pbMem)
6713 {
6714 Assert(!((uintptr_t)pbMem & GUEST_PAGE_OFFSET_MASK));
6715 pbMem = pbMem + (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6716 fAccess |= IEM_ACCESS_NOT_LOCKED;
6717 }
6718 else
6719 {
6720 Assert(!(fAccess & IEM_ACCESS_NOT_LOCKED));
6721 RTGCPHYS const GCPhysFirst = pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6722 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6723 if (rcStrict == VINF_SUCCESS)
6724 return pbMem;
6725 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6726 }
6727
6728 void * const pvMem = pbMem;
6729
6730 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6731 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6732 if (fAccess & IEM_ACCESS_TYPE_READ)
6733 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6734
6735#else /* !IEM_WITH_DATA_TLB */
6736
6737
6738 RTGCPHYS GCPhysFirst;
6739 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, &GCPhysFirst);
6740 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
6741 else IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6742
6743 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6744 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6745 if (fAccess & IEM_ACCESS_TYPE_READ)
6746 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6747
6748 void *pvMem;
6749 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6750 if (rcStrict == VINF_SUCCESS)
6751 { /* likely */ }
6752 else
6753 {
6754 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
6755 if (rcStrict == VINF_SUCCESS)
6756 return pvMem;
6757 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6758 }
6759
6760#endif /* !IEM_WITH_DATA_TLB */
6761
6762 /*
6763 * Fill in the mapping table entry.
6764 */
6765 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
6766 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
6767 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6768 pVCpu->iem.s.cActiveMappings++;
6769
6770 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6771 return pvMem;
6772}
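
/*
 * Usage sketch for the longjmp variant, mirroring iemMemFetchDataU16Jmp and
 * friends below: there is no status code to check since errors unwind via
 * longjmp, so the happy path is simply map, access, commit:
 *
 *      uint16_t const *pu16Src = (uint16_t const *)iemMemMapJmp(pVCpu, sizeof(*pu16Src), iSegReg, GCPtrMem,
 *                                                               IEM_ACCESS_DATA_R, sizeof(*pu16Src) - 1);
 *      uint16_t const uValue = *pu16Src;
 *      iemMemCommitAndUnmapJmp(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
 */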
6773
6774
6775/**
6776 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
6777 *
6778 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6779 * @param pvMem The mapping.
6780 * @param fAccess The kind of access.
6781 */
6782void iemMemCommitAndUnmapJmp(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP
6783{
6784 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
6785 AssertStmt(iMemMap >= 0, IEM_DO_LONGJMP(pVCpu, iMemMap));
6786
6787 /* If it's bounce buffered, we may need to write back the buffer. */
6788 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6789 {
6790 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6791 {
6792 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
6793 if (rcStrict == VINF_SUCCESS)
6794 return;
6795 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6796 }
6797 }
6798 /* Otherwise unlock it. */
6799 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6800 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6801
6802 /* Free the entry. */
6803 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6804 Assert(pVCpu->iem.s.cActiveMappings != 0);
6805 pVCpu->iem.s.cActiveMappings--;
6806}
6807
6808#endif /* IEM_WITH_SETJMP */
6809
6810#ifndef IN_RING3
6811/**
6812 * Commits the guest memory if bounce buffered and unmaps it, if any bounce
6813 * buffer part shows trouble it will be postponed to ring-3 (sets FF and stuff).
6814 *
6815 * Allows the instruction to be completed and retired, while the IEM user will
6816 * return to ring-3 immediately afterwards and do the postponed writes there.
6817 *
6818 * @returns VBox status code (no strict statuses). Caller must check
6819 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
6820 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6821 * @param pvMem The mapping.
6822 * @param fAccess The kind of access.
6823 */
6824VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess) RT_NOEXCEPT
6825{
6826 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
6827 AssertReturn(iMemMap >= 0, iMemMap);
6828
6829 /* If it's bounce buffered, we may need to write back the buffer. */
6830 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6831 {
6832 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6833 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
6834 }
6835 /* Otherwise unlock it. */
6836 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6837 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6838
6839 /* Free the entry. */
6840 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6841 Assert(pVCpu->iem.s.cActiveMappings != 0);
6842 pVCpu->iem.s.cActiveMappings--;
6843 return VINF_SUCCESS;
6844}
6845#endif
6846
6847
6848/**
6849 * Rolls back mappings, releasing page locks and such.
6850 *
6851 * The caller shall only call this after checking cActiveMappings.
6852 *
6853 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6854 */
6855void iemMemRollback(PVMCPUCC pVCpu) RT_NOEXCEPT
6856{
6857 Assert(pVCpu->iem.s.cActiveMappings > 0);
6858
6859 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
6860 while (iMemMap-- > 0)
6861 {
6862 uint32_t const fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
6863 if (fAccess != IEM_ACCESS_INVALID)
6864 {
6865 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
6866 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6867 if (!(fAccess & (IEM_ACCESS_BOUNCE_BUFFERED | IEM_ACCESS_NOT_LOCKED)))
6868 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6869 AssertMsg(pVCpu->iem.s.cActiveMappings > 0,
6870 ("iMemMap=%u fAccess=%#x pv=%p GCPhysFirst=%RGp GCPhysSecond=%RGp\n",
6871 iMemMap, fAccess, pVCpu->iem.s.aMemMappings[iMemMap].pv,
6872 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond));
6873 pVCpu->iem.s.cActiveMappings--;
6874 }
6875 }
6876}
6877
6878
6879/**
6880 * Fetches a data byte.
6881 *
6882 * @returns Strict VBox status code.
6883 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6884 * @param pu8Dst Where to return the byte.
6885 * @param iSegReg The index of the segment register to use for
6886 * this access. The base and limits are checked.
6887 * @param GCPtrMem The address of the guest memory.
6888 */
6889VBOXSTRICTRC iemMemFetchDataU8(PVMCPUCC pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6890{
6891 /* The lazy approach for now... */
6892 uint8_t const *pu8Src;
6893 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R, 0);
6894 if (rc == VINF_SUCCESS)
6895 {
6896 *pu8Dst = *pu8Src;
6897 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
6898 }
6899 return rc;
6900}
6901
6902
6903#ifdef IEM_WITH_SETJMP
6904/**
6905 * Fetches a data byte, longjmp on error.
6906 *
6907 * @returns The byte.
6908 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6909 * @param iSegReg The index of the segment register to use for
6910 * this access. The base and limits are checked.
6911 * @param GCPtrMem The address of the guest memory.
6912 */
6913uint8_t iemMemFetchDataU8Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
6914{
6915 /* The lazy approach for now... */
6916 uint8_t const *pu8Src = (uint8_t const *)iemMemMapJmp(pVCpu, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R, 0);
6917 uint8_t const bRet = *pu8Src;
6918 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
6919 return bRet;
6920}
6921#endif /* IEM_WITH_SETJMP */
6922
6923
6924/**
6925 * Fetches a data word.
6926 *
6927 * @returns Strict VBox status code.
6928 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6929 * @param pu16Dst Where to return the word.
6930 * @param iSegReg The index of the segment register to use for
6931 * this access. The base and limits are checked.
6932 * @param GCPtrMem The address of the guest memory.
6933 */
6934VBOXSTRICTRC iemMemFetchDataU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6935{
6936 /* The lazy approach for now... */
6937 uint16_t const *pu16Src;
6938 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem,
6939 IEM_ACCESS_DATA_R, sizeof(*pu16Src) - 1);
6940 if (rc == VINF_SUCCESS)
6941 {
6942 *pu16Dst = *pu16Src;
6943 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
6944 }
6945 return rc;
6946}
6947
6948
6949#ifdef IEM_WITH_SETJMP
6950/**
6951 * Fetches a data word, longjmp on error.
6952 *
6953 * @returns The word
6954 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6955 * @param iSegReg The index of the segment register to use for
6956 * this access. The base and limits are checked.
6957 * @param GCPtrMem The address of the guest memory.
6958 */
6959uint16_t iemMemFetchDataU16Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
6960{
6961 /* The lazy approach for now... */
6962 uint16_t const *pu16Src = (uint16_t const *)iemMemMapJmp(pVCpu, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
6963 sizeof(*pu16Src) - 1);
6964 uint16_t const u16Ret = *pu16Src;
6965 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
6966 return u16Ret;
6967}
6968#endif
6969
6970
6971/**
6972 * Fetches a data dword.
6973 *
6974 * @returns Strict VBox status code.
6975 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6976 * @param pu32Dst Where to return the dword.
6977 * @param iSegReg The index of the segment register to use for
6978 * this access. The base and limits are checked.
6979 * @param GCPtrMem The address of the guest memory.
6980 */
6981VBOXSTRICTRC iemMemFetchDataU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6982{
6983 /* The lazy approach for now... */
6984 uint32_t const *pu32Src;
6985 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem,
6986 IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
6987 if (rc == VINF_SUCCESS)
6988 {
6989 *pu32Dst = *pu32Src;
6990 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
6991 }
6992 return rc;
6993}
6994
6995
6996/**
6997 * Fetches a data dword and zero extends it to a qword.
6998 *
6999 * @returns Strict VBox status code.
7000 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7001 * @param pu64Dst Where to return the qword.
7002 * @param iSegReg The index of the segment register to use for
7003 * this access. The base and limits are checked.
7004 * @param GCPtrMem The address of the guest memory.
7005 */
7006VBOXSTRICTRC iemMemFetchDataU32_ZX_U64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7007{
7008 /* The lazy approach for now... */
7009 uint32_t const *pu32Src;
7010 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem,
7011 IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
7012 if (rc == VINF_SUCCESS)
7013 {
7014 *pu64Dst = *pu32Src;
7015 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
7016 }
7017 return rc;
7018}
7019
7020
7021#ifdef IEM_WITH_SETJMP
7022
7023/**
7024 * Fetches a data dword, longjmp on error, fallback/safe version.
7025 *
7026 * @returns The dword
7027 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7028 * @param iSegReg The index of the segment register to use for
7029 * this access. The base and limits are checked.
7030 * @param GCPtrMem The address of the guest memory.
7031 */
7032uint32_t iemMemFetchDataU32SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7033{
7034 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
7035 sizeof(*pu32Src) - 1);
7036 uint32_t const u32Ret = *pu32Src;
7037 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
7038 return u32Ret;
7039}
7040
7041
7042/**
7043 * Fetches a data dword, longjmp on error.
7044 *
7045 * @returns The dword
7046 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7047 * @param iSegReg The index of the segment register to use for
7048 * this access. The base and limits are checked.
7049 * @param GCPtrMem The address of the guest memory.
7050 */
7051uint32_t iemMemFetchDataU32Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7052{
7053# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
7054 /*
7055 * Convert from segmented to flat address and check that it doesn't cross a page boundary.
7056 */
7057 RTGCPTR GCPtrEff = iemMemApplySegmentToReadJmp(pVCpu, iSegReg, sizeof(uint32_t), GCPtrMem);
7058 if (RT_LIKELY((GCPtrEff & GUEST_PAGE_OFFSET_MASK) <= GUEST_PAGE_SIZE - sizeof(uint32_t)))
7059 {
7060 /*
7061 * TLB lookup.
7062 */
7063 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrEff);
7064 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
7065 if (pTlbe->uTag == uTag)
7066 {
7067 /*
7068 * Check TLB page table level access flags.
7069 */
7070 uint64_t const fNoUser = IEM_GET_CPL(pVCpu) == 3 ? IEMTLBE_F_PT_NO_USER : 0;
7071 if ( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_READ
7072 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_NO_MAPPINGR3 | fNoUser))
7073 == pVCpu->iem.s.DataTlb.uTlbPhysRev)
7074 {
7075 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
7076
7077 /*
7078 * Alignment check:
7079 */
7080 /** @todo check priority \#AC vs \#PF */
7081 if ( !(GCPtrEff & (sizeof(uint32_t) - 1))
7082 || !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_AM)
7083 || !pVCpu->cpum.GstCtx.eflags.Bits.u1AC
7084 || IEM_GET_CPL(pVCpu) != 3)
7085 {
7086 /*
7087 * Fetch and return the dword
7088 */
7089 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
7090 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
7091 return *(uint32_t const *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK];
7092 }
7093 Log10(("iemMemFetchDataU32Jmp: Raising #AC for %RGv\n", GCPtrEff));
7094 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
7095 }
7096 }
7097 }
7098
7099 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
7100 outdated page pointer, or other troubles. */
7101 Log10(("iemMemFetchDataU32Jmp: %u:%RGv fallback\n", iSegReg, GCPtrMem));
7102 return iemMemFetchDataU32SafeJmp(pVCpu, iSegReg, GCPtrMem);
7103
7104# else
7105 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem,
7106 IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
7107 uint32_t const u32Ret = *pu32Src;
7108 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
7109 return u32Ret;
7110# endif
7111}
7112#endif
7113
7114
7115#ifdef SOME_UNUSED_FUNCTION
7116/**
7117 * Fetches a data dword and sign extends it to a qword.
7118 *
7119 * @returns Strict VBox status code.
7120 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7121 * @param pu64Dst Where to return the sign extended value.
7122 * @param iSegReg The index of the segment register to use for
7123 * this access. The base and limits are checked.
7124 * @param GCPtrMem The address of the guest memory.
7125 */
7126VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7127{
7128 /* The lazy approach for now... */
7129 int32_t const *pi32Src;
7130 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem,
7131 IEM_ACCESS_DATA_R, sizeof(*pi32Src) - 1);
7132 if (rc == VINF_SUCCESS)
7133 {
7134 *pu64Dst = *pi32Src;
7135 rc = iemMemCommitAndUnmap(pVCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
7136 }
7137#ifdef __GNUC__ /* warning: GCC may be a royal pain */
7138 else
7139 *pu64Dst = 0;
7140#endif
7141 return rc;
7142}
7143#endif
7144
7145
7146/**
7147 * Fetches a data qword.
7148 *
7149 * @returns Strict VBox status code.
7150 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7151 * @param pu64Dst Where to return the qword.
7152 * @param iSegReg The index of the segment register to use for
7153 * this access. The base and limits are checked.
7154 * @param GCPtrMem The address of the guest memory.
7155 */
7156VBOXSTRICTRC iemMemFetchDataU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7157{
7158 /* The lazy approach for now... */
7159 uint64_t const *pu64Src;
7160 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem,
7161 IEM_ACCESS_DATA_R, sizeof(*pu64Src) - 1);
7162 if (rc == VINF_SUCCESS)
7163 {
7164 *pu64Dst = *pu64Src;
7165 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
7166 }
7167 return rc;
7168}
7169
7170
7171#ifdef IEM_WITH_SETJMP
7172/**
7173 * Fetches a data qword, longjmp on error.
7174 *
7175 * @returns The qword.
7176 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7177 * @param iSegReg The index of the segment register to use for
7178 * this access. The base and limits are checked.
7179 * @param GCPtrMem The address of the guest memory.
7180 */
7181uint64_t iemMemFetchDataU64Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7182{
7183 /* The lazy approach for now... */
7184 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem,
7185 IEM_ACCESS_DATA_R, sizeof(*pu64Src) - 1);
7186 uint64_t const u64Ret = *pu64Src;
7187 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
7188 return u64Ret;
7189}
7190#endif
7191
7192
7193/**
7194 * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
7195 *
7196 * @returns Strict VBox status code.
7197 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7198 * @param pu64Dst Where to return the qword.
7199 * @param iSegReg The index of the segment register to use for
7200 * this access. The base and limits are checked.
7201 * @param GCPtrMem The address of the guest memory.
7202 */
7203VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7204{
7205 /* The lazy approach for now... */
7206 uint64_t const *pu64Src;
7207 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem,
7208 IEM_ACCESS_DATA_R, 15 | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7209 if (rc == VINF_SUCCESS)
7210 {
7211 *pu64Dst = *pu64Src;
7212 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
7213 }
7214 return rc;
7215}
7216
7217
7218#ifdef IEM_WITH_SETJMP
7219/**
7220 * Fetches a data qword, aligned at a 16 byte boundary (for SSE), longjmp on error.
7221 *
7222 * @returns The qword.
7223 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7224 * @param iSegReg The index of the segment register to use for
7225 * this access. The base and limits are checked.
7226 * @param GCPtrMem The address of the guest memory.
7227 */
7228uint64_t iemMemFetchDataU64AlignedU128Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7229{
7230 /* The lazy approach for now... */
7231 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
7232 15 | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7233 uint64_t const u64Ret = *pu64Src;
7234 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
7235 return u64Ret;
7236}
7237#endif
7238
7239
7240/**
7241 * Fetches a data tword.
7242 *
7243 * @returns Strict VBox status code.
7244 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7245 * @param pr80Dst Where to return the tword.
7246 * @param iSegReg The index of the segment register to use for
7247 * this access. The base and limits are checked.
7248 * @param GCPtrMem The address of the guest memory.
7249 */
7250VBOXSTRICTRC iemMemFetchDataR80(PVMCPUCC pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7251{
7252 /* The lazy approach for now... */
7253 PCRTFLOAT80U pr80Src;
7254 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R, 7);
7255 if (rc == VINF_SUCCESS)
7256 {
7257 *pr80Dst = *pr80Src;
7258 rc = iemMemCommitAndUnmap(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
7259 }
7260 return rc;
7261}
7262
7263
7264#ifdef IEM_WITH_SETJMP
7265/**
7266 * Fetches a data tword, longjmp on error.
7267 *
7268 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7269 * @param pr80Dst Where to return the tword.
7270 * @param iSegReg The index of the segment register to use for
7271 * this access. The base and limits are checked.
7272 * @param GCPtrMem The address of the guest memory.
7273 */
7274void iemMemFetchDataR80Jmp(PVMCPUCC pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7275{
7276 /* The lazy approach for now... */
7277 PCRTFLOAT80U pr80Src = (PCRTFLOAT80U)iemMemMapJmp(pVCpu, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R, 7);
7278 *pr80Dst = *pr80Src;
7279 iemMemCommitAndUnmapJmp(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
7280}
7281#endif
7282
7283
7284/**
7285 * Fetches a data decimal tword.
7286 *
7287 * @returns Strict VBox status code.
7288 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7289 * @param pd80Dst Where to return the tword.
7290 * @param iSegReg The index of the segment register to use for
7291 * this access. The base and limits are checked.
7292 * @param GCPtrMem The address of the guest memory.
7293 */
7294VBOXSTRICTRC iemMemFetchDataD80(PVMCPUCC pVCpu, PRTPBCD80U pd80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7295{
7296 /* The lazy approach for now... */
7297 PCRTPBCD80U pd80Src;
7298 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pd80Src, sizeof(*pd80Src), iSegReg, GCPtrMem,
7299 IEM_ACCESS_DATA_R, 7 /** @todo FBLD alignment check */);
7300 if (rc == VINF_SUCCESS)
7301 {
7302 *pd80Dst = *pd80Src;
7303 rc = iemMemCommitAndUnmap(pVCpu, (void *)pd80Src, IEM_ACCESS_DATA_R);
7304 }
7305 return rc;
7306}
7307
7308
7309#ifdef IEM_WITH_SETJMP
7310/**
7311 * Fetches a data decimal tword, longjmp on error.
7312 *
7313 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7314 * @param pd80Dst Where to return the tword.
7315 * @param iSegReg The index of the segment register to use for
7316 * this access. The base and limits are checked.
7317 * @param GCPtrMem The address of the guest memory.
7318 */
7319void iemMemFetchDataD80Jmp(PVMCPUCC pVCpu, PRTPBCD80U pd80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7320{
7321 /* The lazy approach for now... */
7322 PCRTPBCD80U pd80Src = (PCRTPBCD80U)iemMemMapJmp(pVCpu, sizeof(*pd80Src), iSegReg, GCPtrMem,
7323 IEM_ACCESS_DATA_R, 7 /** @todo FBSTP alignment check */);
7324 *pd80Dst = *pd80Src;
7325 iemMemCommitAndUnmapJmp(pVCpu, (void *)pd80Src, IEM_ACCESS_DATA_R);
7326}
7327#endif
7328
7329
7330/**
7331 * Fetches a data dqword (double qword), generally SSE related.
7332 *
7333 * @returns Strict VBox status code.
7334 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7335 * @param pu128Dst Where to return the dqword.
7336 * @param iSegReg The index of the segment register to use for
7337 * this access. The base and limits are checked.
7338 * @param GCPtrMem The address of the guest memory.
7339 */
7340VBOXSTRICTRC iemMemFetchDataU128(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7341{
7342 /* The lazy approach for now... */
7343 PCRTUINT128U pu128Src;
7344 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem,
7345 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7346 if (rc == VINF_SUCCESS)
7347 {
7348 pu128Dst->au64[0] = pu128Src->au64[0];
7349 pu128Dst->au64[1] = pu128Src->au64[1];
7350 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7351 }
7352 return rc;
7353}
7354
7355
7356#ifdef IEM_WITH_SETJMP
7357/**
7358 * Fetches a data dqword (double qword), generally SSE related.
7359 *
7360 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7361 * @param pu128Dst Where to return the dqword.
7362 * @param iSegReg The index of the segment register to use for
7363 * this access. The base and limits are checked.
7364 * @param GCPtrMem The address of the guest memory.
7365 */
7366void iemMemFetchDataU128Jmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7367{
7368 /* The lazy approach for now... */
7369 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem,
7370 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7371 pu128Dst->au64[0] = pu128Src->au64[0];
7372 pu128Dst->au64[1] = pu128Src->au64[1];
7373 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7374}
7375#endif
7376
7377
7378/**
7379 * Fetches a data dqword (double qword) at an aligned address, generally SSE
7380 * related.
7381 *
7382 * Raises \#GP(0) if not aligned.
7383 *
7384 * @returns Strict VBox status code.
7385 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7386 * @param pu128Dst Where to return the dqword.
7387 * @param iSegReg The index of the segment register to use for
7388 * this access. The base and limits are checked.
7389 * @param GCPtrMem The address of the guest memory.
7390 */
7391VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7392{
7393 /* The lazy approach for now... */
7394 PCRTUINT128U pu128Src;
7395 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem,
7396 IEM_ACCESS_DATA_R, (sizeof(*pu128Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7397 if (rc == VINF_SUCCESS)
7398 {
7399 pu128Dst->au64[0] = pu128Src->au64[0];
7400 pu128Dst->au64[1] = pu128Src->au64[1];
7401 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7402 }
7403 return rc;
7404}
7405
7406
7407#ifdef IEM_WITH_SETJMP
7408/**
7409 * Fetches a data dqword (double qword) at an aligned address, generally SSE
7410 * related, longjmp on error.
7411 *
7412 * Raises \#GP(0) if not aligned.
7413 *
7414 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7415 * @param pu128Dst Where to return the dqword.
7416 * @param iSegReg The index of the segment register to use for
7417 * this access. The base and limits are checked.
7418 * @param GCPtrMem The address of the guest memory.
7419 */
7420void iemMemFetchDataU128AlignedSseJmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg,
7421 RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7422{
7423 /* The lazy approach for now... */
7424 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
7425 (sizeof(*pu128Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7426 pu128Dst->au64[0] = pu128Src->au64[0];
7427 pu128Dst->au64[1] = pu128Src->au64[1];
7428 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7429}
7430#endif
7431
7432
7433/**
7434 * Fetches a data oword (octo word), generally AVX related.
7435 *
7436 * @returns Strict VBox status code.
7437 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7438 * @param pu256Dst Where to return the oword.
7439 * @param iSegReg The index of the segment register to use for
7440 * this access. The base and limits are checked.
7441 * @param GCPtrMem The address of the guest memory.
7442 */
7443VBOXSTRICTRC iemMemFetchDataU256(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7444{
7445 /* The lazy approach for now... */
7446 PCRTUINT256U pu256Src;
7447 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem,
7448 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7449 if (rc == VINF_SUCCESS)
7450 {
7451 pu256Dst->au64[0] = pu256Src->au64[0];
7452 pu256Dst->au64[1] = pu256Src->au64[1];
7453 pu256Dst->au64[2] = pu256Src->au64[2];
7454 pu256Dst->au64[3] = pu256Src->au64[3];
7455 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
7456 }
7457 return rc;
7458}
7459
7460
7461#ifdef IEM_WITH_SETJMP
7462/**
7463 * Fetches a data oword (octo word), generally AVX related, longjmp on error.
7464 *
7465 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7466 * @param pu256Dst Where to return the oword.
7467 * @param iSegReg The index of the segment register to use for
7468 * this access. The base and limits are checked.
7469 * @param GCPtrMem The address of the guest memory.
7470 */
7471void iemMemFetchDataU256Jmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7472{
7473 /* The lazy approach for now... */
7474 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem,
7475 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7476 pu256Dst->au64[0] = pu256Src->au64[0];
7477 pu256Dst->au64[1] = pu256Src->au64[1];
7478 pu256Dst->au64[2] = pu256Src->au64[2];
7479 pu256Dst->au64[3] = pu256Src->au64[3];
7480 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
7481}
7482#endif
7483
7484
7485/**
7486 * Fetches a data oword (octo word) at an aligned address, generally AVX
7487 * related.
7488 *
7489 * Raises \#GP(0) if not aligned.
7490 *
7491 * @returns Strict VBox status code.
7492 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7493 * @param pu256Dst Where to return the oword.
7494 * @param iSegReg The index of the segment register to use for
7495 * this access. The base and limits are checked.
7496 * @param GCPtrMem The address of the guest memory.
7497 */
7498VBOXSTRICTRC iemMemFetchDataU256AlignedSse(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7499{
7500 /* The lazy approach for now... */
7501 PCRTUINT256U pu256Src;
7502 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem,
7503 IEM_ACCESS_DATA_R, (sizeof(*pu256Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7504 if (rc == VINF_SUCCESS)
7505 {
7506 pu256Dst->au64[0] = pu256Src->au64[0];
7507 pu256Dst->au64[1] = pu256Src->au64[1];
7508 pu256Dst->au64[2] = pu256Src->au64[2];
7509 pu256Dst->au64[3] = pu256Src->au64[3];
7510 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
7511 }
7512 return rc;
7513}
7514
7515
7516#ifdef IEM_WITH_SETJMP
7517/**
7518 * Fetches a data oword (octo word) at an aligned address, generally AVX
7519 * related, longjmp on error.
7520 *
7521 * Raises \#GP(0) if not aligned.
7522 *
7523 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7524 * @param pu256Dst Where to return the oword.
7525 * @param iSegReg The index of the segment register to use for
7526 * this access. The base and limits are checked.
7527 * @param GCPtrMem The address of the guest memory.
7528 */
7529void iemMemFetchDataU256AlignedSseJmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg,
7530 RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7531{
7532 /* The lazy approach for now... */
7533 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
7534 (sizeof(*pu256Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7535 pu256Dst->au64[0] = pu256Src->au64[0];
7536 pu256Dst->au64[1] = pu256Src->au64[1];
7537 pu256Dst->au64[2] = pu256Src->au64[2];
7538 pu256Dst->au64[3] = pu256Src->au64[3];
7539 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
7540}
7541#endif
7542
7543
7544
7545/**
7546 * Fetches a descriptor register (lgdt, lidt).
7547 *
7548 * @returns Strict VBox status code.
7549 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7550 * @param pcbLimit Where to return the limit.
7551 * @param pGCPtrBase Where to return the base.
7552 * @param iSegReg The index of the segment register to use for
7553 * this access. The base and limits are checked.
7554 * @param GCPtrMem The address of the guest memory.
7555 * @param enmOpSize The effective operand size.
7556 */
7557VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPUCC pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
7558 RTGCPTR GCPtrMem, IEMMODE enmOpSize) RT_NOEXCEPT
7559{
7560 /*
7561 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
7562 * little special:
7563 * - The two reads are done separately.
7564 * - Operand size override works in 16-bit and 32-bit code, but not in 64-bit code.
7565 * - We suspect the 386 to actually commit the limit before the base in
7566 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
7567 * don't try to emulate this eccentric behavior, because it's not well
7568 * enough understood and rather hard to trigger.
7569 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
7570 */
7571 VBOXSTRICTRC rcStrict;
7572 if (IEM_IS_64BIT_CODE(pVCpu))
7573 {
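        /* 64-bit mode: the operand is always a 16-bit limit followed by a 64-bit base
           (10 bytes); the operand size prefix is ignored (see the notes above). */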
7574 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7575 if (rcStrict == VINF_SUCCESS)
7576 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
7577 }
7578 else
7579 {
7580 uint32_t uTmp = 0; /* (Silences a Visual C++ 'potentially uninitialized' warning.) */
7581 if (enmOpSize == IEMMODE_32BIT)
7582 {
7583 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
7584 {
7585 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7586 if (rcStrict == VINF_SUCCESS)
7587 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7588 }
7589 else
7590 {
7591 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
7592 if (rcStrict == VINF_SUCCESS)
7593 {
7594 *pcbLimit = (uint16_t)uTmp;
7595 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7596 }
7597 }
7598 if (rcStrict == VINF_SUCCESS)
7599 *pGCPtrBase = uTmp;
7600 }
7601 else
7602 {
7603 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7604 if (rcStrict == VINF_SUCCESS)
7605 {
7606 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7607 if (rcStrict == VINF_SUCCESS)
7608 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
7609 }
7610 }
7611 }
7612 return rcStrict;
7613}
7614
7615
7616
7617/**
7618 * Stores a data byte.
7619 *
7620 * @returns Strict VBox status code.
7621 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7622 * @param iSegReg The index of the segment register to use for
7623 * this access. The base and limits are checked.
7624 * @param GCPtrMem The address of the guest memory.
7625 * @param u8Value The value to store.
7626 */
7627VBOXSTRICTRC iemMemStoreDataU8(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value) RT_NOEXCEPT
7628{
7629 /* The lazy approach for now... */
7630 uint8_t *pu8Dst;
7631 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W, 0);
7632 if (rc == VINF_SUCCESS)
7633 {
7634 *pu8Dst = u8Value;
7635 rc = iemMemCommitAndUnmap(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
7636 }
7637 return rc;
7638}
7639
7640
7641#ifdef IEM_WITH_SETJMP
7642/**
7643 * Stores a data byte, longjmp on error.
7644 *
7645 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7646 * @param iSegReg The index of the segment register to use for
7647 * this access. The base and limits are checked.
7648 * @param GCPtrMem The address of the guest memory.
7649 * @param u8Value The value to store.
7650 */
7651void iemMemStoreDataU8Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value) IEM_NOEXCEPT_MAY_LONGJMP
7652{
7653 /* The lazy approach for now... */
7654 uint8_t *pu8Dst = (uint8_t *)iemMemMapJmp(pVCpu, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W, 0);
7655 *pu8Dst = u8Value;
7656 iemMemCommitAndUnmapJmp(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
7657}
7658#endif
7659
7660
7661/**
7662 * Stores a data word.
7663 *
7664 * @returns Strict VBox status code.
7665 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7666 * @param iSegReg The index of the segment register to use for
7667 * this access. The base and limits are checked.
7668 * @param GCPtrMem The address of the guest memory.
7669 * @param u16Value The value to store.
7670 */
7671VBOXSTRICTRC iemMemStoreDataU16(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value) RT_NOEXCEPT
7672{
7673 /* The lazy approach for now... */
7674 uint16_t *pu16Dst;
7675 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem,
7676 IEM_ACCESS_DATA_W, sizeof(*pu16Dst) - 1);
7677 if (rc == VINF_SUCCESS)
7678 {
7679 *pu16Dst = u16Value;
7680 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
7681 }
7682 return rc;
7683}
7684
7685
7686#ifdef IEM_WITH_SETJMP
7687/**
7688 * Stores a data word, longjmp on error.
7689 *
7690 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7691 * @param iSegReg The index of the segment register to use for
7692 * this access. The base and limits are checked.
7693 * @param GCPtrMem The address of the guest memory.
7694 * @param u16Value The value to store.
7695 */
7696void iemMemStoreDataU16Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value) IEM_NOEXCEPT_MAY_LONGJMP
7697{
7698 /* The lazy approach for now... */
7699 uint16_t *pu16Dst = (uint16_t *)iemMemMapJmp(pVCpu, sizeof(*pu16Dst), iSegReg, GCPtrMem,
7700 IEM_ACCESS_DATA_W, sizeof(*pu16Dst) - 1);
7701 *pu16Dst = u16Value;
7702 iemMemCommitAndUnmapJmp(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
7703}
7704#endif
7705
7706
7707/**
7708 * Stores a data dword.
7709 *
7710 * @returns Strict VBox status code.
7711 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7712 * @param iSegReg The index of the segment register to use for
7713 * this access. The base and limits are checked.
7714 * @param GCPtrMem The address of the guest memory.
7715 * @param u32Value The value to store.
7716 */
7717VBOXSTRICTRC iemMemStoreDataU32(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value) RT_NOEXCEPT
7718{
7719 /* The lazy approach for now... */
7720 uint32_t *pu32Dst;
7721 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem,
7722 IEM_ACCESS_DATA_W, sizeof(*pu32Dst) - 1);
7723 if (rc == VINF_SUCCESS)
7724 {
7725 *pu32Dst = u32Value;
7726 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
7727 }
7728 return rc;
7729}
7730
7731
7732#ifdef IEM_WITH_SETJMP
7733/**
7734 * Stores a data dword, longjmp on error.
7735 *
7737 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7738 * @param iSegReg The index of the segment register to use for
7739 * this access. The base and limits are checked.
7740 * @param GCPtrMem The address of the guest memory.
7741 * @param u32Value The value to store.
7742 */
7743void iemMemStoreDataU32Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value) IEM_NOEXCEPT_MAY_LONGJMP
7744{
7745 /* The lazy approach for now... */
7746 uint32_t *pu32Dst = (uint32_t *)iemMemMapJmp(pVCpu, sizeof(*pu32Dst), iSegReg, GCPtrMem,
7747 IEM_ACCESS_DATA_W, sizeof(*pu32Dst) - 1);
7748 *pu32Dst = u32Value;
7749 iemMemCommitAndUnmapJmp(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
7750}
7751#endif
7752
7753
7754/**
7755 * Stores a data qword.
7756 *
7757 * @returns Strict VBox status code.
7758 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7759 * @param iSegReg The index of the segment register to use for
7760 * this access. The base and limits are checked.
7761 * @param GCPtrMem The address of the guest memory.
7762 * @param u64Value The value to store.
7763 */
7764VBOXSTRICTRC iemMemStoreDataU64(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value) RT_NOEXCEPT
7765{
7766 /* The lazy approach for now... */
7767 uint64_t *pu64Dst;
7768 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem,
7769 IEM_ACCESS_DATA_W, sizeof(*pu64Dst) - 1);
7770 if (rc == VINF_SUCCESS)
7771 {
7772 *pu64Dst = u64Value;
7773 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
7774 }
7775 return rc;
7776}
7777
7778
7779#ifdef IEM_WITH_SETJMP
7780/**
7781 * Stores a data qword, longjmp on error.
7782 *
7783 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7784 * @param iSegReg The index of the segment register to use for
7785 * this access. The base and limits are checked.
7786 * @param GCPtrMem The address of the guest memory.
7787 * @param u64Value The value to store.
7788 */
7789void iemMemStoreDataU64Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value) IEM_NOEXCEPT_MAY_LONGJMP
7790{
7791 /* The lazy approach for now... */
7792 uint64_t *pu64Dst = (uint64_t *)iemMemMapJmp(pVCpu, sizeof(*pu64Dst), iSegReg, GCPtrMem,
7793 IEM_ACCESS_DATA_W, sizeof(*pu64Dst) - 1);
7794 *pu64Dst = u64Value;
7795 iemMemCommitAndUnmapJmp(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
7796}
7797#endif
7798
7799
7800/**
7801 * Stores a data dqword.
7802 *
7803 * @returns Strict VBox status code.
7804 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7805 * @param iSegReg The index of the segment register to use for
7806 * this access. The base and limits are checked.
7807 * @param GCPtrMem The address of the guest memory.
7808 * @param u128Value The value to store.
7809 */
7810VBOXSTRICTRC iemMemStoreDataU128(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT
7811{
7812 /* The lazy approach for now... */
7813 PRTUINT128U pu128Dst;
7814 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem,
7815 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7816 if (rc == VINF_SUCCESS)
7817 {
7818 pu128Dst->au64[0] = u128Value.au64[0];
7819 pu128Dst->au64[1] = u128Value.au64[1];
7820 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
7821 }
7822 return rc;
7823}
7824
7825
7826#ifdef IEM_WITH_SETJMP
7827/**
7828 * Stores a data dqword, longjmp on error.
7829 *
7830 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7831 * @param iSegReg The index of the segment register to use for
7832 * this access. The base and limits are checked.
7833 * @param GCPtrMem The address of the guest memory.
7834 * @param u128Value The value to store.
7835 */
7836void iemMemStoreDataU128Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) IEM_NOEXCEPT_MAY_LONGJMP
7837{
7838 /* The lazy approach for now... */
7839 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem,
7840 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7841 pu128Dst->au64[0] = u128Value.au64[0];
7842 pu128Dst->au64[1] = u128Value.au64[1];
7843 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
7844}
7845#endif
7846
7847
7848/**
7849 * Stores a data dqword, SSE aligned.
7850 *
7851 * @returns Strict VBox status code.
7852 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7853 * @param iSegReg The index of the segment register to use for
7854 * this access. The base and limits are checked.
7855 * @param GCPtrMem The address of the guest memory.
7856 * @param u128Value The value to store.
7857 */
7858VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT
7859{
7860 /* The lazy approach for now... */
7861 PRTUINT128U pu128Dst;
7862 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W,
7863 (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7864 if (rc == VINF_SUCCESS)
7865 {
7866 pu128Dst->au64[0] = u128Value.au64[0];
7867 pu128Dst->au64[1] = u128Value.au64[1];
7868 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
7869 }
7870 return rc;
7871}
7872
7873
7874#ifdef IEM_WITH_SETJMP
7875/**
7876 * Stores a data dqword, SSE aligned, longjmp on error.
7877 *
7879 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7880 * @param iSegReg The index of the segment register to use for
7881 * this access. The base and limits are checked.
7882 * @param GCPtrMem The address of the guest memory.
7883 * @param u128Value The value to store.
7884 */
7885void iemMemStoreDataU128AlignedSseJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem,
7886 RTUINT128U u128Value) IEM_NOEXCEPT_MAY_LONGJMP
7887{
7888 /* The lazy approach for now... */
7889 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W,
7890 (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7891 pu128Dst->au64[0] = u128Value.au64[0];
7892 pu128Dst->au64[1] = u128Value.au64[1];
7893 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
7894}
7895#endif
7896
7897
7898/**
7899 * Stores a data oword (octo word).
7900 *
7901 * @returns Strict VBox status code.
7902 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7903 * @param iSegReg The index of the segment register to use for
7904 * this access. The base and limits are checked.
7905 * @param GCPtrMem The address of the guest memory.
7906 * @param pu256Value Pointer to the value to store.
7907 */
7908VBOXSTRICTRC iemMemStoreDataU256(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
7909{
7910 /* The lazy approach for now... */
7911 PRTUINT256U pu256Dst;
7912 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7913 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7914 if (rc == VINF_SUCCESS)
7915 {
7916 pu256Dst->au64[0] = pu256Value->au64[0];
7917 pu256Dst->au64[1] = pu256Value->au64[1];
7918 pu256Dst->au64[2] = pu256Value->au64[2];
7919 pu256Dst->au64[3] = pu256Value->au64[3];
7920 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
7921 }
7922 return rc;
7923}
7924
7925
7926#ifdef IEM_WITH_SETJMP
7927/**
7928 * Stores a data oword (octo word), longjmp on error.
7929 *
7930 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7931 * @param iSegReg The index of the segment register to use for
7932 * this access. The base and limits are checked.
7933 * @param GCPtrMem The address of the guest memory.
7934 * @param pu256Value Pointer to the value to store.
7935 */
7936void iemMemStoreDataU256Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP
7937{
7938 /* The lazy approach for now... */
7939 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7940 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7941 pu256Dst->au64[0] = pu256Value->au64[0];
7942 pu256Dst->au64[1] = pu256Value->au64[1];
7943 pu256Dst->au64[2] = pu256Value->au64[2];
7944 pu256Dst->au64[3] = pu256Value->au64[3];
7945 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
7946}
7947#endif
7948
7949
7950/**
7951 * Stores a data oword (octo word), AVX aligned; raises \#GP(0) if not aligned.
7952 *
7953 * @returns Strict VBox status code.
7954 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7955 * @param iSegReg The index of the segment register to use for
7956 * this access. The base and limits are checked.
7957 * @param GCPtrMem The address of the guest memory.
7958 * @param pu256Value Pointer to the value to store.
7959 */
7960VBOXSTRICTRC iemMemStoreDataU256AlignedAvx(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
7961{
7962 /* The lazy approach for now... */
7963 PRTUINT256U pu256Dst;
7964 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7965 IEM_ACCESS_DATA_W, (sizeof(*pu256Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP);
7966 if (rc == VINF_SUCCESS)
7967 {
7968 pu256Dst->au64[0] = pu256Value->au64[0];
7969 pu256Dst->au64[1] = pu256Value->au64[1];
7970 pu256Dst->au64[2] = pu256Value->au64[2];
7971 pu256Dst->au64[3] = pu256Value->au64[3];
7972 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
7973 }
7974 return rc;
7975}
7976
7977
7978#ifdef IEM_WITH_SETJMP
7979/**
7980 * Stores a data oword (octo word), AVX aligned, longjmp on error.
7981 *
7983 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7984 * @param iSegReg The index of the segment register to use for
7985 * this access. The base and limits are checked.
7986 * @param GCPtrMem The address of the guest memory.
7987 * @param pu256Value Pointer to the value to store.
7988 */
7989void iemMemStoreDataU256AlignedAvxJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem,
7990 PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP
7991{
7992 /* The lazy approach for now... */
7993 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7994 IEM_ACCESS_DATA_W, (sizeof(*pu256Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP);
7995 pu256Dst->au64[0] = pu256Value->au64[0];
7996 pu256Dst->au64[1] = pu256Value->au64[1];
7997 pu256Dst->au64[2] = pu256Value->au64[2];
7998 pu256Dst->au64[3] = pu256Value->au64[3];
7999 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
8000}
8001#endif
8002
8003
8004/**
8005 * Stores a descriptor register (sgdt, sidt).
8006 *
8007 * @returns Strict VBox status code.
8008 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8009 * @param cbLimit The limit.
8010 * @param GCPtrBase The base address.
8011 * @param iSegReg The index of the segment register to use for
8012 * this access. The base and limits are checked.
8013 * @param GCPtrMem The address of the guest memory.
8014 */
8015VBOXSTRICTRC iemMemStoreDataXdtr(PVMCPUCC pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8016{
8017 /*
8018 * The SIDT and SGDT instructions actually store the data using two
8019 * independent writes (see bs3CpuBasic2_sidt_sgdt_One). The instructions
8020 * do not respond to operand size prefixes.
8021 */
8022 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
8023 if (rcStrict == VINF_SUCCESS)
8024 {
8025 if (IEM_IS_16BIT_CODE(pVCpu))
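            /* 16-bit code: 286 (and earlier) targets store 0xff in the unused top byte
               of the base, whereas later CPUs store the full 32-bit base value. */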
8026 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
8027 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
8028 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
8029 else if (IEM_IS_32BIT_CODE(pVCpu))
8030 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
8031 else
8032 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
8033 }
8034 return rcStrict;
8035}
8036
8037
8038/**
8039 * Pushes a word onto the stack.
8040 *
8041 * @returns Strict VBox status code.
8042 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8043 * @param u16Value The value to push.
8044 */
8045VBOXSTRICTRC iemMemStackPushU16(PVMCPUCC pVCpu, uint16_t u16Value) RT_NOEXCEPT
8046{
8047 /* Decrement the stack pointer. */
8048 uint64_t uNewRsp;
8049 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 2, &uNewRsp);
8050
8051 /* Write the word the lazy way. */
8052 uint16_t *pu16Dst;
8053 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop,
8054 IEM_ACCESS_STACK_W, sizeof(*pu16Dst) - 1);
8055 if (rc == VINF_SUCCESS)
8056 {
8057 *pu16Dst = u16Value;
8058 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
8059 }
8060
8061 /* Commit the new RSP value unless an access handler made trouble. */
8062 if (rc == VINF_SUCCESS)
8063 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8064
8065 return rc;
8066}
8067
8068
8069/**
8070 * Pushes a dword onto the stack.
8071 *
8072 * @returns Strict VBox status code.
8073 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8074 * @param u32Value The value to push.
8075 */
8076VBOXSTRICTRC iemMemStackPushU32(PVMCPUCC pVCpu, uint32_t u32Value) RT_NOEXCEPT
8077{
8078 /* Decrement the stack pointer. */
8079 uint64_t uNewRsp;
8080 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
8081
8082 /* Write the dword the lazy way. */
8083 uint32_t *pu32Dst;
8084 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop,
8085 IEM_ACCESS_STACK_W, sizeof(*pu32Dst) - 1);
8086 if (rc == VINF_SUCCESS)
8087 {
8088 *pu32Dst = u32Value;
8089 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
8090 }
8091
8092 /* Commit the new RSP value unless an access handler made trouble. */
8093 if (rc == VINF_SUCCESS)
8094 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8095
8096 return rc;
8097}
8098
8099
8100/**
8101 * Pushes a dword segment register value onto the stack.
8102 *
8103 * @returns Strict VBox status code.
8104 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8105 * @param u32Value The value to push.
8106 */
8107VBOXSTRICTRC iemMemStackPushU32SReg(PVMCPUCC pVCpu, uint32_t u32Value) RT_NOEXCEPT
8108{
8109 /* Decrement the stack pointer. */
8110 uint64_t uNewRsp;
8111 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
8112
8113 /* The Intel docs talk about zero extending the selector register
8114 value. My actual Intel CPU here might be zero extending the value
8115 but it still only writes the lower word... */
8116 /** @todo Test this on new HW and on AMD and in 64-bit mode. Also test what
8117 * happens when crossing an electric page boundary: is the high word checked
8118 * for write accessibility or not? Probably it is. What about segment limits?
8119 * It appears this behavior is also shared with trap error codes.
8120 *
8121 * Docs indicate the behavior may have changed with the Pentium or Pentium Pro. Check
8122 * ancient hardware to see when it actually changed. */
8123 uint16_t *pu16Dst;
8124 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop,
8125 IEM_ACCESS_STACK_RW, sizeof(*pu16Dst) - 1); /** @todo 2 or 4 alignment check for PUSH SS? */
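    /* Note: four bytes are mapped (and thus checked for accessibility) but only the
       low word is actually written, matching the observed behaviour described above;
       this is presumably why the mapping is read-write rather than write-only, so the
       untouched high word is preserved. */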
8126 if (rc == VINF_SUCCESS)
8127 {
8128 *pu16Dst = (uint16_t)u32Value;
8129 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_RW);
8130 }
8131
8132 /* Commit the new RSP value unless an access handler made trouble. */
8133 if (rc == VINF_SUCCESS)
8134 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8135
8136 return rc;
8137}
8138
8139
8140/**
8141 * Pushes a qword onto the stack.
8142 *
8143 * @returns Strict VBox status code.
8144 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8145 * @param u64Value The value to push.
8146 */
8147VBOXSTRICTRC iemMemStackPushU64(PVMCPUCC pVCpu, uint64_t u64Value) RT_NOEXCEPT
8148{
8149 /* Decrement the stack pointer. */
8150 uint64_t uNewRsp;
8151 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 8, &uNewRsp);
8152
8153 /* Write the qword the lazy way. */
8154 uint64_t *pu64Dst;
8155 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop,
8156 IEM_ACCESS_STACK_W, sizeof(*pu64Dst) - 1);
8157 if (rc == VINF_SUCCESS)
8158 {
8159 *pu64Dst = u64Value;
8160 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
8161 }
8162
8163 /* Commit the new RSP value unless an access handler made trouble. */
8164 if (rc == VINF_SUCCESS)
8165 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8166
8167 return rc;
8168}
8169
8170
8171/**
8172 * Pops a word from the stack.
8173 *
8174 * @returns Strict VBox status code.
8175 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8176 * @param pu16Value Where to store the popped value.
8177 */
8178VBOXSTRICTRC iemMemStackPopU16(PVMCPUCC pVCpu, uint16_t *pu16Value) RT_NOEXCEPT
8179{
8180 /* Increment the stack pointer. */
8181 uint64_t uNewRsp;
8182 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 2, &uNewRsp);
8183
8184 /* Fetch the word the lazy way. */
8185 uint16_t const *pu16Src;
8186 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop,
8187 IEM_ACCESS_STACK_R, sizeof(*pu16Src) - 1);
8188 if (rc == VINF_SUCCESS)
8189 {
8190 *pu16Value = *pu16Src;
8191 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
8192
8193 /* Commit the new RSP value. */
8194 if (rc == VINF_SUCCESS)
8195 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8196 }
8197
8198 return rc;
8199}
8200
8201
8202/**
8203 * Pops a dword from the stack.
8204 *
8205 * @returns Strict VBox status code.
8206 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8207 * @param pu32Value Where to store the popped value.
8208 */
8209VBOXSTRICTRC iemMemStackPopU32(PVMCPUCC pVCpu, uint32_t *pu32Value) RT_NOEXCEPT
8210{
8211 /* Increment the stack pointer. */
8212 uint64_t uNewRsp;
8213 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 4, &uNewRsp);
8214
8215 /* Fetch the dword the lazy way. */
8216 uint32_t const *pu32Src;
8217 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop,
8218 IEM_ACCESS_STACK_R, sizeof(*pu32Src) - 1);
8219 if (rc == VINF_SUCCESS)
8220 {
8221 *pu32Value = *pu32Src;
8222 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
8223
8224 /* Commit the new RSP value. */
8225 if (rc == VINF_SUCCESS)
8226 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8227 }
8228
8229 return rc;
8230}
8231
8232
8233/**
8234 * Pops a qword from the stack.
8235 *
8236 * @returns Strict VBox status code.
8237 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8238 * @param pu64Value Where to store the popped value.
8239 */
8240VBOXSTRICTRC iemMemStackPopU64(PVMCPUCC pVCpu, uint64_t *pu64Value) RT_NOEXCEPT
8241{
8242 /* Increment the stack pointer. */
8243 uint64_t uNewRsp;
8244 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 8, &uNewRsp);
8245
8246 /* Fetch the qword the lazy way. */
8247 uint64_t const *pu64Src;
8248 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop,
8249 IEM_ACCESS_STACK_R, sizeof(*pu64Src) - 1);
8250 if (rc == VINF_SUCCESS)
8251 {
8252 *pu64Value = *pu64Src;
8253 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
8254
8255 /* Commit the new RSP value. */
8256 if (rc == VINF_SUCCESS)
8257 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8258 }
8259
8260 return rc;
8261}
8262
8263
8264/**
8265 * Pushes a word onto the stack, using a temporary stack pointer.
8266 *
8267 * @returns Strict VBox status code.
8268 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8269 * @param u16Value The value to push.
8270 * @param pTmpRsp Pointer to the temporary stack pointer.
8271 */
8272VBOXSTRICTRC iemMemStackPushU16Ex(PVMCPUCC pVCpu, uint16_t u16Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8273{
8274 /* Decrement the stack pointer. */
8275 RTUINT64U NewRsp = *pTmpRsp;
8276 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 2);
8277
8278 /* Write the word the lazy way. */
8279 uint16_t *pu16Dst;
8280 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop,
8281 IEM_ACCESS_STACK_W, sizeof(*pu16Dst) - 1);
8282 if (rc == VINF_SUCCESS)
8283 {
8284 *pu16Dst = u16Value;
8285 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
8286 }
8287
8288 /* Commit the new RSP value unless an access handler made trouble. */
8289 if (rc == VINF_SUCCESS)
8290 *pTmpRsp = NewRsp;
8291
8292 return rc;
8293}
8294
8295
8296/**
8297 * Pushes a dword onto the stack, using a temporary stack pointer.
8298 *
8299 * @returns Strict VBox status code.
8300 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8301 * @param u32Value The value to push.
8302 * @param pTmpRsp Pointer to the temporary stack pointer.
8303 */
8304VBOXSTRICTRC iemMemStackPushU32Ex(PVMCPUCC pVCpu, uint32_t u32Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8305{
8306 /* Decrement the stack pointer. */
8307 RTUINT64U NewRsp = *pTmpRsp;
8308 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 4);
8309
8310 /* Write the dword the lazy way. */
8311 uint32_t *pu32Dst;
8312 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop,
8313 IEM_ACCESS_STACK_W, sizeof(*pu32Dst) - 1);
8314 if (rc == VINF_SUCCESS)
8315 {
8316 *pu32Dst = u32Value;
8317 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
8318 }
8319
8320 /* Commit the new RSP value unless an access handler made trouble. */
8321 if (rc == VINF_SUCCESS)
8322 *pTmpRsp = NewRsp;
8323
8324 return rc;
8325}
8326
8327
8328/**
8329 * Pushes a qword onto the stack, using a temporary stack pointer.
8330 *
8331 * @returns Strict VBox status code.
8332 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8333 * @param u64Value The value to push.
8334 * @param pTmpRsp Pointer to the temporary stack pointer.
8335 */
8336VBOXSTRICTRC iemMemStackPushU64Ex(PVMCPUCC pVCpu, uint64_t u64Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8337{
8338 /* Decrement the stack pointer. */
8339 RTUINT64U NewRsp = *pTmpRsp;
8340 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 8);
8341
8342 /* Write the qword the lazy way. */
8343 uint64_t *pu64Dst;
8344 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop,
8345 IEM_ACCESS_STACK_W, sizeof(*pu64Dst) - 1);
8346 if (rc == VINF_SUCCESS)
8347 {
8348 *pu64Dst = u64Value;
8349 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
8350 }
8351
8352 /* Commit the new RSP value unless an access handler made trouble. */
8353 if (rc == VINF_SUCCESS)
8354 *pTmpRsp = NewRsp;
8355
8356 return rc;
8357}
8358
8359
8360/**
8361 * Pops a word from the stack, using a temporary stack pointer.
8362 *
8363 * @returns Strict VBox status code.
8364 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8365 * @param pu16Value Where to store the popped value.
8366 * @param pTmpRsp Pointer to the temporary stack pointer.
8367 */
8368VBOXSTRICTRC iemMemStackPopU16Ex(PVMCPUCC pVCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8369{
8370 /* Increment the stack pointer. */
8371 RTUINT64U NewRsp = *pTmpRsp;
8372 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 2);
8373
8374 /* Fetch the word the lazy way. */
8375 uint16_t const *pu16Src;
8376 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop,
8377 IEM_ACCESS_STACK_R, sizeof(*pu16Src) - 1);
8378 if (rc == VINF_SUCCESS)
8379 {
8380 *pu16Value = *pu16Src;
8381 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
8382
8383 /* Commit the new RSP value. */
8384 if (rc == VINF_SUCCESS)
8385 *pTmpRsp = NewRsp;
8386 }
8387
8388 return rc;
8389}
8390
8391
8392/**
8393 * Pops a dword from the stack, using a temporary stack pointer.
8394 *
8395 * @returns Strict VBox status code.
8396 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8397 * @param pu32Value Where to store the popped value.
8398 * @param pTmpRsp Pointer to the temporary stack pointer.
8399 */
8400VBOXSTRICTRC iemMemStackPopU32Ex(PVMCPUCC pVCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8401{
8402 /* Increment the stack pointer. */
8403 RTUINT64U NewRsp = *pTmpRsp;
8404 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 4);
8405
8406 /* Fetch the dword the lazy way. */
8407 uint32_t const *pu32Src;
8408 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop,
8409 IEM_ACCESS_STACK_R, sizeof(*pu32Src) - 1);
8410 if (rc == VINF_SUCCESS)
8411 {
8412 *pu32Value = *pu32Src;
8413 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
8414
8415 /* Commit the new RSP value. */
8416 if (rc == VINF_SUCCESS)
8417 *pTmpRsp = NewRsp;
8418 }
8419
8420 return rc;
8421}
8422
8423
8424/**
8425 * Pops a qword from the stack, using a temporary stack pointer.
8426 *
8427 * @returns Strict VBox status code.
8428 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8429 * @param pu64Value Where to store the popped value.
8430 * @param pTmpRsp Pointer to the temporary stack pointer.
8431 */
8432VBOXSTRICTRC iemMemStackPopU64Ex(PVMCPUCC pVCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8433{
8434 /* Increment the stack pointer. */
8435 RTUINT64U NewRsp = *pTmpRsp;
8436 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 8);
8437
8438 /* Fetch the qword the lazy way. */
8439 uint64_t const *pu64Src;
8440 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop,
8441 IEM_ACCESS_STACK_R, sizeof(*pu64Src) - 1);
8442 if (rcStrict == VINF_SUCCESS)
8443 {
8444 *pu64Value = *pu64Src;
8445 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
8446
8447 /* Commit the new RSP value. */
8448 if (rcStrict == VINF_SUCCESS)
8449 *pTmpRsp = NewRsp;
8450 }
8451
8452 return rcStrict;
8453}
8454
8455
8456/**
8457 * Begin a special stack push (used by interrupts, exceptions and such).
8458 *
8459 * This will raise \#SS or \#PF if appropriate.
8460 *
8461 * @returns Strict VBox status code.
8462 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8463 * @param cbMem The number of bytes to push onto the stack.
8464 * @param cbAlign The alignment mask (7, 3, 1).
8465 * @param ppvMem Where to return the pointer to the stack memory.
8466 * As with the other memory functions this could be
8467 * direct access or bounce buffered access, so
8468 * don't commit the register until the commit call
8469 * succeeds.
8470 * @param puNewRsp Where to return the new RSP value. This must be
8471 * passed unchanged to
8472 * iemMemStackPushCommitSpecial().
8473 */
8474VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
8475 void **ppvMem, uint64_t *puNewRsp) RT_NOEXCEPT
8476{
8477 Assert(cbMem < UINT8_MAX);
8478 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, (uint8_t)cbMem, puNewRsp);
8479 return iemMemMap(pVCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop,
8480 IEM_ACCESS_STACK_W, cbAlign);
8481}
8482
8483
8484/**
8485 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
8486 *
8487 * This will update the rSP.
8488 *
8489 * @returns Strict VBox status code.
8490 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8491 * @param pvMem The pointer returned by
8492 * iemMemStackPushBeginSpecial().
8493 * @param uNewRsp The new RSP value returned by
8494 * iemMemStackPushBeginSpecial().
8495 */
8496VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPUCC pVCpu, void *pvMem, uint64_t uNewRsp) RT_NOEXCEPT
8497{
8498 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem, IEM_ACCESS_STACK_W);
8499 if (rcStrict == VINF_SUCCESS)
8500 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8501 return rcStrict;
8502}
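/*
 * Illustrative sketch: how the begin/commit pair above might be used to push a
 * 16-bit value from an instruction helper.  The helper name
 * iemExamplePushU16Special is hypothetical; everything else follows the
 * signatures of iemMemStackPushBeginSpecial and iemMemStackPushCommitSpecial
 * as declared above.
 */
#if 0 /* example only, not built */
static VBOXSTRICTRC iemExamplePushU16Special(PVMCPUCC pVCpu, uint16_t u16Value)
{
    void    *pvStack = NULL;
    uint64_t uNewRsp = 0;
    /* Reserve 2 bytes on the stack; may return an #SS/#PF strict status. */
    VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pVCpu, sizeof(uint16_t), 1 /*cbAlign*/, &pvStack, &uNewRsp);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    *(uint16_t *)pvStack = u16Value;
    /* Commits the memory write and, on success, updates RSP. */
    return iemMemStackPushCommitSpecial(pVCpu, pvStack, uNewRsp);
}
#endif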
8503
8504
8505/**
8506 * Begin a special stack pop (used by iret, retf and such).
8507 *
8508 * This will raise \#SS or \#PF if appropriate.
8509 *
8510 * @returns Strict VBox status code.
8511 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8512 * @param cbMem The number of bytes to pop from the stack.
8513 * @param cbAlign The alignment mask (7, 3, 1).
8514 * @param ppvMem Where to return the pointer to the stack memory.
8515 * @param puNewRsp Where to return the new RSP value. This must be
8516 * assigned to CPUMCTX::rsp manually some time
8517 * after iemMemStackPopDoneSpecial() has been
8518 * called.
8519 */
8520VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
8521 void const **ppvMem, uint64_t *puNewRsp) RT_NOEXCEPT
8522{
8523 Assert(cbMem < UINT8_MAX);
8524 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, (uint8_t)cbMem, puNewRsp);
8525 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R, cbAlign);
8526}
8527
8528
8529/**
8530 * Continue a special stack pop (used by iret and retf), for the purpose of
8531 * retrieving a new stack pointer.
8532 *
8533 * This will raise \#SS or \#PF if appropriate.
8534 *
8535 * @returns Strict VBox status code.
8536 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8537 * @param off Offset from the top of the stack. This is zero
8538 * except in the retf case.
8539 * @param cbMem The number of bytes to pop from the stack.
8540 * @param ppvMem Where to return the pointer to the stack memory.
8541 * @param uCurNewRsp The current uncommitted RSP value. (No need to
8542 * return this because all use of this function is
8543 * to retrieve a new value and anything we return
8544 * here would be discarded.)
8545 */
8546VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPUCC pVCpu, size_t off, size_t cbMem,
8547 void const **ppvMem, uint64_t uCurNewRsp) RT_NOEXCEPT
8548{
8549 Assert(cbMem < UINT8_MAX);
8550
8551 /* The essence of iemRegGetRspForPopEx and friends: */ /** @todo put this into an inlined function? */
8552 RTGCPTR GCPtrTop;
8553 if (IEM_IS_64BIT_CODE(pVCpu))
8554 GCPtrTop = uCurNewRsp;
8555 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
8556 GCPtrTop = (uint32_t)uCurNewRsp;
8557 else
8558 GCPtrTop = (uint16_t)uCurNewRsp;
8559
8560 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop + off, IEM_ACCESS_STACK_R,
8561 0 /* checked in iemMemStackPopBeginSpecial */);
8562}
8563
8564
8565/**
8566 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
8567 * iemMemStackPopContinueSpecial).
8568 *
8569 * The caller will manually commit the rSP.
8570 *
8571 * @returns Strict VBox status code.
8572 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8573 * @param pvMem The pointer returned by
8574 * iemMemStackPopBeginSpecial() or
8575 * iemMemStackPopContinueSpecial().
8576 */
8577VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPUCC pVCpu, void const *pvMem) RT_NOEXCEPT
8578{
8579 return iemMemCommitAndUnmap(pVCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
8580}
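/*
 * Illustrative sketch: how the special stack pop helpers above might be paired.
 * The helper name iemExamplePopU16Special is hypothetical; note that RSP is
 * committed manually by the caller, as documented for iemMemStackPopBeginSpecial.
 */
#if 0 /* example only, not built */
static VBOXSTRICTRC iemExamplePopU16Special(PVMCPUCC pVCpu, uint16_t *pu16Value)
{
    uint16_t const *pu16Src = NULL;
    uint64_t        uNewRsp = 0;
    VBOXSTRICTRC rcStrict = iemMemStackPopBeginSpecial(pVCpu, sizeof(*pu16Src), 1 /*cbAlign*/,
                                                       (void const **)&pu16Src, &uNewRsp);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    *pu16Value = *pu16Src;
    rcStrict = iemMemStackPopDoneSpecial(pVCpu, pu16Src);
    if (rcStrict == VINF_SUCCESS)
        pVCpu->cpum.GstCtx.rsp = uNewRsp; /* the manual RSP commit */
    return rcStrict;
}
#endif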
8581
8582
8583/**
8584 * Fetches a system table byte.
8585 *
8586 * @returns Strict VBox status code.
8587 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8588 * @param pbDst Where to return the byte.
8589 * @param iSegReg The index of the segment register to use for
8590 * this access. The base and limits are checked.
8591 * @param GCPtrMem The address of the guest memory.
8592 */
8593VBOXSTRICTRC iemMemFetchSysU8(PVMCPUCC pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8594{
8595 /* The lazy approach for now... */
8596 uint8_t const *pbSrc;
8597 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8598 if (rc == VINF_SUCCESS)
8599 {
8600 *pbDst = *pbSrc;
8601 rc = iemMemCommitAndUnmap(pVCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
8602 }
8603 return rc;
8604}
8605
8606
8607/**
8608 * Fetches a system table word.
8609 *
8610 * @returns Strict VBox status code.
8611 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8612 * @param pu16Dst Where to return the word.
8613 * @param iSegReg The index of the segment register to use for
8614 * this access. The base and limits are checked.
8615 * @param GCPtrMem The address of the guest memory.
8616 */
8617VBOXSTRICTRC iemMemFetchSysU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8618{
8619 /* The lazy approach for now... */
8620 uint16_t const *pu16Src;
8621 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8622 if (rc == VINF_SUCCESS)
8623 {
8624 *pu16Dst = *pu16Src;
8625 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
8626 }
8627 return rc;
8628}
8629
8630
8631/**
8632 * Fetches a system table dword.
8633 *
8634 * @returns Strict VBox status code.
8635 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8636 * @param pu32Dst Where to return the dword.
8637 * @param iSegReg The index of the segment register to use for
8638 * this access. The base and limits are checked.
8639 * @param GCPtrMem The address of the guest memory.
8640 */
8641VBOXSTRICTRC iemMemFetchSysU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8642{
8643 /* The lazy approach for now... */
8644 uint32_t const *pu32Src;
8645 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8646 if (rc == VINF_SUCCESS)
8647 {
8648 *pu32Dst = *pu32Src;
8649 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
8650 }
8651 return rc;
8652}
8653
8654
8655/**
8656 * Fetches a system table qword.
8657 *
8658 * @returns Strict VBox status code.
8659 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8660 * @param pu64Dst Where to return the qword.
8661 * @param iSegReg The index of the segment register to use for
8662 * this access. The base and limits are checked.
8663 * @param GCPtrMem The address of the guest memory.
8664 */
8665VBOXSTRICTRC iemMemFetchSysU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8666{
8667 /* The lazy approach for now... */
8668 uint64_t const *pu64Src;
8669 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8670 if (rc == VINF_SUCCESS)
8671 {
8672 *pu64Dst = *pu64Src;
8673 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
8674 }
8675 return rc;
8676}
8677
8678
8679/**
8680 * Fetches a descriptor table entry with caller specified error code.
8681 *
8682 * @returns Strict VBox status code.
8683 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8684 * @param pDesc Where to return the descriptor table entry.
8685 * @param uSel The selector which table entry to fetch.
8686 * @param uXcpt The exception to raise on table lookup error.
8687 * @param uErrorCode The error code associated with the exception.
8688 */
8689static VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,
8690 uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT
8691{
8692 AssertPtr(pDesc);
8693 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
8694
8695 /** @todo did the 286 require all 8 bytes to be accessible? */
8696 /*
8697 * Get the selector table base and check bounds.
8698 */
8699 RTGCPTR GCPtrBase;
8700 if (uSel & X86_SEL_LDT)
8701 {
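        /* Note: OR-ing in X86_SEL_RPL_LDT (the RPL and TI bits, i.e. 7) points at the
           last byte of the 8-byte descriptor, so the check below ensures the whole
           descriptor lies within the LDT limit. */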
8702 if ( !pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present
8703 || (uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.ldtr.u32Limit )
8704 {
8705 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
8706 uSel, pVCpu->cpum.GstCtx.ldtr.u32Limit, pVCpu->cpum.GstCtx.ldtr.Sel));
8707 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8708 uErrorCode, 0);
8709 }
8710
8711 Assert(pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present);
8712 GCPtrBase = pVCpu->cpum.GstCtx.ldtr.u64Base;
8713 }
8714 else
8715 {
8716 if ((uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.gdtr.cbGdt)
8717 {
8718 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pVCpu->cpum.GstCtx.gdtr.cbGdt));
8719 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8720 uErrorCode, 0);
8721 }
8722 GCPtrBase = pVCpu->cpum.GstCtx.gdtr.pGdt;
8723 }
8724
8725 /*
8726 * Read the legacy descriptor and maybe the long mode extensions if
8727 * required.
8728 */
8729 VBOXSTRICTRC rcStrict;
8730 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)
8731 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
8732 else
8733 {
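        /* On 286-class targets the last word of the descriptor is reserved, so read
           the three defined words and zero the rest instead of doing a qword read. */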
8734 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);
8735 if (rcStrict == VINF_SUCCESS)
8736 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);
8737 if (rcStrict == VINF_SUCCESS)
8738 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);
8739 if (rcStrict == VINF_SUCCESS)
8740 pDesc->Legacy.au16[3] = 0;
8741 else
8742 return rcStrict;
8743 }
8744
8745 if (rcStrict == VINF_SUCCESS)
8746 {
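        /* In long mode, system descriptors are 16 bytes wide; fetch the high half too,
           provided the extended entry still fits within the table limit. */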
8747 if ( !IEM_IS_LONG_MODE(pVCpu)
8748 || pDesc->Legacy.Gen.u1DescType)
8749 pDesc->Long.au64[1] = 0;
8750 else if ( (uint32_t)(uSel | X86_SEL_RPL_LDT) + 8
8751 <= (uSel & X86_SEL_LDT ? pVCpu->cpum.GstCtx.ldtr.u32Limit : pVCpu->cpum.GstCtx.gdtr.cbGdt))
8752 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
8753 else
8754 {
8755 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
8756 /** @todo is this the right exception? */
8757 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
8758 }
8759 }
8760 return rcStrict;
8761}
8762
8763
8764/**
8765 * Fetches a descriptor table entry.
8766 *
8767 * @returns Strict VBox status code.
8768 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8769 * @param pDesc Where to return the descriptor table entry.
8770 * @param uSel The selector which table entry to fetch.
8771 * @param uXcpt The exception to raise on table lookup error.
8772 */
8773VBOXSTRICTRC iemMemFetchSelDesc(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt) RT_NOEXCEPT
8774{
8775 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
8776}
8777
8778
8779/**
8780 * Marks the selector descriptor as accessed (only non-system descriptors).
8781 *
8782 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
8783 * will therefore skip the limit checks.
8784 *
8785 * @returns Strict VBox status code.
8786 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8787 * @param uSel The selector.
8788 */
8789VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
8790{
8791 /*
8792 * Get the selector table base and calculate the entry address.
8793 */
8794 RTGCPTR GCPtr = uSel & X86_SEL_LDT
8795 ? pVCpu->cpum.GstCtx.ldtr.u64Base
8796 : pVCpu->cpum.GstCtx.gdtr.pGdt;
8797 GCPtr += uSel & X86_SEL_MASK;
8798
8799 /*
8800 * ASMAtomicBitSet will assert if the address is misaligned, so do some
8801 * ugly stuff to avoid this. This will make sure it's an atomic access
8802 * as well as more or less remove any question about 8-bit or 32-bit accesses.
8803 */
8804 VBOXSTRICTRC rcStrict;
8805 uint32_t volatile *pu32;
8806 if ((GCPtr & 3) == 0)
8807 {
8808 /* The normal case, map the 32-bit bits around the accessed bit (40). */
8809 GCPtr += 2 + 2;
8810 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0);
8811 if (rcStrict != VINF_SUCCESS)
8812 return rcStrict;
8813 ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
8814 }
8815 else
8816 {
8817 /* The misaligned GDT/LDT case, map the whole thing. */
8818 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0);
8819 if (rcStrict != VINF_SUCCESS)
8820 return rcStrict;
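        /* The switch re-bases the bitmap pointer on the next host-aligned address and
           adjusts the bit index accordingly, so that descriptor bit 40 (the accessed
           bit) is still the one being set atomically. */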
8821 switch ((uintptr_t)pu32 & 3)
8822 {
8823 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
8824 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
8825 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
8826 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
8827 }
8828 }
8829
8830 return iemMemCommitAndUnmap(pVCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
8831}
8832
8833/** @} */
8834
8835/** @name Opcode Helpers.
8836 * @{
8837 */
8838
8839/**
8840 * Calculates the effective address of a ModR/M memory operand.
8841 *
8842 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8843 *
8844 * @returns Strict VBox status code.
8845 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8846 * @param bRm The ModRM byte.
8847 * @param cbImmAndRspOffset - First byte: The size of any immediate
8848 * following the effective address opcode bytes
8849 * (only for RIP relative addressing).
8850 * - Second byte: RSP displacement (for POP [ESP]).
8851 * @param pGCPtrEff Where to return the effective address.
8852 */
8853VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset, PRTGCPTR pGCPtrEff) RT_NOEXCEPT
8854{
8855 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
8856# define SET_SS_DEF() \
8857 do \
8858 { \
8859 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
8860 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
8861 } while (0)
8862
8863 if (!IEM_IS_64BIT_CODE(pVCpu))
8864 {
8865/** @todo Check the effective address size crap! */
8866 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
8867 {
8868 uint16_t u16EffAddr;
8869
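            /* 16-bit addressing: EA = base (BX or BP) + index (SI or DI) + displacement,
               selected by the R/M field; the MOD field picks disp0, disp8 or disp16. */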
8870 /* Handle the disp16 form with no registers first. */
8871 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
8872 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
8873 else
8874 {
8875 /* Get the displacement. */
8876 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8877 {
8878 case 0: u16EffAddr = 0; break;
8879 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
8880 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
8881 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
8882 }
8883
8884 /* Add the base and index registers to the disp. */
8885 switch (bRm & X86_MODRM_RM_MASK)
8886 {
8887 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
8888 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
8889 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
8890 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
8891 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
8892 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
8893 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
8894 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
8895 }
8896 }
8897
8898 *pGCPtrEff = u16EffAddr;
8899 }
8900 else
8901 {
8902 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8903 uint32_t u32EffAddr;
8904
8905 /* Handle the disp32 form with no registers first. */
8906 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8907 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
8908 else
8909 {
8910 /* Get the register (or SIB) value. */
8911 switch ((bRm & X86_MODRM_RM_MASK))
8912 {
8913 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8914 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8915 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8916 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8917 case 4: /* SIB */
8918 {
8919 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8920
8921 /* Get the index and scale it. */
8922 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
8923 {
8924 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8925 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8926 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8927 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8928 case 4: u32EffAddr = 0; /*none */ break;
8929 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
8930 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8931 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8932 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8933 }
8934 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8935
8936 /* add base */
8937 switch (bSib & X86_SIB_BASE_MASK)
8938 {
8939 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
8940 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
8941 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
8942 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
8943 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8944 case 5:
8945 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8946 {
8947 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
8948 SET_SS_DEF();
8949 }
8950 else
8951 {
8952 uint32_t u32Disp;
8953 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8954 u32EffAddr += u32Disp;
8955 }
8956 break;
8957 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
8958 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
8959 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8960 }
8961 break;
8962 }
8963 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
8964 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8965 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8966 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8967 }
8968
8969 /* Get and add the displacement. */
8970 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8971 {
8972 case 0:
8973 break;
8974 case 1:
8975 {
8976 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8977 u32EffAddr += i8Disp;
8978 break;
8979 }
8980 case 2:
8981 {
8982 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8983 u32EffAddr += u32Disp;
8984 break;
8985 }
8986 default:
8987 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
8988 }
8989
8990 }
8991 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
8992 *pGCPtrEff = u32EffAddr;
8993 else
8994 {
8995 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
8996 *pGCPtrEff = u32EffAddr & UINT16_MAX;
8997 }
8998 }
8999 }
9000 else
9001 {
9002 uint64_t u64EffAddr;
9003
9004 /* Handle the rip+disp32 form with no registers first. */
9005 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9006 {
9007 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
9008 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
9009 }
9010 else
9011 {
9012 /* Get the register (or SIB) value. */
9013 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
9014 {
9015 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9016 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9017 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9018 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9019 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
9020 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9021 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9022 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9023 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9024 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9025 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9026 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9027 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9028 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9029 /* SIB */
9030 case 4:
9031 case 12:
9032 {
9033 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9034
9035 /* Get the index and scale it. */
9036 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
9037 {
9038 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9039 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9040 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9041 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9042 case 4: u64EffAddr = 0; /*none */ break;
9043 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
9044 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9045 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9046 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9047 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9048 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9049 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9050 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
9051 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9052 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9053 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9054 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9055 }
9056 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9057
9058 /* add base */
9059 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
9060 {
9061 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
9062 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
9063 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
9064 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
9065 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
9066 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
9067 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
9068 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
9069 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
9070 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
9071 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
9072 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
9073 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
9074 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
9075 /* complicated encodings */
9076 case 5:
9077 case 13:
9078 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9079 {
9080 if (!pVCpu->iem.s.uRexB)
9081 {
9082 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
9083 SET_SS_DEF();
9084 }
9085 else
9086 u64EffAddr += pVCpu->cpum.GstCtx.r13;
9087 }
9088 else
9089 {
9090 uint32_t u32Disp;
9091 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9092 u64EffAddr += (int32_t)u32Disp;
9093 }
9094 break;
9095 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9096 }
9097 break;
9098 }
9099 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9100 }
9101
9102 /* Get and add the displacement. */
9103 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9104 {
9105 case 0:
9106 break;
9107 case 1:
9108 {
9109 int8_t i8Disp;
9110 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9111 u64EffAddr += i8Disp;
9112 break;
9113 }
9114 case 2:
9115 {
9116 uint32_t u32Disp;
9117 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9118 u64EffAddr += (int32_t)u32Disp;
9119 break;
9120 }
9121 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
9122 }
9123
9124 }
9125
9126 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
9127 *pGCPtrEff = u64EffAddr;
9128 else
9129 {
9130 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9131 *pGCPtrEff = u64EffAddr & UINT32_MAX;
9132 }
9133 }
9134
9135 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
9136 return VINF_SUCCESS;
9137}
9138
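/*
 * Hypothetical usage sketch for iemOpHlpCalcRmEffAddr (not an actual caller in
 * this file; real decoding goes through IEM_MC_CALC_RM_EFF_ADDR).  It only
 * illustrates how cbImmAndRspOffset is packed: low byte = size of an immediate
 * following the effective address bytes, second byte = extra RSP displacement.
 * It assumes bRm encodes a memory operand (mod != 3), as the caller must check.
 *
 *      uint8_t bRm;
 *      IEM_OPCODE_GET_NEXT_U8(&bRm);               // the ModR/M byte
 *      RTGCPTR GCPtrEff;
 *      // a 4 byte immediate follows the ModR/M/SIB/disp bytes, no RSP displacement:
 *      VBOXSTRICTRC rcStrict = iemOpHlpCalcRmEffAddr(pVCpu, bRm, 4 | (0 << 8), &GCPtrEff);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 */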
9139
9140#ifdef IEM_WITH_SETJMP
9141/**
9142 * Calculates the effective address of a ModR/M memory operand.
9143 *
9144 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
9145 *
9146 * May longjmp on internal error.
9147 *
9148 * @return The effective address.
9149 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9150 * @param bRm The ModRM byte.
9151 * @param cbImmAndRspOffset - First byte: The size of any immediate
9152 * following the effective address opcode bytes
9153 * (only for RIP relative addressing).
9154 * - Second byte: RSP displacement (for POP [ESP]).
9155 */
9156RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset) IEM_NOEXCEPT_MAY_LONGJMP
9157{
9158 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
9159# define SET_SS_DEF() \
9160 do \
9161 { \
9162 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
9163 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
9164 } while (0)
9165
9166 if (!IEM_IS_64BIT_CODE(pVCpu))
9167 {
9168/** @todo Check the effective address size crap! */
9169 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
9170 {
9171 uint16_t u16EffAddr;
9172
9173 /* Handle the disp16 form with no registers first. */
9174 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
9175 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
9176 else
9177 {
9178                /* Get the displacement. */
9179 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9180 {
9181 case 0: u16EffAddr = 0; break;
9182 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
9183 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
9184 default: AssertFailedStmt(IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_1)); /* (caller checked for these) */
9185 }
9186
9187 /* Add the base and index registers to the disp. */
9188 switch (bRm & X86_MODRM_RM_MASK)
9189 {
9190 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
9191 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
9192 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
9193 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
9194 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
9195 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
9196 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
9197 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
9198 }
9199 }
9200
9201 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
9202 return u16EffAddr;
9203 }
9204
9205 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9206 uint32_t u32EffAddr;
9207
9208 /* Handle the disp32 form with no registers first. */
9209 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9210 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
9211 else
9212 {
9213 /* Get the register (or SIB) value. */
9214 switch ((bRm & X86_MODRM_RM_MASK))
9215 {
9216 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
9217 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
9218 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
9219 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
9220 case 4: /* SIB */
9221 {
9222 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9223
9224 /* Get the index and scale it. */
9225 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
9226 {
9227 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
9228 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
9229 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
9230 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
9231 case 4: u32EffAddr = 0; /*none */ break;
9232 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
9233 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
9234 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
9235 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9236 }
9237 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9238
9239 /* add base */
9240 switch (bSib & X86_SIB_BASE_MASK)
9241 {
9242 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
9243 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
9244 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
9245 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
9246 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
9247 case 5:
9248 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9249 {
9250 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
9251 SET_SS_DEF();
9252 }
9253 else
9254 {
9255 uint32_t u32Disp;
9256 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9257 u32EffAddr += u32Disp;
9258 }
9259 break;
9260 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
9261 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
9262 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9263 }
9264 break;
9265 }
9266 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
9267 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
9268 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
9269 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9270 }
9271
9272 /* Get and add the displacement. */
9273 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9274 {
9275 case 0:
9276 break;
9277 case 1:
9278 {
9279 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9280 u32EffAddr += i8Disp;
9281 break;
9282 }
9283 case 2:
9284 {
9285 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9286 u32EffAddr += u32Disp;
9287 break;
9288 }
9289 default:
9290 AssertFailedStmt(IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_2)); /* (caller checked for these) */
9291 }
9292 }
9293
9294 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
9295 {
9296 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
9297 return u32EffAddr;
9298 }
9299 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
9300 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX32\n", u32EffAddr & UINT16_MAX));
9301 return u32EffAddr & UINT16_MAX;
9302 }
9303
9304 uint64_t u64EffAddr;
9305
9306 /* Handle the rip+disp32 form with no registers first. */
9307 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9308 {
9309 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
9310 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
9311 }
9312 else
9313 {
9314 /* Get the register (or SIB) value. */
9315 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
9316 {
9317 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9318 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9319 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9320 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9321 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
9322 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9323 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9324 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9325 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9326 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9327 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9328 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9329 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9330 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9331 /* SIB */
9332 case 4:
9333 case 12:
9334 {
9335 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9336
9337 /* Get the index and scale it. */
9338 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
9339 {
9340 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9341 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9342 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9343 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9344 case 4: u64EffAddr = 0; /*none */ break;
9345 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
9346 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9347 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9348 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9349 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9350 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9351 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9352 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
9353 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9354 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9355 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9356 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9357 }
9358 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9359
9360 /* add base */
9361 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
9362 {
9363 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
9364 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
9365 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
9366 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
9367 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
9368 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
9369 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
9370 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
9371 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
9372 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
9373 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
9374 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
9375 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
9376 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
9377 /* complicated encodings */
9378 case 5:
9379 case 13:
9380 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9381 {
9382 if (!pVCpu->iem.s.uRexB)
9383 {
9384 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
9385 SET_SS_DEF();
9386 }
9387 else
9388 u64EffAddr += pVCpu->cpum.GstCtx.r13;
9389 }
9390 else
9391 {
9392 uint32_t u32Disp;
9393 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9394 u64EffAddr += (int32_t)u32Disp;
9395 }
9396 break;
9397 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9398 }
9399 break;
9400 }
9401 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9402 }
9403
9404 /* Get and add the displacement. */
9405 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9406 {
9407 case 0:
9408 break;
9409 case 1:
9410 {
9411 int8_t i8Disp;
9412 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9413 u64EffAddr += i8Disp;
9414 break;
9415 }
9416 case 2:
9417 {
9418 uint32_t u32Disp;
9419 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9420 u64EffAddr += (int32_t)u32Disp;
9421 break;
9422 }
9423 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
9424 }
9425
9426 }
9427
9428 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
9429 {
9430 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
9431 return u64EffAddr;
9432 }
9433 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9434 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
9435 return u64EffAddr & UINT32_MAX;
9436}
9437#endif /* IEM_WITH_SETJMP */
9438
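/*
 * Worked example of the 32-bit ModR/M + SIB decoding performed by the helpers
 * above (a hypothetical encoding, not tied to any particular caller):
 *
 *      bRm  = 0x44  ->  mod=01 (disp8 follows), rm=100 (SIB byte follows)
 *      bSib = 0x58  ->  scale=01 (x2), index=011 (EBX), base=000 (EAX)
 *
 * The code above therefore computes:
 *
 *      u32EffAddr   = EBX;             // index
 *      u32EffAddr <<= 1;               // scale x2
 *      u32EffAddr  += EAX;             // base
 *      u32EffAddr  += (int8_t)disp8;   // sign-extended disp8
 *
 * i.e. the operand address is EAX + EBX*2 + disp8, with DS as the default
 * segment since neither EBP nor ESP is the base (SET_SS_DEF() is not invoked).
 */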
9439
9440/**
9441 * Calculates the effective address of a ModR/M memory operand, extended version
9442 * for use in the recompilers.
9443 *
9444 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
9445 *
9446 * @return Strict VBox status code.
9447 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9448 * @param bRm The ModRM byte.
9449 * @param cbImmAndRspOffset - First byte: The size of any immediate
9450 * following the effective address opcode bytes
9451 * (only for RIP relative addressing).
9452 * - Second byte: RSP displacement (for POP [ESP]).
9453 * @param pGCPtrEff Where to return the effective address.
9454 * @param puInfo Extra info: 32-bit displacement (bits 31:0) and
9455 * SIB byte (bits 39:32).
9456 */
9457VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset, PRTGCPTR pGCPtrEff, uint64_t *puInfo) RT_NOEXCEPT
9458{
9459    Log5(("iemOpHlpCalcRmEffAddrEx: bRm=%#x\n", bRm));
9460# define SET_SS_DEF() \
9461 do \
9462 { \
9463 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
9464 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
9465 } while (0)
9466
9467 uint64_t uInfo;
9468 if (!IEM_IS_64BIT_CODE(pVCpu))
9469 {
9470/** @todo Check the effective address size crap! */
9471 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
9472 {
9473 uint16_t u16EffAddr;
9474
9475 /* Handle the disp16 form with no registers first. */
9476 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
9477 {
9478 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
9479 uInfo = u16EffAddr;
9480 }
9481 else
9482 {
9483                /* Get the displacement. */
9484 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9485 {
9486 case 0: u16EffAddr = 0; break;
9487 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
9488 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
9489 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
9490 }
9491 uInfo = u16EffAddr;
9492
9493 /* Add the base and index registers to the disp. */
9494 switch (bRm & X86_MODRM_RM_MASK)
9495 {
9496 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
9497 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
9498 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
9499 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
9500 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
9501 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
9502 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
9503 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
9504 }
9505 }
9506
9507 *pGCPtrEff = u16EffAddr;
9508 }
9509 else
9510 {
9511 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9512 uint32_t u32EffAddr;
9513
9514 /* Handle the disp32 form with no registers first. */
9515 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9516 {
9517 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
9518 uInfo = u32EffAddr;
9519 }
9520 else
9521 {
9522 /* Get the register (or SIB) value. */
9523 uInfo = 0;
9524 switch ((bRm & X86_MODRM_RM_MASK))
9525 {
9526 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
9527 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
9528 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
9529 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
9530 case 4: /* SIB */
9531 {
9532 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9533 uInfo = (uint64_t)bSib << 32;
9534
9535 /* Get the index and scale it. */
9536 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
9537 {
9538 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
9539 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
9540 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
9541 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
9542 case 4: u32EffAddr = 0; /*none */ break;
9543 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
9544 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
9545 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
9546 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9547 }
9548 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9549
9550 /* add base */
9551 switch (bSib & X86_SIB_BASE_MASK)
9552 {
9553 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
9554 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
9555 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
9556 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
9557 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
9558 case 5:
9559 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9560 {
9561 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
9562 SET_SS_DEF();
9563 }
9564 else
9565 {
9566 uint32_t u32Disp;
9567 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9568 u32EffAddr += u32Disp;
9569 uInfo |= u32Disp;
9570 }
9571 break;
9572 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
9573 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
9574 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9575 }
9576 break;
9577 }
9578 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
9579 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
9580 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
9581 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9582 }
9583
9584 /* Get and add the displacement. */
9585 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9586 {
9587 case 0:
9588 break;
9589 case 1:
9590 {
9591 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9592 u32EffAddr += i8Disp;
9593 uInfo |= (uint32_t)(int32_t)i8Disp;
9594 break;
9595 }
9596 case 2:
9597 {
9598 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9599 u32EffAddr += u32Disp;
9600 uInfo |= (uint32_t)u32Disp;
9601 break;
9602 }
9603 default:
9604 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
9605 }
9606
9607 }
9608 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
9609 *pGCPtrEff = u32EffAddr;
9610 else
9611 {
9612 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
9613 *pGCPtrEff = u32EffAddr & UINT16_MAX;
9614 }
9615 }
9616 }
9617 else
9618 {
9619 uint64_t u64EffAddr;
9620
9621 /* Handle the rip+disp32 form with no registers first. */
9622 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9623 {
9624 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
9625 uInfo = (uint32_t)u64EffAddr;
9626 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
9627 }
9628 else
9629 {
9630 /* Get the register (or SIB) value. */
9631 uInfo = 0;
9632 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
9633 {
9634 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9635 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9636 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9637 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9638 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
9639 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9640 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9641 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9642 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9643 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9644 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9645 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9646 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9647 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9648 /* SIB */
9649 case 4:
9650 case 12:
9651 {
9652 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9653 uInfo = (uint64_t)bSib << 32;
9654
9655 /* Get the index and scale it. */
9656 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
9657 {
9658 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9659 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9660 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9661 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9662 case 4: u64EffAddr = 0; /*none */ break;
9663 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
9664 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9665 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9666 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9667 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9668 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9669 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9670 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
9671 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9672 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9673 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9674 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9675 }
9676 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9677
9678 /* add base */
9679 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
9680 {
9681 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
9682 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
9683 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
9684 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
9685 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
9686 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
9687 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
9688 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
9689 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
9690 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
9691 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
9692 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
9693 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
9694 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
9695 /* complicated encodings */
9696 case 5:
9697 case 13:
9698 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9699 {
9700 if (!pVCpu->iem.s.uRexB)
9701 {
9702 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
9703 SET_SS_DEF();
9704 }
9705 else
9706 u64EffAddr += pVCpu->cpum.GstCtx.r13;
9707 }
9708 else
9709 {
9710 uint32_t u32Disp;
9711 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9712 u64EffAddr += (int32_t)u32Disp;
9713 uInfo |= u32Disp;
9714 }
9715 break;
9716 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9717 }
9718 break;
9719 }
9720 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9721 }
9722
9723 /* Get and add the displacement. */
9724 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9725 {
9726 case 0:
9727 break;
9728 case 1:
9729 {
9730 int8_t i8Disp;
9731 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9732 u64EffAddr += i8Disp;
9733 uInfo |= (uint32_t)(int32_t)i8Disp;
9734 break;
9735 }
9736 case 2:
9737 {
9738 uint32_t u32Disp;
9739 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9740 u64EffAddr += (int32_t)u32Disp;
9741 uInfo |= u32Disp;
9742 break;
9743 }
9744 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
9745 }
9746
9747 }
9748
9749 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
9750 *pGCPtrEff = u64EffAddr;
9751 else
9752 {
9753 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9754 *pGCPtrEff = u64EffAddr & UINT32_MAX;
9755 }
9756 }
9757 *puInfo = uInfo;
9758
9759 Log5(("iemOpHlpCalcRmEffAddrEx: EffAddr=%#010RGv uInfo=%RX64\n", *pGCPtrEff, uInfo));
9760 return VINF_SUCCESS;
9761}
9762
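/*
 * Hypothetical sketch of how a recompiler caller might unpack the extra info
 * returned by iemOpHlpCalcRmEffAddrEx (layout per the doxygen above: bits 31:0
 * hold the 32-bit displacement, bits 39:32 the SIB byte):
 *
 *      RTGCPTR  GCPtrEff;
 *      uint64_t uInfo;
 *      VBOXSTRICTRC rcStrict = iemOpHlpCalcRmEffAddrEx(pVCpu, bRm, cbImmAndRspOffset, &GCPtrEff, &uInfo);
 *      if (rcStrict == VINF_SUCCESS)
 *      {
 *          uint32_t const u32Disp = (uint32_t)uInfo;           // bits 31:0
 *          uint8_t  const bSib    = (uint8_t)(uInfo >> 32);    // bits 39:32
 *          ...
 *      }
 */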
9763/** @} */
9764
9765
9766#ifdef LOG_ENABLED
9767/**
9768 * Logs the current instruction.
9769 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9770 * @param fSameCtx Set if we have the same context information as the VMM,
9771 * clear if we may have already executed an instruction in
9772 * our debug context. When clear, we assume IEMCPU holds
9773 * valid CPU mode info.
9774 *
9775 * The @a fSameCtx parameter is now misleading and obsolete.
9776 * @param pszFunction The IEM function doing the execution.
9777 */
9778static void iemLogCurInstr(PVMCPUCC pVCpu, bool fSameCtx, const char *pszFunction) RT_NOEXCEPT
9779{
9780# ifdef IN_RING3
9781 if (LogIs2Enabled())
9782 {
9783 char szInstr[256];
9784 uint32_t cbInstr = 0;
9785 if (fSameCtx)
9786 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
9787 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
9788 szInstr, sizeof(szInstr), &cbInstr);
9789 else
9790 {
9791 uint32_t fFlags = 0;
9792 switch (IEM_GET_CPU_MODE(pVCpu))
9793 {
9794 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
9795 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
9796 case IEMMODE_16BIT:
9797 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) || pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
9798 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
9799 else
9800 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
9801 break;
9802 }
9803 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fFlags,
9804 szInstr, sizeof(szInstr), &cbInstr);
9805 }
9806
9807 PCX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
9808 Log2(("**** %s fExec=%x\n"
9809 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
9810 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
9811 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
9812 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
9813 " %s\n"
9814 , pszFunction, pVCpu->iem.s.fExec,
9815 pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ebx, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.esi, pVCpu->cpum.GstCtx.edi,
9816 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL, pVCpu->cpum.GstCtx.tr.Sel,
9817 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ds.Sel, pVCpu->cpum.GstCtx.es.Sel,
9818 pVCpu->cpum.GstCtx.fs.Sel, pVCpu->cpum.GstCtx.gs.Sel, pVCpu->cpum.GstCtx.eflags.u,
9819 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
9820 szInstr));
9821
9822 if (LogIs3Enabled())
9823 DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
9824 }
9825 else
9826# endif
9827 LogFlow(("%s: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n", pszFunction, pVCpu->cpum.GstCtx.cs.Sel,
9828 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u));
9829 RT_NOREF_PV(pVCpu); RT_NOREF_PV(fSameCtx);
9830}
9831#endif /* LOG_ENABLED */
9832
9833
9834#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9835/**
9836 * Deals with VMCPU_FF_VMX_APIC_WRITE, VMCPU_FF_VMX_MTF, VMCPU_FF_VMX_NMI_WINDOW,
9837 * VMCPU_FF_VMX_PREEMPT_TIMER and VMCPU_FF_VMX_INT_WINDOW.
9838 *
9839 * @returns Modified rcStrict.
9840 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9841 * @param rcStrict The instruction execution status.
9842 */
9843static VBOXSTRICTRC iemHandleNestedInstructionBoundaryFFs(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict) RT_NOEXCEPT
9844{
9845 Assert(CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)));
9846 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF))
9847 {
9848 /* VMX preemption timer takes priority over NMI-window exits. */
9849 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
9850 {
9851 rcStrict = iemVmxVmexitPreemptTimer(pVCpu);
9852 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER));
9853 }
9854 /*
9855 * Check remaining intercepts.
9856 *
9857 * NMI-window and Interrupt-window VM-exits.
9858 * Interrupt shadow (block-by-STI and Mov SS) inhibits interrupts and may also block NMIs.
9859 * Event injection during VM-entry takes priority over NMI-window and interrupt-window VM-exits.
9860 *
9861 * See Intel spec. 26.7.6 "NMI-Window Exiting".
9862 * See Intel spec. 26.7.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
9863 */
9864 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW | VMCPU_FF_VMX_INT_WINDOW)
9865 && !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
9866 && !TRPMHasTrap(pVCpu))
9867 {
9868 Assert(CPUMIsGuestVmxInterceptEvents(&pVCpu->cpum.GstCtx));
9869 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW)
9870 && CPUMIsGuestVmxVirtNmiBlocking(&pVCpu->cpum.GstCtx))
9871 {
9872 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_NMI_WINDOW, 0 /* u64ExitQual */);
9873 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW));
9874 }
9875 else if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW)
9876 && CPUMIsGuestVmxVirtIntrEnabled(&pVCpu->cpum.GstCtx))
9877 {
9878 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_INT_WINDOW, 0 /* u64ExitQual */);
9879 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW));
9880 }
9881 }
9882 }
9883 /* TPR-below threshold/APIC write has the highest priority. */
9884 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
9885 {
9886 rcStrict = iemVmxApicWriteEmulation(pVCpu);
9887 Assert(!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx));
9888 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE));
9889 }
9890 /* MTF takes priority over VMX-preemption timer. */
9891 else
9892 {
9893 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* u64ExitQual */);
9894 Assert(!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx));
9895 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF));
9896 }
9897 return rcStrict;
9898}
9899#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
9900
9901
9902/**
9903 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
9904 * IEMExecOneWithPrefetchedByPC.
9905 *
9906 * Similar code is found in IEMExecLots.
9907 *
9908 * @return Strict VBox status code.
9909 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9910 * @param fExecuteInhibit If set, execute the instruction following CLI,
9911 * POP SS and MOV SS,GR.
9912 * @param pszFunction The calling function name.
9913 */
9914DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPUCC pVCpu, bool fExecuteInhibit, const char *pszFunction)
9915{
9916 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9917 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9918 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9919 RT_NOREF_PV(pszFunction);
9920
9921#ifdef IEM_WITH_SETJMP
9922 VBOXSTRICTRC rcStrict;
9923 IEM_TRY_SETJMP(pVCpu, rcStrict)
9924 {
9925 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9926 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9927 }
9928 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
9929 {
9930 pVCpu->iem.s.cLongJumps++;
9931 }
9932 IEM_CATCH_LONGJMP_END(pVCpu);
9933#else
9934 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9935 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9936#endif
9937 if (rcStrict == VINF_SUCCESS)
9938 pVCpu->iem.s.cInstructions++;
9939 if (pVCpu->iem.s.cActiveMappings > 0)
9940 {
9941 Assert(rcStrict != VINF_SUCCESS);
9942 iemMemRollback(pVCpu);
9943 }
9944 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9945 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9946 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9947
9948//#ifdef DEBUG
9949// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
9950//#endif
9951
9952#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9953 /*
9954 * Perform any VMX nested-guest instruction boundary actions.
9955 *
9956 * If any of these causes a VM-exit, we must skip executing the next
9957 * instruction (would run into stale page tables). A VM-exit makes sure
9958 * there is no interrupt-inhibition, so that should ensure we don't go
9959 * to try execute the next instruction. Clearing fExecuteInhibit is
9960 * problematic because of the setjmp/longjmp clobbering above.
9961 */
9962 if ( !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9963 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)
9964 || rcStrict != VINF_SUCCESS)
9965 { /* likely */ }
9966 else
9967 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9968#endif
9969
9970 /* Execute the next instruction as well if a cli, pop ss or
9971 mov ss, Gr has just completed successfully. */
9972 if ( fExecuteInhibit
9973 && rcStrict == VINF_SUCCESS
9974 && CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx))
9975 {
9976 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fExec & (IEM_F_BYPASS_HANDLERS | IEM_F_X86_DISREGARD_LOCK));
9977 if (rcStrict == VINF_SUCCESS)
9978 {
9979#ifdef LOG_ENABLED
9980 iemLogCurInstr(pVCpu, false, pszFunction);
9981#endif
9982#ifdef IEM_WITH_SETJMP
9983 IEM_TRY_SETJMP_AGAIN(pVCpu, rcStrict)
9984 {
9985 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9986 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9987 }
9988 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
9989 {
9990 pVCpu->iem.s.cLongJumps++;
9991 }
9992 IEM_CATCH_LONGJMP_END(pVCpu);
9993#else
9994 IEM_OPCODE_GET_FIRST_U8(&b);
9995 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9996#endif
9997 if (rcStrict == VINF_SUCCESS)
9998 {
9999 pVCpu->iem.s.cInstructions++;
10000#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10001 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
10002 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW))
10003 { /* likely */ }
10004 else
10005 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
10006#endif
10007 }
10008 if (pVCpu->iem.s.cActiveMappings > 0)
10009 {
10010 Assert(rcStrict != VINF_SUCCESS);
10011 iemMemRollback(pVCpu);
10012 }
10013 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
10014 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
10015 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
10016 }
10017 else if (pVCpu->iem.s.cActiveMappings > 0)
10018 iemMemRollback(pVCpu);
10019 /** @todo drop this after we bake this change into RIP advancing. */
10020 CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx); /* hope this is correct for all exceptional cases... */
10021 }
10022
10023 /*
10024 * Return value fiddling, statistics and sanity assertions.
10025 */
10026 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10027
10028 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
10029 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
10030 return rcStrict;
10031}
10032
10033
10034/**
10035 * Execute one instruction.
10036 *
10037 * @return Strict VBox status code.
10038 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10039 */
10040VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPUCC pVCpu)
10041{
10042    AssertCompile(sizeof(pVCpu->iem.s) <= sizeof(pVCpu->iem.padding)); /* (tstVMStruct can't do its job w/o instruction stats) */
10043#ifdef LOG_ENABLED
10044 iemLogCurInstr(pVCpu, true, "IEMExecOne");
10045#endif
10046
10047 /*
10048 * Do the decoding and emulation.
10049 */
10050 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
10051 if (rcStrict == VINF_SUCCESS)
10052 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOne");
10053 else if (pVCpu->iem.s.cActiveMappings > 0)
10054 iemMemRollback(pVCpu);
10055
10056 if (rcStrict != VINF_SUCCESS)
10057 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
10058 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
10059 return rcStrict;
10060}
10061
10062
10063VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPUCC pVCpu, uint32_t *pcbWritten)
10064{
10065 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
10066 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
10067 if (rcStrict == VINF_SUCCESS)
10068 {
10069 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneEx");
10070 if (pcbWritten)
10071 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
10072 }
10073 else if (pVCpu->iem.s.cActiveMappings > 0)
10074 iemMemRollback(pVCpu);
10075
10076 return rcStrict;
10077}
10078
10079
10080VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPUCC pVCpu, uint64_t OpcodeBytesPC,
10081 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
10082{
10083 VBOXSTRICTRC rcStrict;
10084 if ( cbOpcodeBytes
10085 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
10086 {
10087 iemInitDecoder(pVCpu, 0 /*fExecOpts*/);
10088#ifdef IEM_WITH_CODE_TLB
10089 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
10090 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
10091 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
10092 pVCpu->iem.s.offCurInstrStart = 0;
10093 pVCpu->iem.s.offInstrNextByte = 0;
10094 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
10095#else
10096 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
10097 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
10098#endif
10099 rcStrict = VINF_SUCCESS;
10100 }
10101 else
10102 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
10103 if (rcStrict == VINF_SUCCESS)
10104 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneWithPrefetchedByPC");
10105 else if (pVCpu->iem.s.cActiveMappings > 0)
10106 iemMemRollback(pVCpu);
10107
10108 return rcStrict;
10109}
10110
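/*
 * Hypothetical caller sketch for IEMExecOneWithPrefetchedByPC: an exit handler
 * that already has the opcode bytes at the current RIP can pass them in and
 * spare IEM the opcode prefetch.  The buffer below is made up for illustration;
 * if it does not match the current RIP, the function simply falls back to the
 * normal prefetch path.
 *
 *      uint8_t abOpcodes[15];   // filled in by the (hypothetical) exit handler
 *      VBOXSTRICTRC rcStrict = IEMExecOneWithPrefetchedByPC(pVCpu, pVCpu->cpum.GstCtx.rip,
 *                                                           abOpcodes, sizeof(abOpcodes));
 */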
10111
10112VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPUCC pVCpu, uint32_t *pcbWritten)
10113{
10114 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
10115 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_BYPASS_HANDLERS);
10116 if (rcStrict == VINF_SUCCESS)
10117 {
10118 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassEx");
10119 if (pcbWritten)
10120 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
10121 }
10122 else if (pVCpu->iem.s.cActiveMappings > 0)
10123 iemMemRollback(pVCpu);
10124
10125 return rcStrict;
10126}
10127
10128
10129VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPUCC pVCpu, uint64_t OpcodeBytesPC,
10130 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
10131{
10132 VBOXSTRICTRC rcStrict;
10133 if ( cbOpcodeBytes
10134 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
10135 {
10136 iemInitDecoder(pVCpu, IEM_F_BYPASS_HANDLERS);
10137#ifdef IEM_WITH_CODE_TLB
10138 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
10139 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
10140 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
10141 pVCpu->iem.s.offCurInstrStart = 0;
10142 pVCpu->iem.s.offInstrNextByte = 0;
10143 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
10144#else
10145 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
10146 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
10147#endif
10148 rcStrict = VINF_SUCCESS;
10149 }
10150 else
10151 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_BYPASS_HANDLERS);
10152 if (rcStrict == VINF_SUCCESS)
10153 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPC");
10154 else if (pVCpu->iem.s.cActiveMappings > 0)
10155 iemMemRollback(pVCpu);
10156
10157 return rcStrict;
10158}
10159
10160
10161/**
10162 * For handling split cacheline lock operations when the host has split-lock
10163 * detection enabled.
10164 *
10165 * This will cause the interpreter to disregard the lock prefix and implicit
10166 * locking (xchg).
10167 *
10168 * @returns Strict VBox status code.
10169 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10170 */
10171VMMDECL(VBOXSTRICTRC) IEMExecOneIgnoreLock(PVMCPUCC pVCpu)
10172{
10173 /*
10174 * Do the decoding and emulation.
10175 */
10176 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_X86_DISREGARD_LOCK);
10177 if (rcStrict == VINF_SUCCESS)
10178 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneIgnoreLock");
10179 else if (pVCpu->iem.s.cActiveMappings > 0)
10180 iemMemRollback(pVCpu);
10181
10182 if (rcStrict != VINF_SUCCESS)
10183 LogFlow(("IEMExecOneIgnoreLock: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
10184 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
10185 return rcStrict;
10186}
10187
10188
10189/**
10190 * Code common to IEMExecLots and IEMExecRecompilerThreaded that attempts to
10191 * inject a pending TRPM trap.
10192 */
10193VBOXSTRICTRC iemExecInjectPendingTrap(PVMCPUCC pVCpu)
10194{
10195 Assert(TRPMHasTrap(pVCpu));
10196
10197 if ( !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
10198 && !CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx))
10199 {
10200 /** @todo Can we centralize this under CPUMCanInjectInterrupt()? */
10201#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
10202 bool fIntrEnabled = CPUMGetGuestGif(&pVCpu->cpum.GstCtx);
10203 if (fIntrEnabled)
10204 {
10205 if (!CPUMIsGuestInNestedHwvirtMode(IEM_GET_CTX(pVCpu)))
10206 fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
10207 else if (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
10208 fIntrEnabled = CPUMIsGuestVmxPhysIntrEnabled(IEM_GET_CTX(pVCpu));
10209 else
10210 {
10211 Assert(CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)));
10212 fIntrEnabled = CPUMIsGuestSvmPhysIntrEnabled(pVCpu, IEM_GET_CTX(pVCpu));
10213 }
10214 }
10215#else
10216 bool fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
10217#endif
10218 if (fIntrEnabled)
10219 {
10220 uint8_t u8TrapNo;
10221 TRPMEVENT enmType;
10222 uint32_t uErrCode;
10223 RTGCPTR uCr2;
10224 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /*pu8InstLen*/, NULL /*fIcebp*/);
10225 AssertRC(rc2);
10226 Assert(enmType == TRPM_HARDWARE_INT);
10227 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /*cbInstr*/);
10228
10229 TRPMResetTrap(pVCpu);
10230
10231#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
10232 /* Injecting an event may cause a VM-exit. */
10233 if ( rcStrict != VINF_SUCCESS
10234 && rcStrict != VINF_IEM_RAISED_XCPT)
10235 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
10236#else
10237 NOREF(rcStrict);
10238#endif
10239 }
10240 }
10241
10242 return VINF_SUCCESS;
10243}
10244
10245
10246VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPUCC pVCpu, uint32_t cMaxInstructions, uint32_t cPollRate, uint32_t *pcInstructions)
10247{
10248 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
10249 AssertMsg(RT_IS_POWER_OF_TWO(cPollRate + 1), ("%#x\n", cPollRate));
10250 Assert(cMaxInstructions > 0);
10251
10252 /*
10253 * See if there is an interrupt pending in TRPM, inject it if we can.
10254 */
10255 /** @todo What if we are injecting an exception and not an interrupt? Is that
10256 * possible here? For now we assert it is indeed only an interrupt. */
10257 if (!TRPMHasTrap(pVCpu))
10258 { /* likely */ }
10259 else
10260 {
10261 VBOXSTRICTRC rcStrict = iemExecInjectPendingTrap(pVCpu);
10262 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10263 { /*likely */ }
10264 else
10265 return rcStrict;
10266 }
10267
10268 /*
10269 * Initial decoder init w/ prefetch, then setup setjmp.
10270 */
10271 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
10272 if (rcStrict == VINF_SUCCESS)
10273 {
10274#ifdef IEM_WITH_SETJMP
10275 pVCpu->iem.s.cActiveMappings = 0; /** @todo wtf? */
10276 IEM_TRY_SETJMP(pVCpu, rcStrict)
10277#endif
10278 {
10279 /*
10280             * The run loop.  We limit ourselves to the caller specified cMaxInstructions count.
10281 */
10282 uint32_t cMaxInstructionsGccStupidity = cMaxInstructions;
10283 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
10284 for (;;)
10285 {
10286 /*
10287 * Log the state.
10288 */
10289#ifdef LOG_ENABLED
10290 iemLogCurInstr(pVCpu, true, "IEMExecLots");
10291#endif
10292
10293 /*
10294 * Do the decoding and emulation.
10295 */
10296 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
10297 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
10298#ifdef VBOX_STRICT
10299 CPUMAssertGuestRFlagsCookie(pVM, pVCpu);
10300#endif
10301 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10302 {
10303 Assert(pVCpu->iem.s.cActiveMappings == 0);
10304 pVCpu->iem.s.cInstructions++;
10305
10306#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10307 /* Perform any VMX nested-guest instruction boundary actions. */
10308 uint64_t fCpu = pVCpu->fLocalForcedActions;
10309 if (!(fCpu & ( VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
10310 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)))
10311 { /* likely */ }
10312 else
10313 {
10314 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
10315 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10316 fCpu = pVCpu->fLocalForcedActions;
10317 else
10318 {
10319 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10320 break;
10321 }
10322 }
10323#endif
10324 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
10325 {
10326#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
10327 uint64_t fCpu = pVCpu->fLocalForcedActions;
10328#endif
10329 fCpu &= VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
10330 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
10331 | VMCPU_FF_TLB_FLUSH
10332 | VMCPU_FF_UNHALT );
10333
10334 if (RT_LIKELY( ( !fCpu
10335 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
10336 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF) )
10337 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) ))
10338 {
10339 if (--cMaxInstructionsGccStupidity > 0)
10340 {
10341                        /* Poll timers every now and then according to the caller's specs. */
10342 if ( (cMaxInstructionsGccStupidity & cPollRate) != 0
10343 || !TMTimerPollBool(pVM, pVCpu))
10344 {
10345 Assert(pVCpu->iem.s.cActiveMappings == 0);
10346 iemReInitDecoder(pVCpu);
10347 continue;
10348 }
10349 }
10350 }
10351 }
10352 Assert(pVCpu->iem.s.cActiveMappings == 0);
10353 }
10354 else if (pVCpu->iem.s.cActiveMappings > 0)
10355 iemMemRollback(pVCpu);
10356 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10357 break;
10358 }
10359 }
10360#ifdef IEM_WITH_SETJMP
10361 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
10362 {
10363 if (pVCpu->iem.s.cActiveMappings > 0)
10364 iemMemRollback(pVCpu);
10365# if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
10366 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10367# endif
10368 pVCpu->iem.s.cLongJumps++;
10369 }
10370 IEM_CATCH_LONGJMP_END(pVCpu);
10371#endif
10372
10373 /*
10374 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
10375 */
10376 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
10377 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
10378 }
10379 else
10380 {
10381 if (pVCpu->iem.s.cActiveMappings > 0)
10382 iemMemRollback(pVCpu);
10383
10384#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
10385 /*
10386 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
10387 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
10388 */
10389 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10390#endif
10391 }
10392
10393 /*
10394 * Maybe re-enter raw-mode and log.
10395 */
10396 if (rcStrict != VINF_SUCCESS)
10397 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
10398 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
10399 if (pcInstructions)
10400 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
10401 return rcStrict;
10402}
10403
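/*
 * Hypothetical IEMExecLots call sketch.  Per the assertion at the top of the
 * function, cPollRate + 1 must be a power of two: the value is used as a mask
 * on the remaining instruction count, so timers get polled roughly every
 * cPollRate + 1 instructions.
 *
 *      uint32_t cInstructions = 0;
 *      // up to 4096 instructions, poll timers about every 512 instructions
 *      VBOXSTRICTRC rcStrict = IEMExecLots(pVCpu, 4096, 511, &cInstructions);
 */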
10404
10405/**
10406 * Interface used by EMExecuteExec, does exit statistics and limits.
10407 *
10408 * @returns Strict VBox status code.
10409 * @param pVCpu The cross context virtual CPU structure.
10410 * @param fWillExit To be defined.
10411 * @param cMinInstructions Minimum number of instructions to execute before checking for FFs.
10412 * @param cMaxInstructions Maximum number of instructions to execute.
10413 * @param cMaxInstructionsWithoutExits
10414 * The max number of instructions without exits.
10415 * @param pStats Where to return statistics.
10416 */
10417VMMDECL(VBOXSTRICTRC) IEMExecForExits(PVMCPUCC pVCpu, uint32_t fWillExit, uint32_t cMinInstructions, uint32_t cMaxInstructions,
10418 uint32_t cMaxInstructionsWithoutExits, PIEMEXECFOREXITSTATS pStats)
10419{
10420 NOREF(fWillExit); /** @todo define flexible exit crits */
10421
10422 /*
10423 * Initialize return stats.
10424 */
10425 pStats->cInstructions = 0;
10426 pStats->cExits = 0;
10427 pStats->cMaxExitDistance = 0;
10428 pStats->cReserved = 0;
10429
10430 /*
10431 * Initial decoder init w/ prefetch, then setup setjmp.
10432 */
10433 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
10434 if (rcStrict == VINF_SUCCESS)
10435 {
10436#ifdef IEM_WITH_SETJMP
10437 pVCpu->iem.s.cActiveMappings = 0; /** @todo wtf?!? */
10438 IEM_TRY_SETJMP(pVCpu, rcStrict)
10439#endif
10440 {
10441#ifdef IN_RING0
10442 bool const fCheckPreemptionPending = !RTThreadPreemptIsPossible() || !RTThreadPreemptIsEnabled(NIL_RTTHREAD);
10443#endif
10444 uint32_t cInstructionSinceLastExit = 0;
10445
10446 /*
10447             * The run loop.  We limit ourselves to the caller specified cMaxInstructions count.
10448 */
10449 PVM pVM = pVCpu->CTX_SUFF(pVM);
10450 for (;;)
10451 {
10452 /*
10453 * Log the state.
10454 */
10455#ifdef LOG_ENABLED
10456 iemLogCurInstr(pVCpu, true, "IEMExecForExits");
10457#endif
10458
10459 /*
10460 * Do the decoding and emulation.
10461 */
10462 uint32_t const cPotentialExits = pVCpu->iem.s.cPotentialExits;
10463
10464 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
10465 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
10466
10467 if ( cPotentialExits != pVCpu->iem.s.cPotentialExits
10468 && cInstructionSinceLastExit > 0 /* don't count the first */ )
10469 {
10470 pStats->cExits += 1;
10471 if (cInstructionSinceLastExit > pStats->cMaxExitDistance)
10472 pStats->cMaxExitDistance = cInstructionSinceLastExit;
10473 cInstructionSinceLastExit = 0;
10474 }
10475
10476 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10477 {
10478 Assert(pVCpu->iem.s.cActiveMappings == 0);
10479 pVCpu->iem.s.cInstructions++;
10480 pStats->cInstructions++;
10481 cInstructionSinceLastExit++;
10482
10483#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10484 /* Perform any VMX nested-guest instruction boundary actions. */
10485 uint64_t fCpu = pVCpu->fLocalForcedActions;
10486 if (!(fCpu & ( VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
10487 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)))
10488 { /* likely */ }
10489 else
10490 {
10491 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
10492 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10493 fCpu = pVCpu->fLocalForcedActions;
10494 else
10495 {
10496 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10497 break;
10498 }
10499 }
10500#endif
10501 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
10502 {
10503#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
10504 uint64_t fCpu = pVCpu->fLocalForcedActions;
10505#endif
10506 fCpu &= VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
10507 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
10508 | VMCPU_FF_TLB_FLUSH
10509 | VMCPU_FF_UNHALT );
10510 if (RT_LIKELY( ( ( !fCpu
10511 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
10512 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF))
10513 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) )
10514 || pStats->cInstructions < cMinInstructions))
10515 {
10516 if (pStats->cInstructions < cMaxInstructions)
10517 {
10518 if (cInstructionSinceLastExit <= cMaxInstructionsWithoutExits)
10519 {
10520#ifdef IN_RING0
10521 if ( !fCheckPreemptionPending
10522 || !RTThreadPreemptIsPending(NIL_RTTHREAD))
10523#endif
10524 {
10525 Assert(pVCpu->iem.s.cActiveMappings == 0);
10526 iemReInitDecoder(pVCpu);
10527 continue;
10528 }
10529#ifdef IN_RING0
10530 rcStrict = VINF_EM_RAW_INTERRUPT;
10531 break;
10532#endif
10533 }
10534 }
10535 }
10536 Assert(!(fCpu & VMCPU_FF_IEM));
10537 }
10538 Assert(pVCpu->iem.s.cActiveMappings == 0);
10539 }
10540 else if (pVCpu->iem.s.cActiveMappings > 0)
10541 iemMemRollback(pVCpu);
10542 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10543 break;
10544 }
10545 }
10546#ifdef IEM_WITH_SETJMP
10547 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
10548 {
10549 if (pVCpu->iem.s.cActiveMappings > 0)
10550 iemMemRollback(pVCpu);
10551 pVCpu->iem.s.cLongJumps++;
10552 }
10553 IEM_CATCH_LONGJMP_END(pVCpu);
10554#endif
10555
10556 /*
10557 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
10558 */
10559 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
10560 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
10561 }
10562 else
10563 {
10564 if (pVCpu->iem.s.cActiveMappings > 0)
10565 iemMemRollback(pVCpu);
10566
10567#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
10568 /*
10569 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
10570 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
10571 */
10572 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10573#endif
10574 }
10575
10576 /*
10577      * Log the outcome if it is not plain VINF_SUCCESS.
10578 */
10579 if (rcStrict != VINF_SUCCESS)
10580 LogFlow(("IEMExecForExits: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc; ins=%u exits=%u maxdist=%u\n",
10581 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp,
10582 pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict), pStats->cInstructions, pStats->cExits, pStats->cMaxExitDistance));
10583 return rcStrict;
10584}
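
/*
 * Usage sketch (illustrative only): one way a caller could drive IEMExecForExits.
 * The wrapper name, the instruction budgets and the logging below are assumptions
 * made for this example; they are not taken from the real EM code.
 */
#if 0
static VBOXSTRICTRC emExampleExecForExits(PVMCPUCC pVCpu)
{
    IEMEXECFOREXITSTATS Stats;
    VBOXSTRICTRC rcStrict = IEMExecForExits(pVCpu, 0 /*fWillExit - not defined yet*/,
                                            8 /*cMinInstructions*/, 4096 /*cMaxInstructions*/,
                                            512 /*cMaxInstructionsWithoutExits*/, &Stats);
    Log2(("emExampleExecForExits: ins=%u exits=%u maxdist=%u -> %Rrc\n",
          Stats.cInstructions, Stats.cExits, Stats.cMaxExitDistance, VBOXSTRICTRC_VAL(rcStrict)));
    return rcStrict;
}
#endif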
10585
10586
10587/**
10588 * Injects a trap, fault, abort, software interrupt or external interrupt.
10589 *
10590 * The parameter list matches TRPMQueryTrapAll pretty closely.
10591 *
10592 * @returns Strict VBox status code.
10593 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10594 * @param u8TrapNo The trap number.
10595 * @param enmType The event type: trap/fault/abort (CPU exception),
10596 * software interrupt or external hardware interrupt.
10597 * @param uErrCode The error code if applicable.
10598 * @param uCr2 The CR2 value if applicable.
10599 * @param cbInstr The instruction length (only relevant for
10600 * software interrupts).
10601 */
10602VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPUCC pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
10603 uint8_t cbInstr)
10604{
10605 iemInitDecoder(pVCpu, 0 /*fExecOpts*/); /** @todo wrong init function! */
10606#ifdef DBGFTRACE_ENABLED
10607 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
10608 u8TrapNo, enmType, uErrCode, uCr2);
10609#endif
10610
10611 uint32_t fFlags;
10612 switch (enmType)
10613 {
10614 case TRPM_HARDWARE_INT:
10615 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
10616 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
10617 uErrCode = uCr2 = 0;
10618 break;
10619
10620 case TRPM_SOFTWARE_INT:
10621 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
10622 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
10623 uErrCode = uCr2 = 0;
10624 break;
10625
10626 case TRPM_TRAP:
10627 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
10628 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
10629 if (u8TrapNo == X86_XCPT_PF)
10630 fFlags |= IEM_XCPT_FLAGS_CR2;
10631 switch (u8TrapNo)
10632 {
10633 case X86_XCPT_DF:
10634 case X86_XCPT_TS:
10635 case X86_XCPT_NP:
10636 case X86_XCPT_SS:
10637 case X86_XCPT_PF:
10638 case X86_XCPT_AC:
10639 case X86_XCPT_GP:
10640 fFlags |= IEM_XCPT_FLAGS_ERR;
10641 break;
10642 }
10643 break;
10644
10645 IEM_NOT_REACHED_DEFAULT_CASE_RET();
10646 }
10647
10648 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
10649
10650 if (pVCpu->iem.s.cActiveMappings > 0)
10651 iemMemRollback(pVCpu);
10652
10653 return rcStrict;
10654}
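
/*
 * Usage sketch (illustrative only): injecting a write page fault via IEMInjectTrap.
 * The helper name and the error code are assumptions; real callers derive them
 * from the event they are forwarding.
 */
#if 0
static VBOXSTRICTRC emExampleInjectPageFault(PVMCPUCC pVCpu, RTGCPTR GCPtrFault)
{
    /* #PF is a CPU exception (TRPM_TRAP); error code 0x2 = write to a not-present
       page; CR2 carries the faulting address; cbInstr only matters for INTn. */
    return IEMInjectTrap(pVCpu, X86_XCPT_PF, TRPM_TRAP, 0x2 /*uErrCode*/, GCPtrFault, 0 /*cbInstr*/);
}
#endif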
10655
10656
10657/**
10658 * Injects the active TRPM event.
10659 *
10660 * @returns Strict VBox status code.
10661 * @param pVCpu The cross context virtual CPU structure.
10662 */
10663VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPUCC pVCpu)
10664{
10665#ifndef IEM_IMPLEMENTS_TASKSWITCH
10666 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
10667#else
10668 uint8_t u8TrapNo;
10669 TRPMEVENT enmType;
10670 uint32_t uErrCode;
10671 RTGCUINTPTR uCr2;
10672 uint8_t cbInstr;
10673 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr, NULL /* fIcebp */);
10674 if (RT_FAILURE(rc))
10675 return rc;
10676
10677 /** @todo r=ramshankar: Pass ICEBP info. to IEMInjectTrap() below and handle
10678 * ICEBP \#DB injection as a special case. */
10679 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
10680#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
10681 if (rcStrict == VINF_SVM_VMEXIT)
10682 rcStrict = VINF_SUCCESS;
10683#endif
10684#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10685 if (rcStrict == VINF_VMX_VMEXIT)
10686 rcStrict = VINF_SUCCESS;
10687#endif
10688 /** @todo Are there any other codes that imply the event was successfully
10689 * delivered to the guest? See @bugref{6607}. */
10690 if ( rcStrict == VINF_SUCCESS
10691 || rcStrict == VINF_IEM_RAISED_XCPT)
10692 TRPMResetTrap(pVCpu);
10693
10694 return rcStrict;
10695#endif
10696}
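
/*
 * Usage sketch (illustrative only): delivering whatever TRPM has queued.  The
 * helper name is an assumption and TRPMHasTrap() is only used to show the
 * typical "is anything pending?" check a caller would make.
 */
#if 0
static VBOXSTRICTRC emExampleDeliverPendingEvent(PVMCPUCC pVCpu)
{
    if (!TRPMHasTrap(pVCpu))
        return VINF_SUCCESS;    /* nothing queued in TRPM */
    /* On VINF_SUCCESS / VINF_IEM_RAISED_XCPT the event has been consumed and
       TRPMResetTrap() was already called by IEMInjectTrpmEvent. */
    return IEMInjectTrpmEvent(pVCpu);
}
#endif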
10697
10698
10699VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
10700{
10701 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
10702 return VERR_NOT_IMPLEMENTED;
10703}
10704
10705
10706VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
10707{
10708 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
10709 return VERR_NOT_IMPLEMENTED;
10710}
10711
10712
10713/**
10714 * Interface for HM and EM for executing string I/O OUT (write) instructions.
10715 *
10716 * This API ASSUMES that the caller has already verified that the guest code is
10717 * allowed to access the I/O port. (The I/O port is in the DX register in the
10718 * guest state.)
10719 *
10720 * @returns Strict VBox status code.
10721 * @param pVCpu The cross context virtual CPU structure.
10722 * @param cbValue The size of the I/O port access (1, 2, or 4).
10723 * @param enmAddrMode The addressing mode.
10724 * @param fRepPrefix Indicates whether a repeat prefix is used
10725 * (doesn't matter which for this instruction).
10726 * @param cbInstr The instruction length in bytes.
10727 * @param iEffSeg The effective segment register.
10728 * @param fIoChecked Whether the access to the I/O port has been
10729 * checked or not. It's typically checked in the
10730 * HM scenario.
10731 */
10732VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
10733 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
10734{
10735 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
10736 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10737
10738 /*
10739 * State init.
10740 */
10741 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10742
10743 /*
10744 * Switch orgy for getting to the right handler.
10745 */
10746 VBOXSTRICTRC rcStrict;
10747 if (fRepPrefix)
10748 {
10749 switch (enmAddrMode)
10750 {
10751 case IEMMODE_16BIT:
10752 switch (cbValue)
10753 {
10754 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10755 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10756 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10757 default:
10758 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10759 }
10760 break;
10761
10762 case IEMMODE_32BIT:
10763 switch (cbValue)
10764 {
10765 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10766 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10767 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10768 default:
10769 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10770 }
10771 break;
10772
10773 case IEMMODE_64BIT:
10774 switch (cbValue)
10775 {
10776 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10777 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10778 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10779 default:
10780 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10781 }
10782 break;
10783
10784 default:
10785 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10786 }
10787 }
10788 else
10789 {
10790 switch (enmAddrMode)
10791 {
10792 case IEMMODE_16BIT:
10793 switch (cbValue)
10794 {
10795 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10796 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10797 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10798 default:
10799 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10800 }
10801 break;
10802
10803 case IEMMODE_32BIT:
10804 switch (cbValue)
10805 {
10806 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10807 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10808 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10809 default:
10810 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10811 }
10812 break;
10813
10814 case IEMMODE_64BIT:
10815 switch (cbValue)
10816 {
10817 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10818 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10819 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10820 default:
10821 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10822 }
10823 break;
10824
10825 default:
10826 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10827 }
10828 }
10829
10830 if (pVCpu->iem.s.cActiveMappings)
10831 iemMemRollback(pVCpu);
10832
10833 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10834}
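
/*
 * Usage sketch (illustrative only): how an I/O exit handler might forward a
 * 'rep outsb' to this API.  The handler shape and the literal operand sizes are
 * assumptions; real callers take them from the VT-x/AMD-V exit information.
 */
#if 0
static VBOXSTRICTRC hmExampleHandleRepOutsb(PVMCPUCC pVCpu, uint8_t cbInstr)
{
    /* 1 byte accesses, 32-bit address size, default DS segment, port already
       checked against the I/O permission bitmap by the caller. */
    return IEMExecStringIoWrite(pVCpu, 1 /*cbValue*/, IEMMODE_32BIT, true /*fRepPrefix*/,
                                cbInstr, X86_SREG_DS, true /*fIoChecked*/);
}
#endif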
10835
10836
10837/**
10838 * Interface for HM and EM for executing string I/O IN (read) instructions.
10839 *
10840 * This API ASSUMES that the caller has already verified that the guest code is
10841 * allowed to access the I/O port. (The I/O port is in the DX register in the
10842 * guest state.)
10843 *
10844 * @returns Strict VBox status code.
10845 * @param pVCpu The cross context virtual CPU structure.
10846 * @param cbValue The size of the I/O port access (1, 2, or 4).
10847 * @param enmAddrMode The addressing mode.
10848 * @param fRepPrefix Indicates whether a repeat prefix is used
10849 * (doesn't matter which for this instruction).
10850 * @param cbInstr The instruction length in bytes.
10851 * @param fIoChecked Whether the access to the I/O port has been
10852 * checked or not. It's typically checked in the
10853 * HM scenario.
10854 */
10855VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
10856 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
10857{
10858 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10859
10860 /*
10861 * State init.
10862 */
10863 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10864
10865 /*
10866 * Switch orgy for getting to the right handler.
10867 */
10868 VBOXSTRICTRC rcStrict;
10869 if (fRepPrefix)
10870 {
10871 switch (enmAddrMode)
10872 {
10873 case IEMMODE_16BIT:
10874 switch (cbValue)
10875 {
10876 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
10877 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
10878 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
10879 default:
10880 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10881 }
10882 break;
10883
10884 case IEMMODE_32BIT:
10885 switch (cbValue)
10886 {
10887 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
10888 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
10889 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
10890 default:
10891 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10892 }
10893 break;
10894
10895 case IEMMODE_64BIT:
10896 switch (cbValue)
10897 {
10898 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
10899 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
10900 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
10901 default:
10902 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10903 }
10904 break;
10905
10906 default:
10907 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10908 }
10909 }
10910 else
10911 {
10912 switch (enmAddrMode)
10913 {
10914 case IEMMODE_16BIT:
10915 switch (cbValue)
10916 {
10917 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
10918 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
10919 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
10920 default:
10921 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10922 }
10923 break;
10924
10925 case IEMMODE_32BIT:
10926 switch (cbValue)
10927 {
10928 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
10929 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
10930 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
10931 default:
10932 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10933 }
10934 break;
10935
10936 case IEMMODE_64BIT:
10937 switch (cbValue)
10938 {
10939 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
10940 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
10941 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
10942 default:
10943 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10944 }
10945 break;
10946
10947 default:
10948 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10949 }
10950 }
10951
10952 if ( pVCpu->iem.s.cActiveMappings == 0
10953 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
10954 { /* likely */ }
10955 else
10956 {
10957 AssertMsg(!IOM_SUCCESS(rcStrict), ("%#x\n", VBOXSTRICTRC_VAL(rcStrict)));
10958 iemMemRollback(pVCpu);
10959 }
10960 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10961}
10962
10963
10964/**
10965 * Interface for rawmode to execute an OUT instruction.
10966 *
10967 * @returns Strict VBox status code.
10968 * @param pVCpu The cross context virtual CPU structure.
10969 * @param cbInstr The instruction length in bytes.
10970 * @param u16Port The port to write to.
10971 * @param fImm Whether the port is specified using an immediate operand or
10972 * using the implicit DX register.
10973 * @param cbReg The register size.
10974 *
10975 * @remarks In ring-0 not all of the state needs to be synced in.
10976 */
10977VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
10978{
10979 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10980 Assert(cbReg <= 4 && cbReg != 3);
10981
10982 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10983 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_out, u16Port, cbReg,
10984 ((uint8_t)fImm << 7) | 0xf /** @todo never worked with intercepts */);
10985 Assert(!pVCpu->iem.s.cActiveMappings);
10986 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10987}
10988
10989
10990/**
10991 * Interface for rawmode to execute an IN instruction.
10992 *
10993 * @returns Strict VBox status code.
10994 * @param pVCpu The cross context virtual CPU structure.
10995 * @param cbInstr The instruction length in bytes.
10996 * @param u16Port The port to read.
10997 * @param fImm Whether the port is specified using an immediate operand or
10998 * using the implicit DX.
10999 * @param cbReg The register size.
11000 */
11001VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
11002{
11003 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
11004 Assert(cbReg <= 4 && cbReg != 3);
11005
11006 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11007 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_in, u16Port, cbReg,
11008 ((uint8_t)fImm << 7) | 0xf /** @todo never worked with intercepts */);
11009 Assert(!pVCpu->iem.s.cActiveMappings);
11010 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11011}
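
/*
 * Usage sketch (illustrative only) covering IEMExecDecodedOut/IEMExecDecodedIn.
 * The port number and instruction length are made up for the example.
 */
#if 0
static VBOXSTRICTRC hmExampleHandleOutImm8(PVMCPUCC pVCpu)
{
    /* 'out 70h, al': 2 byte instruction (E6 ib), immediate port operand, 1 byte access. */
    return IEMExecDecodedOut(pVCpu, 2 /*cbInstr*/, 0x70 /*u16Port*/, true /*fImm*/, 1 /*cbReg*/);
}
#endif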
11012
11013
11014/**
11015 * Interface for HM and EM to write to a CRx register.
11016 *
11017 * @returns Strict VBox status code.
11018 * @param pVCpu The cross context virtual CPU structure.
11019 * @param cbInstr The instruction length in bytes.
11020 * @param iCrReg The control register number (destination).
11021 * @param iGReg The general purpose register number (source).
11022 *
11023 * @remarks In ring-0 not all of the state needs to be synced in.
11024 */
11025VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
11026{
11027 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11028 Assert(iCrReg < 16);
11029 Assert(iGReg < 16);
11030
11031 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11032 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
11033 Assert(!pVCpu->iem.s.cActiveMappings);
11034 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11035}
11036
11037
11038/**
11039 * Interface for HM and EM to read from a CRx register.
11040 *
11041 * @returns Strict VBox status code.
11042 * @param pVCpu The cross context virtual CPU structure.
11043 * @param cbInstr The instruction length in bytes.
11044 * @param iGReg The general purpose register number (destination).
11045 * @param iCrReg The control register number (source).
11046 *
11047 * @remarks In ring-0 not all of the state needs to be synced in.
11048 */
11049VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
11050{
11051 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11052 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4
11053 | CPUMCTX_EXTRN_APIC_TPR);
11054 Assert(iCrReg < 16);
11055 Assert(iGReg < 16);
11056
11057 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11058 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
11059 Assert(!pVCpu->iem.s.cActiveMappings);
11060 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11061}
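
/*
 * Usage sketch (illustrative only) for the MOV CRx interfaces.  The register
 * indices and the 3 byte length (0F 22 /r without prefixes) are example values;
 * real callers decode them from the exit qualification.
 */
#if 0
static VBOXSTRICTRC hmExampleHandleMovToCr3(PVMCPUCC pVCpu, uint8_t iGReg)
{
    return IEMExecDecodedMovCRxWrite(pVCpu, 3 /*cbInstr*/, 3 /*iCrReg = CR3*/, iGReg);
}
#endif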
11062
11063
11064/**
11065 * Interface for HM and EM to write to a DRx register.
11066 *
11067 * @returns Strict VBox status code.
11068 * @param pVCpu The cross context virtual CPU structure.
11069 * @param cbInstr The instruction length in bytes.
11070 * @param iDrReg The debug register number (destination).
11071 * @param iGReg The general purpose register number (source).
11072 *
11073 * @remarks In ring-0 not all of the state needs to be synced in.
11074 */
11075VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovDRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iDrReg, uint8_t iGReg)
11076{
11077 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11078 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_DR7);
11079 Assert(iDrReg < 8);
11080 Assert(iGReg < 16);
11081
11082 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11083 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Dd_Rd, iDrReg, iGReg);
11084 Assert(!pVCpu->iem.s.cActiveMappings);
11085 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11086}
11087
11088
11089/**
11090 * Interface for HM and EM to read from a DRx register.
11091 *
11092 * @returns Strict VBox status code.
11093 * @param pVCpu The cross context virtual CPU structure.
11094 * @param cbInstr The instruction length in bytes.
11095 * @param iGReg The general purpose register number (destination).
11096 * @param iDrReg The debug register number (source).
11097 *
11098 * @remarks In ring-0 not all of the state needs to be synced in.
11099 */
11100VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovDRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iDrReg)
11101{
11102 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11103 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_DR7);
11104 Assert(iDrReg < 8);
11105 Assert(iGReg < 16);
11106
11107 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11108 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Dd, iGReg, iDrReg);
11109 Assert(!pVCpu->iem.s.cActiveMappings);
11110 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11111}
11112
11113
11114/**
11115 * Interface for HM and EM to clear the CR0[TS] bit.
11116 *
11117 * @returns Strict VBox status code.
11118 * @param pVCpu The cross context virtual CPU structure.
11119 * @param cbInstr The instruction length in bytes.
11120 *
11121 * @remarks In ring-0 not all of the state needs to be synced in.
11122 */
11123VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPUCC pVCpu, uint8_t cbInstr)
11124{
11125 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11126
11127 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11128 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
11129 Assert(!pVCpu->iem.s.cActiveMappings);
11130 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11131}
11132
11133
11134/**
11135 * Interface for HM and EM to emulate the LMSW instruction (loads the machine status word into CR0).
11136 *
11137 * @returns Strict VBox status code.
11138 * @param pVCpu The cross context virtual CPU structure.
11139 * @param cbInstr The instruction length in bytes.
11140 * @param uValue The value to load into CR0.
11141 * @param GCPtrEffDst The guest-linear address if the LMSW instruction has a
11142 * memory operand. Otherwise pass NIL_RTGCPTR.
11143 *
11144 * @remarks In ring-0 not all of the state needs to be synced in.
11145 */
11146VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t uValue, RTGCPTR GCPtrEffDst)
11147{
11148 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11149
11150 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11151 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_lmsw, uValue, GCPtrEffDst);
11152 Assert(!pVCpu->iem.s.cActiveMappings);
11153 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11154}
11155
11156
11157/**
11158 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
11159 *
11160 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
11161 *
11162 * @returns Strict VBox status code.
11163 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11164 * @param cbInstr The instruction length in bytes.
11165 * @remarks In ring-0 not all of the state needs to be synced in.
11166 * @thread EMT(pVCpu)
11167 */
11168VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPUCC pVCpu, uint8_t cbInstr)
11169{
11170 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11171
11172 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11173 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
11174 Assert(!pVCpu->iem.s.cActiveMappings);
11175 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11176}
11177
11178
11179/**
11180 * Interface for HM and EM to emulate the WBINVD instruction.
11181 *
11182 * @returns Strict VBox status code.
11183 * @param pVCpu The cross context virtual CPU structure.
11184 * @param cbInstr The instruction length in bytes.
11185 *
11186 * @remarks In ring-0 not all of the state needs to be synced in.
11187 */
11188VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWbinvd(PVMCPUCC pVCpu, uint8_t cbInstr)
11189{
11190 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11191
11192 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11193 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wbinvd);
11194 Assert(!pVCpu->iem.s.cActiveMappings);
11195 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11196}
11197
11198
11199/**
11200 * Interface for HM and EM to emulate the INVD instruction.
11201 *
11202 * @returns Strict VBox status code.
11203 * @param pVCpu The cross context virtual CPU structure.
11204 * @param cbInstr The instruction length in bytes.
11205 *
11206 * @remarks In ring-0 not all of the state needs to be synced in.
11207 */
11208VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvd(PVMCPUCC pVCpu, uint8_t cbInstr)
11209{
11210 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11211
11212 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11213 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invd);
11214 Assert(!pVCpu->iem.s.cActiveMappings);
11215 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11216}
11217
11218
11219/**
11220 * Interface for HM and EM to emulate the INVLPG instruction.
11221 *
11222 * @returns Strict VBox status code.
11223 * @retval VINF_PGM_SYNC_CR3
11224 *
11225 * @param pVCpu The cross context virtual CPU structure.
11226 * @param cbInstr The instruction length in bytes.
11227 * @param GCPtrPage The effective address of the page to invalidate.
11228 *
11229 * @remarks In ring-0 not all of the state needs to be synced in.
11230 */
11231VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpg(PVMCPUCC pVCpu, uint8_t cbInstr, RTGCPTR GCPtrPage)
11232{
11233 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11234
11235 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11236 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_invlpg, GCPtrPage);
11237 Assert(!pVCpu->iem.s.cActiveMappings);
11238 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11239}
11240
11241
11242/**
11243 * Interface for HM and EM to emulate the INVPCID instruction.
11244 *
11245 * @returns Strict VBox status code.
11246 * @retval VINF_PGM_SYNC_CR3
11247 *
11248 * @param pVCpu The cross context virtual CPU structure.
11249 * @param cbInstr The instruction length in bytes.
11250 * @param iEffSeg The effective segment register.
11251 * @param GCPtrDesc The effective address of the INVPCID descriptor.
11252 * @param uType The invalidation type.
11253 *
11254 * @remarks In ring-0 not all of the state needs to be synced in.
11255 */
11256VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvpcid(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPTR GCPtrDesc,
11257 uint64_t uType)
11258{
11259 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 4);
11260
11261 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11262 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_invpcid, iEffSeg, GCPtrDesc, uType);
11263 Assert(!pVCpu->iem.s.cActiveMappings);
11264 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11265}
11266
11267
11268/**
11269 * Interface for HM and EM to emulate the CPUID instruction.
11270 *
11271 * @returns Strict VBox status code.
11272 *
11273 * @param pVCpu The cross context virtual CPU structure.
11274 * @param cbInstr The instruction length in bytes.
11275 *
11276 * @remarks Not all of the state needs to be synced in; the usual set plus RAX and RCX.
11277 */
11278VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedCpuid(PVMCPUCC pVCpu, uint8_t cbInstr)
11279{
11280 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11281 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX);
11282
11283 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11284 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_cpuid);
11285 Assert(!pVCpu->iem.s.cActiveMappings);
11286 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11287}
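
/*
 * Usage sketch (illustrative only): the common calling pattern for the simple
 * IEMExecDecodedXxx wrappers, shown here for CPUID.  Treating
 * VINF_IEM_RAISED_XCPT as "handled" is one plausible policy, not a requirement.
 */
#if 0
static VBOXSTRICTRC hmExampleHandleCpuid(PVMCPUCC pVCpu)
{
    VBOXSTRICTRC rcStrict = IEMExecDecodedCpuid(pVCpu, 2 /*cbInstr: 0F A2*/);
    if (rcStrict == VINF_IEM_RAISED_XCPT)
        rcStrict = VINF_SUCCESS;    /* the exception was merged into the guest context */
    return rcStrict;
}
#endif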
11288
11289
11290/**
11291 * Interface for HM and EM to emulate the RDPMC instruction.
11292 *
11293 * @returns Strict VBox status code.
11294 *
11295 * @param pVCpu The cross context virtual CPU structure.
11296 * @param cbInstr The instruction length in bytes.
11297 *
11298 * @remarks Not all of the state needs to be synced in.
11299 */
11300VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdpmc(PVMCPUCC pVCpu, uint8_t cbInstr)
11301{
11302 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11303 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
11304
11305 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11306 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdpmc);
11307 Assert(!pVCpu->iem.s.cActiveMappings);
11308 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11309}
11310
11311
11312/**
11313 * Interface for HM and EM to emulate the RDTSC instruction.
11314 *
11315 * @returns Strict VBox status code.
11316 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
11317 *
11318 * @param pVCpu The cross context virtual CPU structure.
11319 * @param cbInstr The instruction length in bytes.
11320 *
11321 * @remarks Not all of the state needs to be synced in.
11322 */
11323VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtsc(PVMCPUCC pVCpu, uint8_t cbInstr)
11324{
11325 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11326 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
11327
11328 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11329 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtsc);
11330 Assert(!pVCpu->iem.s.cActiveMappings);
11331 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11332}
11333
11334
11335/**
11336 * Interface for HM and EM to emulate the RDTSCP instruction.
11337 *
11338 * @returns Strict VBox status code.
11339 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
11340 *
11341 * @param pVCpu The cross context virtual CPU structure.
11342 * @param cbInstr The instruction length in bytes.
11343 *
11344 * @remarks Not all of the state needs to be synced in. Recommended
11345 * to include CPUMCTX_EXTRN_TSC_AUX to avoid an extra fetch call.
11346 */
11347VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtscp(PVMCPUCC pVCpu, uint8_t cbInstr)
11348{
11349 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11350 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_TSC_AUX);
11351
11352 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11353 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtscp);
11354 Assert(!pVCpu->iem.s.cActiveMappings);
11355 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11356}
11357
11358
11359/**
11360 * Interface for HM and EM to emulate the RDMSR instruction.
11361 *
11362 * @returns Strict VBox status code.
11363 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
11364 *
11365 * @param pVCpu The cross context virtual CPU structure.
11366 * @param cbInstr The instruction length in bytes.
11367 *
11368 * @remarks Not all of the state needs to be synced in. Requires RCX and
11369 * (currently) all MSRs.
11370 */
11371VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
11372{
11373 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11374 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_ALL_MSRS);
11375
11376 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11377 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdmsr);
11378 Assert(!pVCpu->iem.s.cActiveMappings);
11379 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11380}
11381
11382
11383/**
11384 * Interface for HM and EM to emulate the WRMSR instruction.
11385 *
11386 * @returns Strict VBox status code.
11387 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
11388 *
11389 * @param pVCpu The cross context virtual CPU structure.
11390 * @param cbInstr The instruction length in bytes.
11391 *
11392 * @remarks Not all of the state needs to be synced in. Requires RCX, RAX, RDX,
11393 * and (currently) all MSRs.
11394 */
11395VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWrmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
11396{
11397 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11398 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK
11399 | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_ALL_MSRS);
11400
11401 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11402 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wrmsr);
11403 Assert(!pVCpu->iem.s.cActiveMappings);
11404 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11405}
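
/*
 * Usage sketch (illustrative only) for the MSR interfaces: the point is that the
 * inputs live in the guest context, so RCX/RAX/RDX (and currently all MSRs) must
 * have been imported before the call.  The handler name is an assumption.
 */
#if 0
static VBOXSTRICTRC hmExampleHandleWrmsr(PVMCPUCC pVCpu)
{
    /* ECX = MSR index, EDX:EAX = value; both are read from pVCpu->cpum.GstCtx. */
    return IEMExecDecodedWrmsr(pVCpu, 2 /*cbInstr: 0F 30*/);
}
#endif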
11406
11407
11408/**
11409 * Interface for HM and EM to emulate the MONITOR instruction.
11410 *
11411 * @returns Strict VBox status code.
11412 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
11413 *
11414 * @param pVCpu The cross context virtual CPU structure.
11415 * @param cbInstr The instruction length in bytes.
11416 *
11417 * @remarks Not all of the state needs to be synced in.
11418 * @remarks ASSUMES that DS is the effective segment and that no segment
11419 * override prefixes are used.
11420 */
11421VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMonitor(PVMCPUCC pVCpu, uint8_t cbInstr)
11422{
11423 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11424 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);
11425
11426 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11427 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_monitor, X86_SREG_DS);
11428 Assert(!pVCpu->iem.s.cActiveMappings);
11429 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11430}
11431
11432
11433/**
11434 * Interface for HM and EM to emulate the MWAIT instruction.
11435 *
11436 * @returns Strict VBox status code.
11437 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
11438 *
11439 * @param pVCpu The cross context virtual CPU structure.
11440 * @param cbInstr The instruction length in bytes.
11441 *
11442 * @remarks Not all of the state needs to be synced in.
11443 */
11444VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMwait(PVMCPUCC pVCpu, uint8_t cbInstr)
11445{
11446 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11447 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX);
11448
11449 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11450 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_mwait);
11451 Assert(!pVCpu->iem.s.cActiveMappings);
11452 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11453}
11454
11455
11456/**
11457 * Interface for HM and EM to emulate the HLT instruction.
11458 *
11459 * @returns Strict VBox status code.
11460 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
11461 *
11462 * @param pVCpu The cross context virtual CPU structure.
11463 * @param cbInstr The instruction length in bytes.
11464 *
11465 * @remarks Not all of the state needs to be synced in.
11466 */
11467VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedHlt(PVMCPUCC pVCpu, uint8_t cbInstr)
11468{
11469 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
11470
11471 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11472 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_hlt);
11473 Assert(!pVCpu->iem.s.cActiveMappings);
11474 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11475}
11476
11477
11478/**
11479 * Checks if IEM is in the process of delivering an event (interrupt or
11480 * exception).
11481 *
11482 * @returns true if we're in the process of raising an interrupt or exception,
11483 * false otherwise.
11484 * @param pVCpu The cross context virtual CPU structure.
11485 * @param puVector Where to store the vector associated with the
11486 * currently delivered event, optional.
11487 * @param pfFlags Where to store the event delivery flags (see
11488 * IEM_XCPT_FLAGS_XXX), optional.
11489 * @param puErr Where to store the error code associated with the
11490 * event, optional.
11491 * @param puCr2 Where to store the CR2 associated with the event,
11492 * optional.
11493 * @remarks The caller should check the flags to determine if the error code and
11494 * CR2 are valid for the event.
11495 */
11496VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPUCC pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr, uint64_t *puCr2)
11497{
11498 bool const fRaisingXcpt = pVCpu->iem.s.cXcptRecursions > 0;
11499 if (fRaisingXcpt)
11500 {
11501 if (puVector)
11502 *puVector = pVCpu->iem.s.uCurXcpt;
11503 if (pfFlags)
11504 *pfFlags = pVCpu->iem.s.fCurXcpt;
11505 if (puErr)
11506 *puErr = pVCpu->iem.s.uCurXcptErr;
11507 if (puCr2)
11508 *puCr2 = pVCpu->iem.s.uCurXcptCr2;
11509 }
11510 return fRaisingXcpt;
11511}
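
/*
 * Usage sketch (illustrative only): querying the event currently being delivered,
 * e.g. for logging or for building up VT-x/AMD-V pending-event state.  The helper
 * name and the log text are assumptions.
 */
#if 0
static void emExampleLogCurrentXcpt(PVMCPUCC pVCpu)
{
    uint8_t  uVector;
    uint32_t fFlags, uErr;
    uint64_t uCr2;
    if (IEMGetCurrentXcpt(pVCpu, &uVector, &fFlags, &uErr, &uCr2))
        Log(("emExampleLogCurrentXcpt: vector=%#x fFlags=%#x uErr=%#x uCr2=%#RX64\n",
             uVector, fFlags, (fFlags & IEM_XCPT_FLAGS_ERR) ? uErr : 0, uCr2));
}
#endif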
11512
11513#ifdef IN_RING3
11514
11515/**
11516 * Handles the unlikely and probably fatal merge cases.
11517 *
11518 * @returns Merged status code.
11519 * @param rcStrict Current EM status code.
11520 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
11521 * with @a rcStrict.
11522 * @param iMemMap The memory mapping index. For error reporting only.
11523 * @param pVCpu The cross context virtual CPU structure of the calling
11524 * thread, for error reporting only.
11525 */
11526DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
11527 unsigned iMemMap, PVMCPUCC pVCpu)
11528{
11529 if (RT_FAILURE_NP(rcStrict))
11530 return rcStrict;
11531
11532 if (RT_FAILURE_NP(rcStrictCommit))
11533 return rcStrictCommit;
11534
11535 if (rcStrict == rcStrictCommit)
11536 return rcStrictCommit;
11537
11538 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
11539 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
11540 pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
11541 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
11542 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
11543 return VERR_IOM_FF_STATUS_IPE;
11544}
11545
11546
11547/**
11548 * Helper for IEMR3ProcessForceFlag.
11549 *
11550 * @returns Merged status code.
11551 * @param rcStrict Current EM status code.
11552 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
11553 * with @a rcStrict.
11554 * @param iMemMap The memory mapping index. For error reporting only.
11555 * @param pVCpu The cross context virtual CPU structure of the calling
11556 * thread, for error reporting only.
11557 */
11558DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPUCC pVCpu)
11559{
11560 /* Simple. */
11561 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
11562 return rcStrictCommit;
11563
11564 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
11565 return rcStrict;
11566
11567 /* EM scheduling status codes. */
11568 if (RT_LIKELY( rcStrict >= VINF_EM_FIRST
11569 && rcStrict <= VINF_EM_LAST))
11570 {
11571 if (RT_LIKELY( rcStrictCommit >= VINF_EM_FIRST
11572 && rcStrictCommit <= VINF_EM_LAST))
11573 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
11574 }
11575
11576 /* Unlikely */
11577 return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
11578}
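
/*
 * Informative examples of the merge rules above (derived from the code, not normative):
 *   - rcStrict is VINF_SUCCESS or VINF_EM_RAW_TO_R3        -> rcStrictCommit wins.
 *   - otherwise, rcStrictCommit is VINF_SUCCESS            -> rcStrict wins.
 *   - both are EM scheduling codes (VINF_EM_FIRST..LAST)   -> the numerically lower,
 *                                                             i.e. higher priority, one wins.
 *   - anything else (e.g. failure statuses)                -> iemR3MergeStatusSlow decides.
 */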
11579
11580
11581/**
11582 * Called by force-flag handling code when VMCPU_FF_IEM is set.
11583 *
11584 * @returns Merge between @a rcStrict and what the commit operation returned.
11585 * @param pVM The cross context VM structure.
11586 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11587 * @param rcStrict The status code returned by ring-0 or raw-mode.
11588 */
11589VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
11590{
11591 /*
11592 * Reset the pending commit.
11593 */
11594 AssertMsg( (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
11595 & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
11596 ("%#x %#x %#x\n",
11597 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
11598 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
11599
11600 /*
11601 * Commit the pending bounce buffers (usually just one).
11602 */
11603 unsigned cBufs = 0;
11604 unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
11605 while (iMemMap-- > 0)
11606 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
11607 {
11608 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
11609 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
11610 Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
11611
11612 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
11613 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
11614 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
11615
11616 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
11617 {
11618 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
11619 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
11620 pbBuf,
11621 cbFirst,
11622 PGMACCESSORIGIN_IEM);
11623 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
11624 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
11625 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
11626 VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
11627 }
11628
11629 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
11630 {
11631 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
11632 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
11633 pbBuf + cbFirst,
11634 cbSecond,
11635 PGMACCESSORIGIN_IEM);
11636 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
11637 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
11638 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
11639 VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
11640 }
11641 cBufs++;
11642 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
11643 }
11644
11645 AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
11646 ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
11647 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
11648 pVCpu->iem.s.cActiveMappings = 0;
11649 return rcStrict;
11650}
11651
11652#endif /* IN_RING3 */
11653