VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@ 100108

Last change on this file since 100108 was 100060, checked in by vboxsync, 18 months ago

VMM/IEM: todo on iemFpuUpdateDP

1/* $Id: IEMAll.cpp 100060 2023-06-03 00:18:33Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/** @page pg_iem IEM - Interpreted Execution Manager
30 *
31 * The interpreted execution manager (IEM) is for executing short guest code
32 * sequences that are causing too many exits / virtualization traps. It will
33 * also be used to interpret single instructions, thus replacing the selective
34 * interpreters in EM and IOM.
35 *
36 * Design goals:
37 * - Relatively small footprint, although we favour speed and correctness
38 * over size.
39 * - Reasonably fast.
40 * - Correctly handle lock prefixed instructions.
41 * - Complete instruction set - eventually.
42 * - Refactorable into a recompiler, maybe.
43 * - Replace EMInterpret*.
44 *
45 * Using the existing disassembler has been considered; however, this is thought
46 * to conflict with speed as the disassembler chews things a bit too much while
47 * leaving us with a somewhat complicated state to interpret afterwards.
48 *
49 *
50 * The current code is very much work in progress. You've been warned!
51 *
52 *
53 * @section sec_iem_fpu_instr FPU Instructions
54 *
55 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
56 * same or equivalent instructions on the host FPU. To make life easy, we also
57 * let the FPU prioritize the unmasked exceptions for us. This, however, only
58 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
59 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
60 * can trigger spurious FPU exceptions.
61 *
62 * The guest FPU state is not loaded into the host CPU and kept there till we
63 * leave IEM because the calling conventions have declared an all year open
64 * season on much of the FPU state. For instance an innocent looking call to
65 * memcpy might end up using a whole bunch of XMM or MM registers if the
66 * particular implementation finds it worthwhile.
67 *
68 *
69 * @section sec_iem_logging Logging
70 *
71 * The IEM code uses the \"IEM\" log group for the main logging. The different
72 * logging levels/flags are generally used for the following purposes:
73 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
74 * - Flow (LogFlow) : Basic enter/exit IEM state info.
75 * - Level 2 (Log2) : ?
76 * - Level 3 (Log3) : More detailed enter/exit IEM state info.
77 * - Level 4 (Log4) : Decoding mnemonics w/ EIP.
78 * - Level 5 (Log5) : Decoding details.
79 * - Level 6 (Log6) : Enables/disables the lockstep comparison with REM.
80 * - Level 7 (Log7) : iret++ execution logging.
81 * - Level 8 (Log8) : Memory writes.
82 * - Level 9 (Log9) : Memory reads.
83 * - Level 10 (Log10): TLBs.
84 * - Level 11 (Log11): Unmasked FPU exceptions.
85 *
86 * The SVM (AMD-V) and VMX (VT-x) code has the following assignments:
87 * - Level 1 (Log) : Errors and other major events.
88 * - Flow (LogFlow) : Misc flow stuff (cleanup?)
89 * - Level 2 (Log2) : VM exits.
90 */
91
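/*
 * A rough illustration of how the level assignments above map onto the IPRT
 * logging macros used in this file.  This is a sketch only: the format
 * strings and variables are made up for the example and do not correspond to
 * actual statements elsewhere in the file.
 */
#if 0
    LogFlow(("IEMExecOne: cs:rip=%04x:%RGv\n", uCs, GCPtrRip));     /* Flow   : enter/exit info  */
    Log(("iemRaiseXcptOrInt: vec=%#x\n", u8Vector));                /* Level 1: exceptions etc.  */
    Log4(("decode: %04x:%RGv %s\n", uCs, GCPtrRip, pszMnemonic));   /* Level 4: mnemonics w/ EIP */
    Log8(("IEM WR %RGv LB %#x\n", GCPtrMem, cbMem));                /* Level 8: memory writes    */
    Log9(("IEM RD %RGv LB %#x\n", GCPtrMem, cbMem));                /* Level 9: memory reads     */
#endif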
92/* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
93#ifdef _MSC_VER
94# pragma warning(disable:4505)
95#endif
96
97
98/*********************************************************************************************************************************
99* Header Files *
100*********************************************************************************************************************************/
101#define LOG_GROUP LOG_GROUP_IEM
102#define VMCPU_INCL_CPUM_GST_CTX
103#include <VBox/vmm/iem.h>
104#include <VBox/vmm/cpum.h>
105#include <VBox/vmm/apic.h>
106#include <VBox/vmm/pdm.h>
107#include <VBox/vmm/pgm.h>
108#include <VBox/vmm/iom.h>
109#include <VBox/vmm/em.h>
110#include <VBox/vmm/hm.h>
111#include <VBox/vmm/nem.h>
112#include <VBox/vmm/gim.h>
113#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
114# include <VBox/vmm/em.h>
115# include <VBox/vmm/hm_svm.h>
116#endif
117#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
118# include <VBox/vmm/hmvmxinline.h>
119#endif
120#include <VBox/vmm/tm.h>
121#include <VBox/vmm/dbgf.h>
122#include <VBox/vmm/dbgftrace.h>
123#include "IEMInternal.h"
124#include <VBox/vmm/vmcc.h>
125#include <VBox/log.h>
126#include <VBox/err.h>
127#include <VBox/param.h>
128#include <VBox/dis.h>
129#include <iprt/asm-math.h>
130#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
131# include <iprt/asm-amd64-x86.h>
132#elif defined(RT_ARCH_ARM64) || defined(RT_ARCH_ARM32)
133# include <iprt/asm-arm.h>
134#endif
135#include <iprt/assert.h>
136#include <iprt/string.h>
137#include <iprt/x86.h>
138
139#include "IEMInline.h"
140
141
142/*********************************************************************************************************************************
143* Structures and Typedefs *
144*********************************************************************************************************************************/
145/**
146 * CPU exception classes.
147 */
148typedef enum IEMXCPTCLASS
149{
150 IEMXCPTCLASS_BENIGN,
151 IEMXCPTCLASS_CONTRIBUTORY,
152 IEMXCPTCLASS_PAGE_FAULT,
153 IEMXCPTCLASS_DOUBLE_FAULT
154} IEMXCPTCLASS;
155
156
157/*********************************************************************************************************************************
158* Global Variables *
159*********************************************************************************************************************************/
160#if defined(IEM_LOG_MEMORY_WRITES)
161/** What IEM just wrote. */
162uint8_t g_abIemWrote[256];
163/** How much IEM just wrote. */
164size_t g_cbIemWrote;
165#endif
166
167
168/*********************************************************************************************************************************
169* Internal Functions *
170*********************************************************************************************************************************/
171static VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,
172 uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT;
173
174
175/**
176 * Calculates IEM_F_BRK_PENDING_XXX (IEM_F_PENDING_BRK_MASK) flags, slow code
177 * path.
178 *
179 * @returns IEM_F_BRK_PENDING_XXX or zero.
180 * @param pVCpu The cross context virtual CPU structure of the
181 * calling thread.
182 *
183 * @note Don't call directly, use iemCalcExecDbgFlags instead.
184 */
185uint32_t iemCalcExecDbgFlagsSlow(PVMCPUCC pVCpu)
186{
187 uint32_t fExec = 0;
188
189 /*
190 * Process guest breakpoints.
191 */
192#define PROCESS_ONE_BP(a_fDr7, a_iBp) do { \
193 if (a_fDr7 & X86_DR7_L_G(a_iBp)) \
194 { \
195 switch (X86_DR7_GET_RW(a_fDr7, a_iBp)) \
196 { \
197 case X86_DR7_RW_EO: \
198 fExec |= IEM_F_PENDING_BRK_INSTR; \
199 break; \
200 case X86_DR7_RW_WO: \
201 case X86_DR7_RW_RW: \
202 fExec |= IEM_F_PENDING_BRK_DATA; \
203 break; \
204 case X86_DR7_RW_IO: \
205 fExec |= IEM_F_PENDING_BRK_X86_IO; \
206 break; \
207 } \
208 } \
209 } while (0)
210
211 uint32_t const fGstDr7 = (uint32_t)pVCpu->cpum.GstCtx.dr[7];
212 if (fGstDr7 & X86_DR7_ENABLED_MASK)
213 {
214 PROCESS_ONE_BP(fGstDr7, 0);
215 PROCESS_ONE_BP(fGstDr7, 1);
216 PROCESS_ONE_BP(fGstDr7, 2);
217 PROCESS_ONE_BP(fGstDr7, 3);
218 }
219
220 /*
221 * Process hypervisor breakpoints.
222 */
223 uint32_t const fHyperDr7 = DBGFBpGetDR7(pVCpu->CTX_SUFF(pVM));
224 if (fHyperDr7 & X86_DR7_ENABLED_MASK)
225 {
226 PROCESS_ONE_BP(fHyperDr7, 0);
227 PROCESS_ONE_BP(fHyperDr7, 1);
228 PROCESS_ONE_BP(fHyperDr7, 2);
229 PROCESS_ONE_BP(fHyperDr7, 3);
230 }
231
232 return fExec;
233}
234
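/*
 * Worked example for the DR7 decoding done by iemCalcExecDbgFlagsSlow above,
 * assuming the standard x86 DR7 layout (L0/G0 in bits 0-1, R/W0 in bits
 * 16-17).  The value below is made up for illustration.
 */
#if 0
    uint32_t const fDr7Example = UINT32_C(0x00010002);  /* G0 set, R/W0 = 01b (data writes) */
    /* With this guest DR7, PROCESS_ONE_BP(fDr7Example, 0) sees an enabled
       breakpoint and adds IEM_F_PENDING_BRK_DATA to the returned mask;
       0x00000001 (L0 set, R/W0 = 00b, instruction fetch) would add
       IEM_F_PENDING_BRK_INSTR instead. */
#endif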
235
236/**
237 * Initializes the decoder state.
238 *
239 * iemReInitDecoder is mostly a copy of this function.
240 *
241 * @param pVCpu The cross context virtual CPU structure of the
242 * calling thread.
243 * @param fExecOpts Optional execution flags:
244 * - IEM_F_BYPASS_HANDLERS
245 * - IEM_F_X86_DISREGARD_LOCK
246 */
247DECLINLINE(void) iemInitDecoder(PVMCPUCC pVCpu, uint32_t fExecOpts)
248{
249 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
250 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
251 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
252 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
253 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
254 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
255 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
256 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
257 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
258 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
259
260 /* Execution state: */
261 uint32_t fExec;
262 pVCpu->iem.s.fExec = fExec = iemCalcExecFlags(pVCpu) | fExecOpts;
263
264 /* Decoder state: */
265 pVCpu->iem.s.enmDefAddrMode = fExec & IEM_F_MODE_CPUMODE_MASK; /** @todo check if this is correct... */
266 pVCpu->iem.s.enmEffAddrMode = fExec & IEM_F_MODE_CPUMODE_MASK;
267 if ((fExec & IEM_F_MODE_CPUMODE_MASK) != IEMMODE_64BIT)
268 {
269 pVCpu->iem.s.enmDefOpSize = fExec & IEM_F_MODE_CPUMODE_MASK; /** @todo check if this is correct... */
270 pVCpu->iem.s.enmEffOpSize = fExec & IEM_F_MODE_CPUMODE_MASK;
271 }
272 else
273 {
274 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
275 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
276 }
277 pVCpu->iem.s.fPrefixes = 0;
278 pVCpu->iem.s.uRexReg = 0;
279 pVCpu->iem.s.uRexB = 0;
280 pVCpu->iem.s.uRexIndex = 0;
281 pVCpu->iem.s.idxPrefix = 0;
282 pVCpu->iem.s.uVex3rdReg = 0;
283 pVCpu->iem.s.uVexLength = 0;
284 pVCpu->iem.s.fEvexStuff = 0;
285 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
286#ifdef IEM_WITH_CODE_TLB
287 pVCpu->iem.s.pbInstrBuf = NULL;
288 pVCpu->iem.s.offInstrNextByte = 0;
289 pVCpu->iem.s.offCurInstrStart = 0;
290# ifdef VBOX_STRICT
291 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
292 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
293 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
294 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
295# endif
296#else
297 pVCpu->iem.s.offOpcode = 0;
298 pVCpu->iem.s.cbOpcode = 0;
299#endif
300 pVCpu->iem.s.offModRm = 0;
301 pVCpu->iem.s.cActiveMappings = 0;
302 pVCpu->iem.s.iNextMapping = 0;
303 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
304
305#ifdef DBGFTRACE_ENABLED
306 switch (IEM_GET_CPU_MODE(pVCpu))
307 {
308 case IEMMODE_64BIT:
309 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.rip);
310 break;
311 case IEMMODE_32BIT:
312 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
313 break;
314 case IEMMODE_16BIT:
315 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
316 break;
317 }
318#endif
319}
320
321
322/**
323 * Reinitializes the decoder state 2nd+ loop of IEMExecLots.
324 *
325 * This is mostly a copy of iemInitDecoder.
326 *
327 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
328 */
329DECLINLINE(void) iemReInitDecoder(PVMCPUCC pVCpu)
330{
331 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
332 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
333 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
334 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
335 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
336 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
337 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
338 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
339 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
340
341 /* ASSUMES: Anyone changing CPU state affecting the fExec bits will update them! */
342 AssertMsg((pVCpu->iem.s.fExec & ~IEM_F_USER_OPTS) == iemCalcExecFlags(pVCpu),
343 ("fExec=%#x iemCalcExecModeFlags=%#x\n", pVCpu->iem.s.fExec, iemCalcExecFlags(pVCpu)));
344
345 IEMMODE const enmMode = IEM_GET_CPU_MODE(pVCpu);
346 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
347 pVCpu->iem.s.enmEffAddrMode = enmMode;
348 if (enmMode != IEMMODE_64BIT)
349 {
350 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
351 pVCpu->iem.s.enmEffOpSize = enmMode;
352 }
353 else
354 {
355 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
356 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
357 }
358 pVCpu->iem.s.fPrefixes = 0;
359 pVCpu->iem.s.uRexReg = 0;
360 pVCpu->iem.s.uRexB = 0;
361 pVCpu->iem.s.uRexIndex = 0;
362 pVCpu->iem.s.idxPrefix = 0;
363 pVCpu->iem.s.uVex3rdReg = 0;
364 pVCpu->iem.s.uVexLength = 0;
365 pVCpu->iem.s.fEvexStuff = 0;
366 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
367#ifdef IEM_WITH_CODE_TLB
368 if (pVCpu->iem.s.pbInstrBuf)
369 {
370 uint64_t off = (enmMode == IEMMODE_64BIT
371 ? pVCpu->cpum.GstCtx.rip
372 : pVCpu->cpum.GstCtx.eip + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base)
373 - pVCpu->iem.s.uInstrBufPc;
374 if (off < pVCpu->iem.s.cbInstrBufTotal)
375 {
376 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
377 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
378 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
379 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
380 else
381 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
382 }
383 else
384 {
385 pVCpu->iem.s.pbInstrBuf = NULL;
386 pVCpu->iem.s.offInstrNextByte = 0;
387 pVCpu->iem.s.offCurInstrStart = 0;
388 pVCpu->iem.s.cbInstrBuf = 0;
389 pVCpu->iem.s.cbInstrBufTotal = 0;
390 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
391 }
392 }
393 else
394 {
395 pVCpu->iem.s.offInstrNextByte = 0;
396 pVCpu->iem.s.offCurInstrStart = 0;
397 pVCpu->iem.s.cbInstrBuf = 0;
398 pVCpu->iem.s.cbInstrBufTotal = 0;
399# ifdef VBOX_STRICT
400 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
401# endif
402 }
403#else
404 pVCpu->iem.s.cbOpcode = 0;
405 pVCpu->iem.s.offOpcode = 0;
406#endif
407 pVCpu->iem.s.offModRm = 0;
408 Assert(pVCpu->iem.s.cActiveMappings == 0);
409 pVCpu->iem.s.iNextMapping = 0;
410 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
411 Assert(!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS));
412
413#ifdef DBGFTRACE_ENABLED
414 switch (enmMode)
415 {
416 case IEMMODE_64BIT:
417 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.rip);
418 break;
419 case IEMMODE_32BIT:
420 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
421 break;
422 case IEMMODE_16BIT:
423 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
424 break;
425 }
426#endif
427}
428
429
430
431/**
432 * Prefetches opcodes the first time when starting execution.
433 *
434 * @returns Strict VBox status code.
435 * @param pVCpu The cross context virtual CPU structure of the
436 * calling thread.
437 * @param fExecOpts Optional execution flags:
438 * - IEM_F_BYPASS_HANDLERS
439 * - IEM_F_X86_DISREGARD_LOCK
440 */
441static VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPUCC pVCpu, uint32_t fExecOpts) RT_NOEXCEPT
442{
443 iemInitDecoder(pVCpu, fExecOpts);
444
445#ifndef IEM_WITH_CODE_TLB
446 /*
447 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
448 *
449 * First translate CS:rIP to a physical address.
450 *
451 * Note! The iemOpcodeFetchMoreBytes code depends on this here code to fetch
452 * all relevant bytes from the first page, as it ASSUMES it's only ever
453 * called for dealing with CS.LIM, page crossing and instructions that
454 * are too long.
455 */
456 uint32_t cbToTryRead;
457 RTGCPTR GCPtrPC;
458 if (IEM_IS_64BIT_CODE(pVCpu))
459 {
460 cbToTryRead = GUEST_PAGE_SIZE;
461 GCPtrPC = pVCpu->cpum.GstCtx.rip;
462 if (IEM_IS_CANONICAL(GCPtrPC))
463 cbToTryRead = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
464 else
465 return iemRaiseGeneralProtectionFault0(pVCpu);
466 }
467 else
468 {
469 uint32_t GCPtrPC32 = pVCpu->cpum.GstCtx.eip;
470 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu), ("%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
471 if (GCPtrPC32 <= pVCpu->cpum.GstCtx.cs.u32Limit)
472 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrPC32 + 1;
473 else
474 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
475 if (cbToTryRead) { /* likely */ }
476 else /* overflowed */
477 {
478 Assert(GCPtrPC32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
479 cbToTryRead = UINT32_MAX;
480 }
481 GCPtrPC = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrPC32;
482 Assert(GCPtrPC <= UINT32_MAX);
483 }
484
485 PGMPTWALK Walk;
486 int rc = PGMGstGetPage(pVCpu, GCPtrPC, &Walk);
487 if (RT_SUCCESS(rc))
488 Assert(Walk.fSucceeded); /* probable. */
489 else
490 {
491 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
492# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
493 if (Walk.fFailed & PGM_WALKFAIL_EPT)
494 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
495# endif
496 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, rc);
497 }
498 if ((Walk.fEffective & X86_PTE_US) || IEM_GET_CPL(pVCpu) != 3) { /* likely */ }
499 else
500 {
501 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
502# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
503 if (Walk.fFailed & PGM_WALKFAIL_EPT)
504 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
505# endif
506 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
507 }
508 if (!(Walk.fEffective & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
509 else
510 {
511 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
512# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
513 if (Walk.fFailed & PGM_WALKFAIL_EPT)
514 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
515# endif
516 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
517 }
518 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
519 /** @todo Check reserved bits and such stuff. PGM is better at doing
520 * that, so do it when implementing the guest virtual address
521 * TLB... */
522
523 /*
524 * Read the bytes at this address.
525 */
526 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
527 if (cbToTryRead > cbLeftOnPage)
528 cbToTryRead = cbLeftOnPage;
529 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
530 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
531
532 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
533 {
534 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
535 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
536 { /* likely */ }
537 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
538 {
539 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
540 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
541 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
542 }
543 else
544 {
545 Log((RT_SUCCESS(rcStrict)
546 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
547 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
548 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
549 return rcStrict;
550 }
551 }
552 else
553 {
554 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
555 if (RT_SUCCESS(rc))
556 { /* likely */ }
557 else
558 {
559 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
560 GCPtrPC, GCPhys, cbToTryRead, rc));
561 return rc;
562 }
563 }
564 pVCpu->iem.s.cbOpcode = cbToTryRead;
565#endif /* !IEM_WITH_CODE_TLB */
566 return VINF_SUCCESS;
567}
568
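/*
 * A small worked example of the clamping done in the function above, assuming
 * 4 KiB guest pages: with GCPtrPC = 0x00401ff0 the page offset is 0xff0, so
 * cbToTryRead = 0x1000 - 0xff0 = 0x10 and the prefetch stops exactly at the
 * page boundary; sizeof(abOpcode) caps the read further if needed.  The
 * helper below is an illustrative sketch only and is not used anywhere.
 */
#if 0
static uint32_t iemSketchCbLeftOnPage(uint64_t GCPtrPC)
{
    return (uint32_t)(0x1000 - (GCPtrPC & 0xfff));  /* GUEST_PAGE_SIZE - page offset */
}
#endif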
569
570/**
571 * Invalidates the IEM TLBs.
572 *
573 * This is called internally as well as by PGM when moving GC mappings.
574 *
575 * @param pVCpu The cross context virtual CPU structure of the calling
576 * thread.
577 */
578VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPUCC pVCpu)
579{
580#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
581 Log10(("IEMTlbInvalidateAll\n"));
582# ifdef IEM_WITH_CODE_TLB
583 pVCpu->iem.s.cbInstrBufTotal = 0;
584 pVCpu->iem.s.CodeTlb.uTlbRevision += IEMTLB_REVISION_INCR;
585 if (pVCpu->iem.s.CodeTlb.uTlbRevision != 0)
586 { /* very likely */ }
587 else
588 {
589 pVCpu->iem.s.CodeTlb.uTlbRevision = IEMTLB_REVISION_INCR;
590 unsigned i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
591 while (i-- > 0)
592 pVCpu->iem.s.CodeTlb.aEntries[i].uTag = 0;
593 }
594# endif
595
596# ifdef IEM_WITH_DATA_TLB
597 pVCpu->iem.s.DataTlb.uTlbRevision += IEMTLB_REVISION_INCR;
598 if (pVCpu->iem.s.DataTlb.uTlbRevision != 0)
599 { /* very likely */ }
600 else
601 {
602 pVCpu->iem.s.DataTlb.uTlbRevision = IEMTLB_REVISION_INCR;
603 unsigned i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
604 while (i-- > 0)
605 pVCpu->iem.s.DataTlb.aEntries[i].uTag = 0;
606 }
607# endif
608#else
609 RT_NOREF(pVCpu);
610#endif
611}
612
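/*
 * The invalidation above relies on the revision being part of each entry's
 * tag: a lookup only counts as a hit when the stored tag equals the page tag
 * OR'ed with the *current* revision (see IEMTlbInvalidatePage below), so
 * bumping uTlbRevision lazily invalidates every entry without touching the
 * array; only when the counter wraps to zero are the tags cleared explicitly.
 * Minimal sketch of the hit test, with names and types simplified:
 */
#if 0
static bool iemSketchTlbIsHit(uint64_t uEntryTag, uint64_t uPageTagNoRev, uint64_t uTlbRevision)
{
    /* uEntryTag embeds the revision that was current when the entry was filled. */
    return uEntryTag == (uPageTagNoRev | uTlbRevision);
}
#endif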
613
614/**
615 * Invalidates a page in the TLBs.
616 *
617 * @param pVCpu The cross context virtual CPU structure of the calling
618 * thread.
619 * @param GCPtr The address of the page to invalidate
620 * @thread EMT(pVCpu)
621 */
622VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPUCC pVCpu, RTGCPTR GCPtr)
623{
624#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
625 Log10(("IEMTlbInvalidatePage: GCPtr=%RGv\n", GCPtr));
626 GCPtr = IEMTLB_CALC_TAG_NO_REV(GCPtr);
627 Assert(!(GCPtr >> (48 - X86_PAGE_SHIFT)));
628 uintptr_t const idx = IEMTLB_TAG_TO_INDEX(GCPtr);
629
630# ifdef IEM_WITH_CODE_TLB
631 if (pVCpu->iem.s.CodeTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.CodeTlb.uTlbRevision))
632 {
633 pVCpu->iem.s.CodeTlb.aEntries[idx].uTag = 0;
634 if (GCPtr == IEMTLB_CALC_TAG_NO_REV(pVCpu->iem.s.uInstrBufPc))
635 pVCpu->iem.s.cbInstrBufTotal = 0;
636 }
637# endif
638
639# ifdef IEM_WITH_DATA_TLB
640 if (pVCpu->iem.s.DataTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.DataTlb.uTlbRevision))
641 pVCpu->iem.s.DataTlb.aEntries[idx].uTag = 0;
642# endif
643#else
644 NOREF(pVCpu); NOREF(GCPtr);
645#endif
646}
647
648
649#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
650/**
651 * Invalidates both TLBs the slow way following a rollover.
652 *
653 * Worker for IEMTlbInvalidateAllPhysical,
654 * IEMTlbInvalidateAllPhysicalAllCpus, iemOpcodeFetchBytesJmp, iemMemMap,
655 * iemMemMapJmp and others.
656 *
657 * @thread EMT(pVCpu)
658 */
659static void IEMTlbInvalidateAllPhysicalSlow(PVMCPUCC pVCpu)
660{
661 Log10(("IEMTlbInvalidateAllPhysicalSlow\n"));
662 ASMAtomicWriteU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
663 ASMAtomicWriteU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
664
665 unsigned i;
666# ifdef IEM_WITH_CODE_TLB
667 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
668 while (i-- > 0)
669 {
670 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
671 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~( IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
672 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
673 }
674# endif
675# ifdef IEM_WITH_DATA_TLB
676 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
677 while (i-- > 0)
678 {
679 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
680 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~( IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
681 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
682 }
683# endif
684
685}
686#endif
687
688
689/**
690 * Invalidates the host physical aspects of the IEM TLBs.
691 *
692 * This is called internally as well as by PGM when moving GC mappings.
693 *
694 * @param pVCpu The cross context virtual CPU structure of the calling
695 * thread.
696 * @note Currently not used.
697 */
698VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPUCC pVCpu)
699{
700#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
701 /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
702 Log10(("IEMTlbInvalidateAllPhysical\n"));
703
704# ifdef IEM_WITH_CODE_TLB
705 pVCpu->iem.s.cbInstrBufTotal = 0;
706# endif
707 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
708 if (RT_LIKELY(uTlbPhysRev > IEMTLB_PHYS_REV_INCR * 2))
709 {
710 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
711 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
712 }
713 else
714 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
715#else
716 NOREF(pVCpu);
717#endif
718}
719
720
721/**
722 * Invalidates the host physical aspects of the IEM TLBs on all CPUs.
723 *
724 * This is called internally as well as by PGM when moving GC mappings.
725 *
726 * @param pVM The cross context VM structure.
727 * @param idCpuCaller The ID of the calling EMT if available to the caller,
728 * otherwise NIL_VMCPUID.
729 *
730 * @remarks Caller holds the PGM lock.
731 */
732VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVMCC pVM, VMCPUID idCpuCaller)
733{
734#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
735 PVMCPUCC const pVCpuCaller = idCpuCaller >= pVM->cCpus ? VMMGetCpu(pVM) : VMMGetCpuById(pVM, idCpuCaller);
736 if (pVCpuCaller)
737 VMCPU_ASSERT_EMT(pVCpuCaller);
738 Log10(("IEMTlbInvalidateAllPhysicalAllCpus\n"));
739
740 VMCC_FOR_EACH_VMCPU(pVM)
741 {
742# ifdef IEM_WITH_CODE_TLB
743 if (pVCpuCaller == pVCpu)
744 pVCpu->iem.s.cbInstrBufTotal = 0;
745# endif
746
747 uint64_t const uTlbPhysRevPrev = ASMAtomicUoReadU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev);
748 uint64_t uTlbPhysRevNew = uTlbPhysRevPrev + IEMTLB_PHYS_REV_INCR;
749 if (RT_LIKELY(uTlbPhysRevNew > IEMTLB_PHYS_REV_INCR * 2))
750 { /* likely */}
751 else if (pVCpuCaller == pVCpu)
752 uTlbPhysRevNew = IEMTLB_PHYS_REV_INCR;
753 else
754 {
755 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
756 continue;
757 }
758 ASMAtomicCmpXchgU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev);
759 ASMAtomicCmpXchgU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev);
760 }
761 VMCC_FOR_EACH_VMCPU_END(pVM);
762
763#else
764 RT_NOREF(pVM, idCpuCaller);
765#endif
766}
767
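/*
 * Why the function above uses ASMAtomicCmpXchgU64 rather than a plain store:
 * the physical revision is bumped on behalf of *other* EMTs, which may be
 * advancing their own revision (or doing the slow reset) at the same time.
 * The compare-and-exchange only installs the new value if the revision still
 * equals the sampled one, so a racing update by the owning EMT simply wins
 * and no invalidation is lost.  Illustrative shape of the pattern only:
 */
#if 0
static void iemSketchBumpRemotePhysRev(volatile uint64_t *puPhysRev, uint64_t uIncr)
{
    uint64_t const uOld = ASMAtomicUoReadU64(puPhysRev);
    ASMAtomicCmpXchgU64(puPhysRev, uOld + uIncr, uOld); /* failure means the owner raced us - fine */
}
#endif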
768
769/**
770 * Flushes the prefetch buffer, light version.
771 */
772void iemOpcodeFlushLight(PVMCPUCC pVCpu, uint8_t cbInstr)
773{
774#ifndef IEM_WITH_CODE_TLB
775 pVCpu->iem.s.cbOpcode = cbInstr;
776#else
777 RT_NOREF(pVCpu, cbInstr);
778#endif
779}
780
781
782/**
783 * Flushes the prefetch buffer, heavy version.
784 */
785void iemOpcodeFlushHeavy(PVMCPUCC pVCpu, uint8_t cbInstr)
786{
787#ifndef IEM_WITH_CODE_TLB
788 pVCpu->iem.s.cbOpcode = cbInstr; /* Note! SVM and VT-x may set this to zero on exit, rather than the instruction length. */
789#elif 1
790 pVCpu->iem.s.pbInstrBuf = NULL;
791 RT_NOREF(cbInstr);
792#else
793 RT_NOREF(pVCpu, cbInstr);
794#endif
795}
796
797
798
799#ifdef IEM_WITH_CODE_TLB
800
801/**
802 * Tries to fetch @a cbDst opcode bytes, raising the appropriate exception on
803 * failure and jumping.
804 *
805 * We end up here for a number of reasons:
806 * - pbInstrBuf isn't yet initialized.
807 * - Advancing beyond the buffer boundary (e.g. cross page).
808 * - Advancing beyond the CS segment limit.
809 * - Fetching from non-mappable page (e.g. MMIO).
810 *
811 * @param pVCpu The cross context virtual CPU structure of the
812 * calling thread.
813 * @param pvDst Where to return the bytes.
814 * @param cbDst Number of bytes to read.
815 *
816 * @todo Make cbDst = 0 a way of initializing pbInstrBuf?
817 */
818void iemOpcodeFetchBytesJmp(PVMCPUCC pVCpu, size_t cbDst, void *pvDst) IEM_NOEXCEPT_MAY_LONGJMP
819{
820# ifdef IN_RING3
821 for (;;)
822 {
823 Assert(cbDst <= 8);
824 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
825
826 /*
827 * We might have a partial buffer match, deal with that first to make the
828 * rest simpler. This is the first part of the cross page/buffer case.
829 */
830 if (pVCpu->iem.s.pbInstrBuf != NULL)
831 {
832 if (offBuf < pVCpu->iem.s.cbInstrBuf)
833 {
834 Assert(offBuf + cbDst > pVCpu->iem.s.cbInstrBuf);
835 uint32_t const cbCopy = pVCpu->iem.s.cbInstrBuf - pVCpu->iem.s.offInstrNextByte;
836 memcpy(pvDst, &pVCpu->iem.s.pbInstrBuf[offBuf], cbCopy);
837
838 cbDst -= cbCopy;
839 pvDst = (uint8_t *)pvDst + cbCopy;
840 offBuf += cbCopy;
841 pVCpu->iem.s.offInstrNextByte += cbCopy;
842 }
843 }
844
845 /*
846 * Check segment limit, figuring how much we're allowed to access at this point.
847 *
848 * We will fault immediately if RIP is past the segment limit / in non-canonical
849 * territory. If we do continue, there are one or more bytes to read before we
850 * end up in trouble and we need to do that first before faulting.
851 */
852 RTGCPTR GCPtrFirst;
853 uint32_t cbMaxRead;
854 if (IEM_IS_64BIT_CODE(pVCpu))
855 {
856 GCPtrFirst = pVCpu->cpum.GstCtx.rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
857 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
858 { /* likely */ }
859 else
860 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
861 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
862 }
863 else
864 {
865 GCPtrFirst = pVCpu->cpum.GstCtx.eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
866 /* Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu)); - this is allowed */
867 if (RT_LIKELY((uint32_t)GCPtrFirst <= pVCpu->cpum.GstCtx.cs.u32Limit))
868 { /* likely */ }
869 else /** @todo For CPUs older than the 386, we should not necessarily generate \#GP here but wrap around! */
870 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
871 cbMaxRead = pVCpu->cpum.GstCtx.cs.u32Limit - (uint32_t)GCPtrFirst + 1;
872 if (cbMaxRead != 0)
873 { /* likely */ }
874 else
875 {
876 /* Overflowed because address is 0 and limit is max. */
877 Assert(GCPtrFirst == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
878 cbMaxRead = X86_PAGE_SIZE;
879 }
880 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base;
881 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
882 if (cbMaxRead2 < cbMaxRead)
883 cbMaxRead = cbMaxRead2;
884 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
885 }
886
887 /*
888 * Get the TLB entry for this piece of code.
889 */
890 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.CodeTlb, GCPtrFirst);
891 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.CodeTlb, uTag);
892 if (pTlbe->uTag == uTag)
893 {
894 /* likely when executing lots of code, otherwise unlikely */
895# ifdef VBOX_WITH_STATISTICS
896 pVCpu->iem.s.CodeTlb.cTlbHits++;
897# endif
898 }
899 else
900 {
901 pVCpu->iem.s.CodeTlb.cTlbMisses++;
902 PGMPTWALK Walk;
903 int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &Walk);
904 if (RT_FAILURE(rc))
905 {
906#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
907 /** @todo Nested VMX: Need to handle EPT violation/misconfig here? */
908 Assert(!(Walk.fFailed & PGM_WALKFAIL_EPT));
909#endif
910 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
911 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, rc);
912 }
913
914 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
915 Assert(Walk.fSucceeded);
916 pTlbe->uTag = uTag;
917 pTlbe->fFlagsAndPhysRev = (~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A))
918 | (Walk.fEffective >> X86_PTE_PAE_BIT_NX);
919 pTlbe->GCPhys = Walk.GCPhys;
920 pTlbe->pbMappingR3 = NULL;
921 }
922
923 /*
924 * Check TLB page table level access flags.
925 */
926 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
927 {
928 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && IEM_GET_CPL(pVCpu) == 3)
929 {
930 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
931 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
932 }
933 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
934 {
935 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrFirst));
936 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
937 }
938 }
939
940 /*
941 * Look up the physical page info if necessary.
942 */
943 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
944 { /* not necessary */ }
945 else
946 {
947 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
948 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
949 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
950 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
951 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
952 { /* likely */ }
953 else
954 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
955 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
956 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_UNASSIGNED);
957 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
958 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
959 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
960 }
961
962# if defined(IN_RING3) || defined(IN_RING0) /** @todo fixme */
963 /*
964 * Try do a direct read using the pbMappingR3 pointer.
965 */
966 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
967 == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
968 {
969 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
970 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
971 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
972 {
973 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
974 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
975 }
976 else
977 {
978 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
979 if (cbInstr + (uint32_t)cbDst <= 15)
980 {
981 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
982 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
983 }
984 else
985 {
986 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0)\n",
987 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, cbDst));
988 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
989 }
990 }
991 if (cbDst <= cbMaxRead)
992 {
993 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
994 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
995 pVCpu->iem.s.GCPhysInstrBuf = pTlbe->GCPhys;
996 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
997 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
998 return;
999 }
1000 pVCpu->iem.s.pbInstrBuf = NULL;
1001
1002 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
1003 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
1004 }
1005# else
1006# error "refactor as needed"
1007 /*
1008 * If there is no special read handling, we can read a bit more and
1009 * put it in the prefetch buffer.
1010 */
1011 if ( cbDst < cbMaxRead
1012 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1013 {
1014 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
1015 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
1016 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1017 { /* likely */ }
1018 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1019 {
1020 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1021 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1022 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1023 AssertStmt(rcStrict == VINF_SUCCESS, IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict)));
1024 }
1025 else
1026 {
1027 Log((RT_SUCCESS(rcStrict)
1028 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1029 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1030 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1031 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1032 }
1033 }
1034# endif
1035 /*
1036 * Special read handling, so only read exactly what's needed.
1037 * This is a highly unlikely scenario.
1038 */
1039 else
1040 {
1041 pVCpu->iem.s.CodeTlb.cTlbSlowReadPath++;
1042
1043 /* Check instruction length. */
1044 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1045 if (RT_LIKELY(cbInstr + cbDst <= 15))
1046 { /* likely */ }
1047 else
1048 {
1049 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0) [slow]\n",
1050 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, cbDst));
1051 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1052 }
1053
1054 /* Do the reading. */
1055 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
1056 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
1057 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
1058 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1059 { /* likely */ }
1060 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1061 {
1062 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1063 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1064 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1065 AssertStmt(rcStrict == VINF_SUCCESS, IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict)));
1066 }
1067 else
1068 {
1069 Log((RT_SUCCESS(rcStrict)
1070 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1071 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1072 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1073 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1074 }
1075 pVCpu->iem.s.offInstrNextByte = offBuf + cbToRead;
1076 if (cbToRead == cbDst)
1077 return;
1078 }
1079
1080 /*
1081 * More to read, loop.
1082 */
1083 cbDst -= cbMaxRead;
1084 pvDst = (uint8_t *)pvDst + cbMaxRead;
1085 }
1086# else /* !IN_RING3 */
1087 RT_NOREF(pvDst, cbDst);
1088 if (pvDst || cbDst)
1089 IEM_DO_LONGJMP(pVCpu, VERR_INTERNAL_ERROR);
1090# endif /* !IN_RING3 */
1091}
1092
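/*
 * Note on the "<= 15" checks in the function above: the architectural maximum
 * x86 instruction length is 15 bytes and exceeding it raises #GP(0), which is
 * why the cross-page refill paths refuse to extend an instruction past that
 * point instead of reading further.  Rough sketch of the guard (cbInstr being
 * the bytes already consumed for the current instruction):
 */
#if 0
    if (cbInstr + cbDst > 15)   /* would exceed the 15 byte architectural limit */
        iemRaiseGeneralProtectionFault0Jmp(pVCpu);
#endif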
1093#else
1094
1095/**
1096 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
1097 * exception if it fails.
1098 *
1099 * @returns Strict VBox status code.
1100 * @param pVCpu The cross context virtual CPU structure of the
1101 * calling thread.
1102 * @param cbMin The minimum number of bytes, relative to offOpcode,
1103 * that must be read.
1104 */
1105VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPUCC pVCpu, size_t cbMin) RT_NOEXCEPT
1106{
1107 /*
1108 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1109 *
1110 * First translate CS:rIP to a physical address.
1111 */
1112 uint8_t const cbOpcode = pVCpu->iem.s.cbOpcode;
1113 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
1114 uint8_t const cbLeft = cbOpcode - offOpcode;
1115 Assert(cbLeft < cbMin);
1116 Assert(cbOpcode <= sizeof(pVCpu->iem.s.abOpcode));
1117
1118 uint32_t cbToTryRead;
1119 RTGCPTR GCPtrNext;
1120 if (IEM_IS_64BIT_CODE(pVCpu))
1121 {
1122 GCPtrNext = pVCpu->cpum.GstCtx.rip + cbOpcode;
1123 if (!IEM_IS_CANONICAL(GCPtrNext))
1124 return iemRaiseGeneralProtectionFault0(pVCpu);
1125 cbToTryRead = GUEST_PAGE_SIZE - (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1126 }
1127 else
1128 {
1129 uint32_t GCPtrNext32 = pVCpu->cpum.GstCtx.eip;
1130 /* Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu)); - this is allowed */
1131 GCPtrNext32 += cbOpcode;
1132 if (GCPtrNext32 > pVCpu->cpum.GstCtx.cs.u32Limit)
1133 /** @todo For CPUs older than the 386, we should not generate \#GP here but wrap around! */
1134 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1135 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrNext32 + 1;
1136 if (!cbToTryRead) /* overflowed */
1137 {
1138 Assert(GCPtrNext32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1139 cbToTryRead = UINT32_MAX;
1140 /** @todo check out wrapping around the code segment. */
1141 }
1142 if (cbToTryRead < cbMin - cbLeft)
1143 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1144 GCPtrNext = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrNext32;
1145
1146 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1147 if (cbToTryRead > cbLeftOnPage)
1148 cbToTryRead = cbLeftOnPage;
1149 }
1150
1151 /* Restrict to opcode buffer space.
1152
1153 We're making ASSUMPTIONS here based on work done previously in
1154 iemInitDecoderAndPrefetchOpcodes, where bytes from the first page will
1155 be fetched in case of an instruction crossing two pages. */
1156 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - cbOpcode)
1157 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - cbOpcode;
1158 if (RT_LIKELY(cbToTryRead + cbLeft >= cbMin))
1159 { /* likely */ }
1160 else
1161 {
1162 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0)\n",
1163 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, offOpcode, cbMin));
1164 return iemRaiseGeneralProtectionFault0(pVCpu);
1165 }
1166
1167 PGMPTWALK Walk;
1168 int rc = PGMGstGetPage(pVCpu, GCPtrNext, &Walk);
1169 if (RT_FAILURE(rc))
1170 {
1171 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
1172#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1173 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1174 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
1175#endif
1176 return iemRaisePageFault(pVCpu, GCPtrNext, 1, IEM_ACCESS_INSTRUCTION, rc);
1177 }
1178 if (!(Walk.fEffective & X86_PTE_US) && IEM_GET_CPL(pVCpu) == 3)
1179 {
1180 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
1181#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1182 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1183 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
1184#endif
1185 return iemRaisePageFault(pVCpu, GCPtrNext, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1186 }
1187 if ((Walk.fEffective & X86_PTE_PAE_NX) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
1188 {
1189 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
1190#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1191 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1192 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
1193#endif
1194 return iemRaisePageFault(pVCpu, GCPtrNext, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1195 }
1196 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1197 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, cbOpcode));
1198 /** @todo Check reserved bits and such stuff. PGM is better at doing
1199 * that, so do it when implementing the guest virtual address
1200 * TLB... */
1201
1202 /*
1203 * Read the bytes at this address.
1204 *
1205 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
1206 * and since PATM should only patch the start of an instruction there
1207 * should be no need to check again here.
1208 */
1209 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
1210 {
1211 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[cbOpcode],
1212 cbToTryRead, PGMACCESSORIGIN_IEM);
1213 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1214 { /* likely */ }
1215 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1216 {
1217 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1218 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1219 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1220 }
1221 else
1222 {
1223 Log((RT_SUCCESS(rcStrict)
1224 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1225 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1226 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1227 return rcStrict;
1228 }
1229 }
1230 else
1231 {
1232 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[cbOpcode], GCPhys, cbToTryRead);
1233 if (RT_SUCCESS(rc))
1234 { /* likely */ }
1235 else
1236 {
1237 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
1238 return rc;
1239 }
1240 }
1241 pVCpu->iem.s.cbOpcode = cbOpcode + cbToTryRead;
1242 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
1243
1244 return VINF_SUCCESS;
1245}
1246
1247#endif /* !IEM_WITH_CODE_TLB */
1248#ifndef IEM_WITH_SETJMP
1249
1250/**
1251 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
1252 *
1253 * @returns Strict VBox status code.
1254 * @param pVCpu The cross context virtual CPU structure of the
1255 * calling thread.
1256 * @param pb Where to return the opcode byte.
1257 */
1258VBOXSTRICTRC iemOpcodeGetNextU8Slow(PVMCPUCC pVCpu, uint8_t *pb) RT_NOEXCEPT
1259{
1260 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1261 if (rcStrict == VINF_SUCCESS)
1262 {
1263 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1264 *pb = pVCpu->iem.s.abOpcode[offOpcode];
1265 pVCpu->iem.s.offOpcode = offOpcode + 1;
1266 }
1267 else
1268 *pb = 0;
1269 return rcStrict;
1270}
1271
1272#else /* IEM_WITH_SETJMP */
1273
1274/**
1275 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
1276 *
1277 * @returns The opcode byte.
1278 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1279 */
1280uint8_t iemOpcodeGetNextU8SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1281{
1282# ifdef IEM_WITH_CODE_TLB
1283 uint8_t u8;
1284 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
1285 return u8;
1286# else
1287 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1288 if (rcStrict == VINF_SUCCESS)
1289 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
1290 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1291# endif
1292}
1293
1294#endif /* IEM_WITH_SETJMP */
1295
1296#ifndef IEM_WITH_SETJMP
1297
1298/**
1299 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
1300 *
1301 * @returns Strict VBox status code.
1302 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1303 * @param pu16 Where to return the opcode word.
1304 */
1305VBOXSTRICTRC iemOpcodeGetNextS8SxU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
1306{
1307 uint8_t u8;
1308 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1309 if (rcStrict == VINF_SUCCESS)
1310 *pu16 = (int8_t)u8;
1311 return rcStrict;
1312}
1313
1314
1315/**
1316 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
1317 *
1318 * @returns Strict VBox status code.
1319 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1320 * @param pu32 Where to return the opcode dword.
1321 */
1322VBOXSTRICTRC iemOpcodeGetNextS8SxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1323{
1324 uint8_t u8;
1325 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1326 if (rcStrict == VINF_SUCCESS)
1327 *pu32 = (int8_t)u8;
1328 return rcStrict;
1329}
1330
1331
1332/**
1333 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
1334 *
1335 * @returns Strict VBox status code.
1336 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1337 * @param pu64 Where to return the opcode qword.
1338 */
1339VBOXSTRICTRC iemOpcodeGetNextS8SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1340{
1341 uint8_t u8;
1342 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1343 if (rcStrict == VINF_SUCCESS)
1344 *pu64 = (int8_t)u8;
1345 return rcStrict;
1346}
1347
1348#endif /* !IEM_WITH_SETJMP */
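/*
 * Worked example for the sign-extending fetchers above: an opcode byte of
 * 0xfe is cast to int8_t (-2) and then widened, giving 0xfffe for the U16
 * variant, 0xfffffffe for U32 and 0xfffffffffffffffe for U64, whereas 0x7f
 * stays 0x007f / 0x0000007f / 0x000000000000007f.  Equivalent sketch:
 */
#if 0
    uint8_t  const u8  = 0xfe;
    uint16_t const u16 = (uint16_t)(int8_t)u8;  /* 0xfffe */
    uint64_t const u64 = (uint64_t)(int8_t)u8;  /* 0xfffffffffffffffe */
#endif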
1349
1350
1351#ifndef IEM_WITH_SETJMP
1352
1353/**
1354 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
1355 *
1356 * @returns Strict VBox status code.
1357 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1358 * @param pu16 Where to return the opcode word.
1359 */
1360VBOXSTRICTRC iemOpcodeGetNextU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
1361{
1362 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1363 if (rcStrict == VINF_SUCCESS)
1364 {
1365 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1366# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1367 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1368# else
1369 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1370# endif
1371 pVCpu->iem.s.offOpcode = offOpcode + 2;
1372 }
1373 else
1374 *pu16 = 0;
1375 return rcStrict;
1376}
1377
1378#else /* IEM_WITH_SETJMP */
1379
1380/**
1381 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error
1382 *
1383 * @returns The opcode word.
1384 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1385 */
1386uint16_t iemOpcodeGetNextU16SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1387{
1388# ifdef IEM_WITH_CODE_TLB
1389 uint16_t u16;
1390 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
1391 return u16;
1392# else
1393 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1394 if (rcStrict == VINF_SUCCESS)
1395 {
1396 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1397 pVCpu->iem.s.offOpcode += 2;
1398# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1399 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1400# else
1401 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1402# endif
1403 }
1404 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1405# endif
1406}
1407
1408#endif /* IEM_WITH_SETJMP */
1409
1410#ifndef IEM_WITH_SETJMP
1411
1412/**
1413 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
1414 *
1415 * @returns Strict VBox status code.
1416 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1417 * @param pu32 Where to return the opcode double word.
1418 */
1419VBOXSTRICTRC iemOpcodeGetNextU16ZxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1420{
1421 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1422 if (rcStrict == VINF_SUCCESS)
1423 {
1424 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1425 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1426 pVCpu->iem.s.offOpcode = offOpcode + 2;
1427 }
1428 else
1429 *pu32 = 0;
1430 return rcStrict;
1431}
1432
1433
1434/**
1435 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
1436 *
1437 * @returns Strict VBox status code.
1438 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1439 * @param pu64 Where to return the opcode quad word.
1440 */
1441VBOXSTRICTRC iemOpcodeGetNextU16ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1442{
1443 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1444 if (rcStrict == VINF_SUCCESS)
1445 {
1446 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1447 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1448 pVCpu->iem.s.offOpcode = offOpcode + 2;
1449 }
1450 else
1451 *pu64 = 0;
1452 return rcStrict;
1453}
1454
1455#endif /* !IEM_WITH_SETJMP */
1456
1457#ifndef IEM_WITH_SETJMP
1458
1459/**
1460 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
1461 *
1462 * @returns Strict VBox status code.
1463 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1464 * @param pu32 Where to return the opcode dword.
1465 */
1466VBOXSTRICTRC iemOpcodeGetNextU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1467{
1468 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1469 if (rcStrict == VINF_SUCCESS)
1470 {
1471 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1472# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1473 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1474# else
1475 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1476 pVCpu->iem.s.abOpcode[offOpcode + 1],
1477 pVCpu->iem.s.abOpcode[offOpcode + 2],
1478 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1479# endif
1480 pVCpu->iem.s.offOpcode = offOpcode + 4;
1481 }
1482 else
1483 *pu32 = 0;
1484 return rcStrict;
1485}
1486
1487#else /* IEM_WITH_SETJMP */
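/*
 * Byte-order note for the RT_MAKE_U16/RT_MAKE_U32_FROM_U8 paths above: opcode
 * bytes are assembled little-endian, so immediate bytes 0x78 0x56 0x34 0x12
 * in the instruction stream yield the value 0x12345678.  Tiny sketch:
 */
#if 0
    uint8_t  const abImm[4] = { 0x78, 0x56, 0x34, 0x12 };
    uint32_t const u32Imm   = RT_MAKE_U32_FROM_U8(abImm[0], abImm[1], abImm[2], abImm[3]); /* 0x12345678 */
#endif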
1488
1489/**
1490 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
1491 *
1492 * @returns The opcode dword.
1493 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1494 */
1495uint32_t iemOpcodeGetNextU32SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1496{
1497# ifdef IEM_WITH_CODE_TLB
1498 uint32_t u32;
1499 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
1500 return u32;
1501# else
1502 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1503 if (rcStrict == VINF_SUCCESS)
1504 {
1505 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1506 pVCpu->iem.s.offOpcode = offOpcode + 4;
1507# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1508 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1509# else
1510 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1511 pVCpu->iem.s.abOpcode[offOpcode + 1],
1512 pVCpu->iem.s.abOpcode[offOpcode + 2],
1513 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1514# endif
1515 }
1516 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1517# endif
1518}
1519
1520#endif /* IEM_WITH_SETJMP */
1521
1522#ifndef IEM_WITH_SETJMP
1523
1524/**
1525 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
1526 *
1527 * @returns Strict VBox status code.
1528 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1529 * @param pu64 Where to return the opcode dword.
1530 */
1531VBOXSTRICTRC iemOpcodeGetNextU32ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1532{
1533 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1534 if (rcStrict == VINF_SUCCESS)
1535 {
1536 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1537 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1538 pVCpu->iem.s.abOpcode[offOpcode + 1],
1539 pVCpu->iem.s.abOpcode[offOpcode + 2],
1540 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1541 pVCpu->iem.s.offOpcode = offOpcode + 4;
1542 }
1543 else
1544 *pu64 = 0;
1545 return rcStrict;
1546}
1547
1548
1549/**
1550 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
1551 *
1552 * @returns Strict VBox status code.
1553 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1554 * @param pu64 Where to return the opcode qword.
1555 */
1556VBOXSTRICTRC iemOpcodeGetNextS32SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1557{
1558 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1559 if (rcStrict == VINF_SUCCESS)
1560 {
1561 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1562 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1563 pVCpu->iem.s.abOpcode[offOpcode + 1],
1564 pVCpu->iem.s.abOpcode[offOpcode + 2],
1565 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1566 pVCpu->iem.s.offOpcode = offOpcode + 4;
1567 }
1568 else
1569 *pu64 = 0;
1570 return rcStrict;
1571}
1572
1573#endif /* !IEM_WITH_SETJMP */
1574
1575#ifndef IEM_WITH_SETJMP
1576
1577/**
1578 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
1579 *
1580 * @returns Strict VBox status code.
1581 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1582 * @param pu64 Where to return the opcode qword.
1583 */
1584VBOXSTRICTRC iemOpcodeGetNextU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1585{
1586 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
1587 if (rcStrict == VINF_SUCCESS)
1588 {
1589 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1590# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1591 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1592# else
1593 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1594 pVCpu->iem.s.abOpcode[offOpcode + 1],
1595 pVCpu->iem.s.abOpcode[offOpcode + 2],
1596 pVCpu->iem.s.abOpcode[offOpcode + 3],
1597 pVCpu->iem.s.abOpcode[offOpcode + 4],
1598 pVCpu->iem.s.abOpcode[offOpcode + 5],
1599 pVCpu->iem.s.abOpcode[offOpcode + 6],
1600 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1601# endif
1602 pVCpu->iem.s.offOpcode = offOpcode + 8;
1603 }
1604 else
1605 *pu64 = 0;
1606 return rcStrict;
1607}
1608
1609#else /* IEM_WITH_SETJMP */
1610
1611/**
1612 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
1613 *
1614 * @returns The opcode qword.
1615 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1616 */
1617uint64_t iemOpcodeGetNextU64SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1618{
1619# ifdef IEM_WITH_CODE_TLB
1620 uint64_t u64;
1621 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
1622 return u64;
1623# else
1624 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
1625 if (rcStrict == VINF_SUCCESS)
1626 {
1627 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1628 pVCpu->iem.s.offOpcode = offOpcode + 8;
1629# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1630 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1631# else
1632 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1633 pVCpu->iem.s.abOpcode[offOpcode + 1],
1634 pVCpu->iem.s.abOpcode[offOpcode + 2],
1635 pVCpu->iem.s.abOpcode[offOpcode + 3],
1636 pVCpu->iem.s.abOpcode[offOpcode + 4],
1637 pVCpu->iem.s.abOpcode[offOpcode + 5],
1638 pVCpu->iem.s.abOpcode[offOpcode + 6],
1639 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1640# endif
1641 }
1642 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1643# endif
1644}
1645
1646#endif /* IEM_WITH_SETJMP */
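/*
 * Worked example (illustration only): both fetch paths above yield the same
 * little-endian result.  Assuming abOpcode[offOpcode..offOpcode+7] holds the
 * bytes ef cd ab 89 67 45 23 01, then
 *      RT_MAKE_U64_FROM_U8(0xef, 0xcd, 0xab, 0x89, 0x67, 0x45, 0x23, 0x01)
 * evaluates to UINT64_C(0x0123456789abcdef), which is exactly what the
 * unaligned uint64_t read returns on a little-endian host
 * (IEM_USE_UNALIGNED_DATA_ACCESS).
 */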
1647
1648
1649
1650/** @name Misc Worker Functions.
1651 * @{
1652 */
1653
1654/**
1655 * Gets the exception class for the specified exception vector.
1656 *
1657 * @returns The class of the specified exception.
1658 * @param uVector The exception vector.
1659 */
1660static IEMXCPTCLASS iemGetXcptClass(uint8_t uVector) RT_NOEXCEPT
1661{
1662 Assert(uVector <= X86_XCPT_LAST);
1663 switch (uVector)
1664 {
1665 case X86_XCPT_DE:
1666 case X86_XCPT_TS:
1667 case X86_XCPT_NP:
1668 case X86_XCPT_SS:
1669 case X86_XCPT_GP:
1670 case X86_XCPT_SX: /* AMD only */
1671 return IEMXCPTCLASS_CONTRIBUTORY;
1672
1673 case X86_XCPT_PF:
1674 case X86_XCPT_VE: /* Intel only */
1675 return IEMXCPTCLASS_PAGE_FAULT;
1676
1677 case X86_XCPT_DF:
1678 return IEMXCPTCLASS_DOUBLE_FAULT;
1679 }
1680 return IEMXCPTCLASS_BENIGN;
1681}
1682
1683
1684/**
1685 * Evaluates how to handle an exception caused during delivery of another event
1686 * (exception / interrupt).
1687 *
1688 * @returns How to handle the recursive exception.
1689 * @param pVCpu The cross context virtual CPU structure of the
1690 * calling thread.
1691 * @param fPrevFlags The flags of the previous event.
1692 * @param uPrevVector The vector of the previous event.
1693 * @param fCurFlags The flags of the current exception.
1694 * @param uCurVector The vector of the current exception.
1695 * @param pfXcptRaiseInfo Where to store additional information about the
1696 * exception condition. Optional.
1697 */
1698VMM_INT_DECL(IEMXCPTRAISE) IEMEvaluateRecursiveXcpt(PVMCPUCC pVCpu, uint32_t fPrevFlags, uint8_t uPrevVector, uint32_t fCurFlags,
1699 uint8_t uCurVector, PIEMXCPTRAISEINFO pfXcptRaiseInfo)
1700{
1701 /*
1702 * Only CPU exceptions can be raised while delivering other events, software interrupt
1703 * (INTn/INT3/INTO/ICEBP) generated exceptions cannot occur as the current (second) exception.
1704 */
1705 AssertReturn(fCurFlags & IEM_XCPT_FLAGS_T_CPU_XCPT, IEMXCPTRAISE_INVALID);
1706 Assert(pVCpu); RT_NOREF(pVCpu);
1707 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x\n", uPrevVector, uCurVector));
1708
1709 IEMXCPTRAISE enmRaise = IEMXCPTRAISE_CURRENT_XCPT;
1710 IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
1711 if (fPrevFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
1712 {
1713 IEMXCPTCLASS enmPrevXcptClass = iemGetXcptClass(uPrevVector);
1714 if (enmPrevXcptClass != IEMXCPTCLASS_BENIGN)
1715 {
1716 IEMXCPTCLASS enmCurXcptClass = iemGetXcptClass(uCurVector);
1717 if ( enmPrevXcptClass == IEMXCPTCLASS_PAGE_FAULT
1718 && ( enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT
1719 || enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY))
1720 {
1721 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
1722 fRaiseInfo = enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT ? IEMXCPTRAISEINFO_PF_PF
1723 : IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT;
1724 Log2(("IEMEvaluateRecursiveXcpt: Vectoring page fault. uPrevVector=%#x uCurVector=%#x uCr2=%#RX64\n", uPrevVector,
1725 uCurVector, pVCpu->cpum.GstCtx.cr2));
1726 }
1727 else if ( enmPrevXcptClass == IEMXCPTCLASS_CONTRIBUTORY
1728 && enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY)
1729 {
1730 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
1731 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x -> #DF\n", uPrevVector, uCurVector));
1732 }
1733 else if ( enmPrevXcptClass == IEMXCPTCLASS_DOUBLE_FAULT
1734 && ( enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY
1735 || enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT))
1736 {
1737 enmRaise = IEMXCPTRAISE_TRIPLE_FAULT;
1738 Log2(("IEMEvaluateRecursiveXcpt: #DF handler raised a %#x exception -> triple fault\n", uCurVector));
1739 }
1740 }
1741 else
1742 {
1743 if (uPrevVector == X86_XCPT_NMI)
1744 {
1745 fRaiseInfo = IEMXCPTRAISEINFO_NMI_XCPT;
1746 if (uCurVector == X86_XCPT_PF)
1747 {
1748 fRaiseInfo |= IEMXCPTRAISEINFO_NMI_PF;
1749 Log2(("IEMEvaluateRecursiveXcpt: NMI delivery caused a page fault\n"));
1750 }
1751 }
1752 else if ( uPrevVector == X86_XCPT_AC
1753 && uCurVector == X86_XCPT_AC)
1754 {
1755 enmRaise = IEMXCPTRAISE_CPU_HANG;
1756 fRaiseInfo = IEMXCPTRAISEINFO_AC_AC;
1757 Log2(("IEMEvaluateRecursiveXcpt: Recursive #AC - Bad guest\n"));
1758 }
1759 }
1760 }
1761 else if (fPrevFlags & IEM_XCPT_FLAGS_T_EXT_INT)
1762 {
1763 fRaiseInfo = IEMXCPTRAISEINFO_EXT_INT_XCPT;
1764 if (uCurVector == X86_XCPT_PF)
1765 fRaiseInfo |= IEMXCPTRAISEINFO_EXT_INT_PF;
1766 }
1767 else
1768 {
1769 Assert(fPrevFlags & IEM_XCPT_FLAGS_T_SOFT_INT);
1770 fRaiseInfo = IEMXCPTRAISEINFO_SOFT_INT_XCPT;
1771 }
1772
1773 if (pfXcptRaiseInfo)
1774 *pfXcptRaiseInfo = fRaiseInfo;
1775 return enmRaise;
1776}
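/*
 * A few concrete outcomes of the classification above, for illustration only
 * (both events assumed to be CPU exceptions):
 *      #PF while delivering #PF -> IEMXCPTRAISE_DOUBLE_FAULT, IEMXCPTRAISEINFO_PF_PF
 *      #GP while delivering #PF -> IEMXCPTRAISE_DOUBLE_FAULT, IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT
 *      #NP while delivering #GP -> IEMXCPTRAISE_DOUBLE_FAULT (contributory on contributory)
 *      #GP while delivering #DF -> IEMXCPTRAISE_TRIPLE_FAULT
 *      #AC while delivering #AC -> IEMXCPTRAISE_CPU_HANG, IEMXCPTRAISEINFO_AC_AC
 *      #DB while delivering #GP -> IEMXCPTRAISE_CURRENT_XCPT (benign second exception)
 */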
1777
1778
1779/**
1780 * Enters the CPU shutdown state initiated by a triple fault or other
1781 * unrecoverable conditions.
1782 *
1783 * @returns Strict VBox status code.
1784 * @param pVCpu The cross context virtual CPU structure of the
1785 * calling thread.
1786 */
1787static VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPUCC pVCpu) RT_NOEXCEPT
1788{
1789 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
1790 IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(pVCpu, VMX_EXIT_TRIPLE_FAULT, 0 /* u64ExitQual */);
1791
1792 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
1793 {
1794 Log2(("shutdown: Guest intercept -> #VMEXIT\n"));
1795 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
1796 }
1797
1798 RT_NOREF(pVCpu);
1799 return VINF_EM_TRIPLE_FAULT;
1800}
1801
1802
1803/**
1804 * Validates a new SS segment.
1805 *
1806 * @returns VBox strict status code.
1807 * @param pVCpu The cross context virtual CPU structure of the
1808 * calling thread.
1809 * @param NewSS The new SS selector.
1810 * @param uCpl The CPL to load the stack for.
1811 * @param pDesc Where to return the descriptor.
1812 */
1813static VBOXSTRICTRC iemMiscValidateNewSS(PVMCPUCC pVCpu, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc) RT_NOEXCEPT
1814{
1815 /* Null selectors are not allowed (we're not called for dispatching
1816 interrupts with SS=0 in long mode). */
1817 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
1818 {
1819 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
1820 return iemRaiseTaskSwitchFault0(pVCpu);
1821 }
1822
1823 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
1824 if ((NewSS & X86_SEL_RPL) != uCpl)
1825 {
1826 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl));
1827 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1828 }
1829
1830 /*
1831 * Read the descriptor.
1832 */
1833 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
1834 if (rcStrict != VINF_SUCCESS)
1835 return rcStrict;
1836
1837 /*
1838 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
1839 */
1840 if (!pDesc->Legacy.Gen.u1DescType)
1841 {
1842 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
1843 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1844 }
1845
1846 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
1847 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
1848 {
1849 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
1850 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1851 }
1852 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
1853 {
1854 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
1855 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1856 }
1857
1858 /* Is it there? */
1859 /** @todo testcase: Is this checked before the canonical / limit check below? */
1860 if (!pDesc->Legacy.Gen.u1Present)
1861 {
1862 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
1863 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
1864 }
1865
1866 return VINF_SUCCESS;
1867}
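/*
 * Summary of the checks above, for illustration only: the new SS must be a
 * non-null selector with RPL == uCpl, referencing a present, writable data
 * segment (e.g. type X86_SEL_TYPE_RW_ACC) whose DPL == uCpl.  So loading SS
 * for CPL 3 requires a present DPL=3 writable data segment selected with
 * RPL=3; anything else raises #TS, except a non-present segment which
 * raises #NP.
 */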
1868
1869/** @} */
1870
1871
1872/** @name Raising Exceptions.
1873 *
1874 * @{
1875 */
1876
1877
1878/**
1879 * Loads the specified stack far pointer from the TSS.
1880 *
1881 * @returns VBox strict status code.
1882 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1883 * @param uCpl The CPL to load the stack for.
1884 * @param pSelSS Where to return the new stack segment.
1885 * @param puEsp Where to return the new stack pointer.
1886 */
1887static VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPUCC pVCpu, uint8_t uCpl, PRTSEL pSelSS, uint32_t *puEsp) RT_NOEXCEPT
1888{
1889 VBOXSTRICTRC rcStrict;
1890 Assert(uCpl < 4);
1891
1892 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
1893 switch (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type)
1894 {
1895 /*
1896 * 16-bit TSS (X86TSS16).
1897 */
1898 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
1899 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
1900 {
1901 uint32_t off = uCpl * 4 + 2;
1902 if (off + 4 <= pVCpu->cpum.GstCtx.tr.u32Limit)
1903 {
1904 /** @todo check actual access pattern here. */
1905 uint32_t u32Tmp = 0; /* gcc maybe... */
1906 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
1907 if (rcStrict == VINF_SUCCESS)
1908 {
1909 *puEsp = RT_LOWORD(u32Tmp);
1910 *pSelSS = RT_HIWORD(u32Tmp);
1911 return VINF_SUCCESS;
1912 }
1913 }
1914 else
1915 {
1916 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
1917 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
1918 }
1919 break;
1920 }
1921
1922 /*
1923 * 32-bit TSS (X86TSS32).
1924 */
1925 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
1926 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
1927 {
1928 uint32_t off = uCpl * 8 + 4;
1929 if (off + 7 <= pVCpu->cpum.GstCtx.tr.u32Limit)
1930 {
1931                /** @todo check actual access pattern here. */
1932 uint64_t u64Tmp;
1933 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
1934 if (rcStrict == VINF_SUCCESS)
1935 {
1936 *puEsp = u64Tmp & UINT32_MAX;
1937 *pSelSS = (RTSEL)(u64Tmp >> 32);
1938 return VINF_SUCCESS;
1939 }
1940 }
1941 else
1942 {
1943                Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
1944 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
1945 }
1946 break;
1947 }
1948
1949 default:
1950 AssertFailed();
1951 rcStrict = VERR_IEM_IPE_4;
1952 break;
1953 }
1954
1955 *puEsp = 0; /* make gcc happy */
1956 *pSelSS = 0; /* make gcc happy */
1957 return rcStrict;
1958}
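/*
 * Worked example (illustration only), assuming the standard TSS layouts: in a
 * 32-bit TSS the ring stack pairs start at {esp0,ss0}=0x04, {esp1,ss1}=0x0c and
 * {esp2,ss2}=0x14, hence off = uCpl * 8 + 4; in a 16-bit TSS they start at
 * {sp0,ss0}=0x02, {sp1,ss1}=0x06 and {sp2,ss2}=0x0a, hence off = uCpl * 4 + 2.
 * E.g. an interrupt targeting ring 1 with a 32-bit TSS reads the 8 bytes at
 * tr.u64Base + 0x0c (ESP1, then SS1).
 */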
1959
1960
1961/**
1962 * Loads the specified stack pointer from the 64-bit TSS.
1963 *
1964 * @returns VBox strict status code.
1965 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1966 * @param uCpl The CPL to load the stack for.
1967 * @param uIst The interrupt stack table index, 0 if to use uCpl.
1968 * @param puRsp Where to return the new stack pointer.
1969 */
1970static VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPUCC pVCpu, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp) RT_NOEXCEPT
1971{
1972 Assert(uCpl < 4);
1973 Assert(uIst < 8);
1974 *puRsp = 0; /* make gcc happy */
1975
1976 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
1977 AssertReturn(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
1978
1979 uint32_t off;
1980 if (uIst)
1981 off = (uIst - 1) * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, ist1);
1982 else
1983 off = uCpl * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, rsp0);
1984 if (off + sizeof(uint64_t) > pVCpu->cpum.GstCtx.tr.u32Limit)
1985 {
1986 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pVCpu->cpum.GstCtx.tr.u32Limit));
1987 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
1988 }
1989
1990 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
1991}
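/*
 * Worked example (illustration only), assuming the standard X86TSS64 layout:
 * RSP0..RSP2 start at offset 0x04 and IST1..IST7 at offset 0x24, each 8 bytes.
 * Thus uIst=0 with uCpl=2 reads the qword at tr.u64Base + 0x14 (RSP2), while an
 * interrupt gate with IST=3 reads the qword at tr.u64Base + 0x34 (IST3),
 * irrespective of the target CPL.
 */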
1992
1993
1994/**
1995 * Adjusts the CPU state according to the exception being raised.
1996 *
1997 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1998 * @param u8Vector The exception that has been raised.
1999 */
2000DECLINLINE(void) iemRaiseXcptAdjustState(PVMCPUCC pVCpu, uint8_t u8Vector)
2001{
2002 switch (u8Vector)
2003 {
2004 case X86_XCPT_DB:
2005 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);
2006 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
2007 break;
2008 /** @todo Read the AMD and Intel exception reference... */
2009 }
2010}
2011
2012
2013/**
2014 * Implements exceptions and interrupts for real mode.
2015 *
2016 * @returns VBox strict status code.
2017 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2018 * @param cbInstr The number of bytes to offset rIP by in the return
2019 * address.
2020 * @param u8Vector The interrupt / exception vector number.
2021 * @param fFlags The flags.
2022 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2023 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2024 */
2025static VBOXSTRICTRC
2026iemRaiseXcptOrIntInRealMode(PVMCPUCC pVCpu,
2027 uint8_t cbInstr,
2028 uint8_t u8Vector,
2029 uint32_t fFlags,
2030 uint16_t uErr,
2031 uint64_t uCr2) RT_NOEXCEPT
2032{
2033 NOREF(uErr); NOREF(uCr2);
2034 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2035
2036 /*
2037 * Read the IDT entry.
2038 */
2039 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
2040 {
2041 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
2042 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2043 }
2044 RTFAR16 Idte;
2045 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(4) * u8Vector);
2046 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2047 {
2048 Log(("iemRaiseXcptOrIntInRealMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
2049 return rcStrict;
2050 }
2051
2052 /*
2053 * Push the stack frame.
2054 */
2055 uint16_t *pu16Frame;
2056 uint64_t uNewRsp;
2057 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, 3, (void **)&pu16Frame, &uNewRsp);
2058 if (rcStrict != VINF_SUCCESS)
2059 return rcStrict;
2060
2061 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
2062#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
2063 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
2064 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
2065 fEfl |= UINT16_C(0xf000);
2066#endif
2067 pu16Frame[2] = (uint16_t)fEfl;
2068 pu16Frame[1] = (uint16_t)pVCpu->cpum.GstCtx.cs.Sel;
2069 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
2070 rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu16Frame, uNewRsp);
2071 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2072 return rcStrict;
2073
2074 /*
2075 * Load the vector address into cs:ip and make exception specific state
2076 * adjustments.
2077 */
2078 pVCpu->cpum.GstCtx.cs.Sel = Idte.sel;
2079 pVCpu->cpum.GstCtx.cs.ValidSel = Idte.sel;
2080 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
2081 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)Idte.sel << 4;
2082 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
2083 pVCpu->cpum.GstCtx.rip = Idte.off;
2084 fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC);
2085 IEMMISC_SET_EFL(pVCpu, fEfl);
2086
2087 /** @todo do we actually do this in real mode? */
2088 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
2089 iemRaiseXcptAdjustState(pVCpu, u8Vector);
2090
2091    /* The IEM_F_MODE_XXX and IEM_F_X86_CPL_MASK don't really change here,
2092 so best leave them alone in case we're in a weird kind of real mode... */
2093
2094 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
2095}
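/*
 * Worked example (illustration only): in real mode the IDT is a flat table of
 * 4-byte offset:segment pairs, so vector 0x08 is fetched from idtr.pIdt + 0x20.
 * The frame pushed above is the classic 6 bytes, from higher to lower address:
 *      FLAGS, CS, IP
 * where IP is biased by cbInstr for software interrupts (INT/INT3/INTO), after
 * which IF, TF and AC are cleared and CS:IP is loaded from the IDT entry.
 */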
2096
2097
2098/**
2099 * Loads a NULL data selector into a segment register when coming from V8086 mode.
2100 *
2101 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2102 * @param pSReg Pointer to the segment register.
2103 */
2104DECLINLINE(void) iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPUCC pVCpu, PCPUMSELREG pSReg)
2105{
2106 pSReg->Sel = 0;
2107 pSReg->ValidSel = 0;
2108 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2109 {
2110        /* VT-x (Intel 3960x) doesn't change the base and limit; it clears and sets the following attributes */
2111 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
2112 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
2113 }
2114 else
2115 {
2116 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2117 /** @todo check this on AMD-V */
2118 pSReg->u64Base = 0;
2119 pSReg->u32Limit = 0;
2120 }
2121}
2122
2123
2124/**
2125 * Loads a segment selector during a task switch in V8086 mode.
2126 *
2127 * @param pSReg Pointer to the segment register.
2128 * @param uSel The selector value to load.
2129 */
2130DECLINLINE(void) iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
2131{
2132 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
2133 pSReg->Sel = uSel;
2134 pSReg->ValidSel = uSel;
2135 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2136 pSReg->u64Base = uSel << 4;
2137 pSReg->u32Limit = 0xffff;
2138 pSReg->Attr.u = 0xf3;
2139}
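/*
 * For illustration only: V8086 segmentation works like real mode, so e.g.
 * uSel=0x1234 yields base 0x12340, limit 0xffff and the fixed attribute byte
 * 0xf3 (present, DPL=3, accessed read/write data) - no descriptor table lookup
 * is involved.
 */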
2140
2141
2142/**
2143 * Loads a segment selector during a task switch in protected mode.
2144 *
2145 * In this task switch scenario, we would throw \#TS exceptions rather than
2146 * \#GPs.
2147 *
2148 * @returns VBox strict status code.
2149 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2150 * @param pSReg Pointer to the segment register.
2151 * @param uSel The new selector value.
2152 *
2153 * @remarks This does _not_ handle CS or SS.
2154 * @remarks This expects IEM_GET_CPL(pVCpu) to return an up to date value.
2155 */
2156static VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPUCC pVCpu, PCPUMSELREG pSReg, uint16_t uSel) RT_NOEXCEPT
2157{
2158 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2159
2160 /* Null data selector. */
2161 if (!(uSel & X86_SEL_MASK_OFF_RPL))
2162 {
2163 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
2164 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
2165 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2166 return VINF_SUCCESS;
2167 }
2168
2169 /* Fetch the descriptor. */
2170 IEMSELDESC Desc;
2171 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
2172 if (rcStrict != VINF_SUCCESS)
2173 {
2174 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
2175 VBOXSTRICTRC_VAL(rcStrict)));
2176 return rcStrict;
2177 }
2178
2179 /* Must be a data segment or readable code segment. */
2180 if ( !Desc.Legacy.Gen.u1DescType
2181 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
2182 {
2183 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
2184 Desc.Legacy.Gen.u4Type));
2185 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2186 }
2187
2188 /* Check privileges for data segments and non-conforming code segments. */
2189 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2190 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2191 {
2192 /* The RPL and the new CPL must be less than or equal to the DPL. */
2193 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
2194 || (IEM_GET_CPL(pVCpu) > Desc.Legacy.Gen.u2Dpl))
2195 {
2196 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
2197 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
2198 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2199 }
2200 }
2201
2202 /* Is it there? */
2203 if (!Desc.Legacy.Gen.u1Present)
2204 {
2205 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
2206 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2207 }
2208
2209 /* The base and limit. */
2210 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
2211 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
2212
2213 /*
2214 * Ok, everything checked out fine. Now set the accessed bit before
2215 * committing the result into the registers.
2216 */
2217 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2218 {
2219 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
2220 if (rcStrict != VINF_SUCCESS)
2221 return rcStrict;
2222 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2223 }
2224
2225 /* Commit */
2226 pSReg->Sel = uSel;
2227 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
2228 pSReg->u32Limit = cbLimit;
2229 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
2230 pSReg->ValidSel = uSel;
2231 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2232 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2233 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
2234
2235 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
2236 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2237 return VINF_SUCCESS;
2238}
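/*
 * For illustration only: the privilege check above is skipped for conforming
 * code segments.  E.g. with CPL=3, a selector with RPL=3 referencing a DPL=2
 * data segment fails (RPL > DPL -> #TS), whereas the same selector referencing
 * a DPL=0 conforming readable code segment is accepted.
 */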
2239
2240
2241/**
2242 * Performs a task switch.
2243 *
2244 * If the task switch is the result of a JMP, CALL or IRET instruction, the
2245 * caller is responsible for performing the necessary checks (like DPL, TSS
2246 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
2247 * reference for JMP, CALL, IRET.
2248 *
2249 * If the task switch is due to a software interrupt or hardware exception,
2250 * the caller is responsible for validating the TSS selector and descriptor. See
2251 * Intel Instruction reference for INT n.
2252 *
2253 * @returns VBox strict status code.
2254 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2255 * @param enmTaskSwitch The cause of the task switch.
2256 * @param uNextEip The EIP effective after the task switch.
2257 * @param fFlags The flags, see IEM_XCPT_FLAGS_XXX.
2258 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2259 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2260 * @param SelTSS The TSS selector of the new task.
2261 * @param pNewDescTSS Pointer to the new TSS descriptor.
2262 */
2263VBOXSTRICTRC
2264iemTaskSwitch(PVMCPUCC pVCpu,
2265 IEMTASKSWITCH enmTaskSwitch,
2266 uint32_t uNextEip,
2267 uint32_t fFlags,
2268 uint16_t uErr,
2269 uint64_t uCr2,
2270 RTSEL SelTSS,
2271 PIEMSELDESC pNewDescTSS) RT_NOEXCEPT
2272{
2273 Assert(!IEM_IS_REAL_MODE(pVCpu));
2274 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2275 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2276
2277 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
2278 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2279 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2280 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2281 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2282
2283 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2284 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2285
2286 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTSS,
2287 fIsNewTSS386, pVCpu->cpum.GstCtx.eip, uNextEip));
2288
2289 /* Update CR2 in case it's a page-fault. */
2290 /** @todo This should probably be done much earlier in IEM/PGM. See
2291 * @bugref{5653#c49}. */
2292 if (fFlags & IEM_XCPT_FLAGS_CR2)
2293 pVCpu->cpum.GstCtx.cr2 = uCr2;
2294
2295 /*
2296 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
2297 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
2298 */
2299 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
2300 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
2301 if (uNewTSSLimit < uNewTSSLimitMin)
2302 {
2303 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
2304 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
2305 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
2306 }
2307
2308 /*
2309     * Task switches in VMX non-root mode always cause a task-switch VM-exit.
2310 * The new TSS must have been read and validated (DPL, limits etc.) before a
2311 * task-switch VM-exit commences.
2312 *
2313 * See Intel spec. 25.4.2 "Treatment of Task Switches".
2314 */
2315 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
2316 {
2317 Log(("iemTaskSwitch: Guest intercept (source=%u, sel=%#x) -> VM-exit.\n", enmTaskSwitch, SelTSS));
2318 IEM_VMX_VMEXIT_TASK_SWITCH_RET(pVCpu, enmTaskSwitch, SelTSS, uNextEip - pVCpu->cpum.GstCtx.eip);
2319 }
2320
2321 /*
2322 * The SVM nested-guest intercept for task-switch takes priority over all exceptions
2323 * after validating the incoming (new) TSS, see AMD spec. 15.14.1 "Task Switch Intercept".
2324 */
2325 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TASK_SWITCH))
2326 {
2327 uint32_t const uExitInfo1 = SelTSS;
2328 uint32_t uExitInfo2 = uErr;
2329 switch (enmTaskSwitch)
2330 {
2331 case IEMTASKSWITCH_JUMP: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_JUMP; break;
2332 case IEMTASKSWITCH_IRET: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_IRET; break;
2333 default: break;
2334 }
2335 if (fFlags & IEM_XCPT_FLAGS_ERR)
2336 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_HAS_ERROR_CODE;
2337 if (pVCpu->cpum.GstCtx.eflags.Bits.u1RF)
2338 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_EFLAGS_RF;
2339
2340 Log(("iemTaskSwitch: Guest intercept -> #VMEXIT. uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uExitInfo1, uExitInfo2));
2341 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_TASK_SWITCH, uExitInfo1, uExitInfo2);
2342 RT_NOREF2(uExitInfo1, uExitInfo2);
2343 }
2344
2345 /*
2346     * Check the current TSS limit. The last bytes written to the current TSS during the
2347 * task switch will be 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
2348 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2349 *
2350     * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
2351 * end up with smaller than "legal" TSS limits.
2352 */
2353 uint32_t const uCurTSSLimit = pVCpu->cpum.GstCtx.tr.u32Limit;
2354 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
2355 if (uCurTSSLimit < uCurTSSLimitMin)
2356 {
2357 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
2358 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
2359 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
2360 }
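    /*
     * For illustration only: the minimum limits above (0x5f / 0x29) are simply
     * the last byte before the static selLdt field, which lives at offset 0x60
     * in a 32-bit TSS and at offset 0x2a in a 16-bit TSS.
     */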
2361
2362 /*
2363 * Verify that the new TSS can be accessed and map it. Map only the required contents
2364 * and not the entire TSS.
2365 */
2366 void *pvNewTSS;
2367 uint32_t const cbNewTSS = uNewTSSLimitMin + 1;
2368 RTGCPTR const GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
2369 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
2370 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
2371 * not perform correct translation if this happens. See Intel spec. 7.2.1
2372 * "Task-State Segment". */
2373 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW, 0);
2374 if (rcStrict != VINF_SUCCESS)
2375 {
2376 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
2377 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
2378 return rcStrict;
2379 }
2380
2381 /*
2382 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
2383 */
2384 uint32_t fEFlags = pVCpu->cpum.GstCtx.eflags.u;
2385 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
2386 || enmTaskSwitch == IEMTASKSWITCH_IRET)
2387 {
2388 PX86DESC pDescCurTSS;
2389 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
2390 pVCpu->cpum.GstCtx.gdtr.pGdt + (pVCpu->cpum.GstCtx.tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0);
2391 if (rcStrict != VINF_SUCCESS)
2392 {
2393 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2394 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2395 return rcStrict;
2396 }
2397
2398 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2399 rcStrict = iemMemCommitAndUnmap(pVCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
2400 if (rcStrict != VINF_SUCCESS)
2401 {
2402 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2403 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2404 return rcStrict;
2405 }
2406
2407 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
2408 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
2409 {
2410 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2411 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2412 fEFlags &= ~X86_EFL_NT;
2413 }
2414 }
2415
2416 /*
2417 * Save the CPU state into the current TSS.
2418 */
2419 RTGCPTR const GCPtrCurTSS = pVCpu->cpum.GstCtx.tr.u64Base;
2420 if (GCPtrNewTSS == GCPtrCurTSS)
2421 {
2422 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
2423 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
2424 pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.u, pVCpu->cpum.GstCtx.eax,
2425 pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel,
2426 pVCpu->cpum.GstCtx.ldtr.Sel));
2427 }
2428 if (fIsNewTSS386)
2429 {
2430 /*
2431 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
2432 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2433 */
2434 void *pvCurTSS32;
2435 uint32_t const offCurTSS = RT_UOFFSETOF(X86TSS32, eip);
2436 uint32_t const cbCurTSS = RT_UOFFSETOF(X86TSS32, selLdt) - RT_UOFFSETOF(X86TSS32, eip);
2437 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
2438 rcStrict = iemMemMap(pVCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW, 0);
2439 if (rcStrict != VINF_SUCCESS)
2440 {
2441 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
2442 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
2443 return rcStrict;
2444 }
2445
2446        /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS, offCurTSS + cbCurTSS). */
2447 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
2448 pCurTSS32->eip = uNextEip;
2449 pCurTSS32->eflags = fEFlags;
2450 pCurTSS32->eax = pVCpu->cpum.GstCtx.eax;
2451 pCurTSS32->ecx = pVCpu->cpum.GstCtx.ecx;
2452 pCurTSS32->edx = pVCpu->cpum.GstCtx.edx;
2453 pCurTSS32->ebx = pVCpu->cpum.GstCtx.ebx;
2454 pCurTSS32->esp = pVCpu->cpum.GstCtx.esp;
2455 pCurTSS32->ebp = pVCpu->cpum.GstCtx.ebp;
2456 pCurTSS32->esi = pVCpu->cpum.GstCtx.esi;
2457 pCurTSS32->edi = pVCpu->cpum.GstCtx.edi;
2458 pCurTSS32->es = pVCpu->cpum.GstCtx.es.Sel;
2459 pCurTSS32->cs = pVCpu->cpum.GstCtx.cs.Sel;
2460 pCurTSS32->ss = pVCpu->cpum.GstCtx.ss.Sel;
2461 pCurTSS32->ds = pVCpu->cpum.GstCtx.ds.Sel;
2462 pCurTSS32->fs = pVCpu->cpum.GstCtx.fs.Sel;
2463 pCurTSS32->gs = pVCpu->cpum.GstCtx.gs.Sel;
2464
2465 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
2466 if (rcStrict != VINF_SUCCESS)
2467 {
2468 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2469 VBOXSTRICTRC_VAL(rcStrict)));
2470 return rcStrict;
2471 }
2472 }
2473 else
2474 {
2475 /*
2476 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
2477 */
2478 void *pvCurTSS16;
2479 uint32_t const offCurTSS = RT_UOFFSETOF(X86TSS16, ip);
2480 uint32_t const cbCurTSS = RT_UOFFSETOF(X86TSS16, selLdt) - RT_UOFFSETOF(X86TSS16, ip);
2481 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
2482 rcStrict = iemMemMap(pVCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW, 0);
2483 if (rcStrict != VINF_SUCCESS)
2484 {
2485 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
2486 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
2487 return rcStrict;
2488 }
2489
2490        /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS, offCurTSS + cbCurTSS). */
2491 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
2492 pCurTSS16->ip = uNextEip;
2493 pCurTSS16->flags = (uint16_t)fEFlags;
2494 pCurTSS16->ax = pVCpu->cpum.GstCtx.ax;
2495 pCurTSS16->cx = pVCpu->cpum.GstCtx.cx;
2496 pCurTSS16->dx = pVCpu->cpum.GstCtx.dx;
2497 pCurTSS16->bx = pVCpu->cpum.GstCtx.bx;
2498 pCurTSS16->sp = pVCpu->cpum.GstCtx.sp;
2499 pCurTSS16->bp = pVCpu->cpum.GstCtx.bp;
2500 pCurTSS16->si = pVCpu->cpum.GstCtx.si;
2501 pCurTSS16->di = pVCpu->cpum.GstCtx.di;
2502 pCurTSS16->es = pVCpu->cpum.GstCtx.es.Sel;
2503 pCurTSS16->cs = pVCpu->cpum.GstCtx.cs.Sel;
2504 pCurTSS16->ss = pVCpu->cpum.GstCtx.ss.Sel;
2505 pCurTSS16->ds = pVCpu->cpum.GstCtx.ds.Sel;
2506
2507 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
2508 if (rcStrict != VINF_SUCCESS)
2509 {
2510 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2511 VBOXSTRICTRC_VAL(rcStrict)));
2512 return rcStrict;
2513 }
2514 }
2515
2516 /*
2517 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
2518 */
2519 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2520 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2521 {
2522 /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */
2523 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
2524 pNewTSS->selPrev = pVCpu->cpum.GstCtx.tr.Sel;
2525 }
2526
2527 /*
2528     * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky;
2529 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
2530 */
2531 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
2532 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
2533 bool fNewDebugTrap;
2534 if (fIsNewTSS386)
2535 {
2536 PCX86TSS32 pNewTSS32 = (PCX86TSS32)pvNewTSS;
2537 uNewCr3 = (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
2538 uNewEip = pNewTSS32->eip;
2539 uNewEflags = pNewTSS32->eflags;
2540 uNewEax = pNewTSS32->eax;
2541 uNewEcx = pNewTSS32->ecx;
2542 uNewEdx = pNewTSS32->edx;
2543 uNewEbx = pNewTSS32->ebx;
2544 uNewEsp = pNewTSS32->esp;
2545 uNewEbp = pNewTSS32->ebp;
2546 uNewEsi = pNewTSS32->esi;
2547 uNewEdi = pNewTSS32->edi;
2548 uNewES = pNewTSS32->es;
2549 uNewCS = pNewTSS32->cs;
2550 uNewSS = pNewTSS32->ss;
2551 uNewDS = pNewTSS32->ds;
2552 uNewFS = pNewTSS32->fs;
2553 uNewGS = pNewTSS32->gs;
2554 uNewLdt = pNewTSS32->selLdt;
2555 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
2556 }
2557 else
2558 {
2559 PCX86TSS16 pNewTSS16 = (PCX86TSS16)pvNewTSS;
2560 uNewCr3 = 0;
2561 uNewEip = pNewTSS16->ip;
2562 uNewEflags = pNewTSS16->flags;
2563 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax;
2564 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx;
2565 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx;
2566 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx;
2567 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp;
2568 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp;
2569 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si;
2570 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di;
2571 uNewES = pNewTSS16->es;
2572 uNewCS = pNewTSS16->cs;
2573 uNewSS = pNewTSS16->ss;
2574 uNewDS = pNewTSS16->ds;
2575 uNewFS = 0;
2576 uNewGS = 0;
2577 uNewLdt = pNewTSS16->selLdt;
2578 fNewDebugTrap = false;
2579 }
2580
2581 if (GCPtrNewTSS == GCPtrCurTSS)
2582 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
2583 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
2584
2585 /*
2586 * We're done accessing the new TSS.
2587 */
2588 rcStrict = iemMemCommitAndUnmap(pVCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
2589 if (rcStrict != VINF_SUCCESS)
2590 {
2591 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
2592 return rcStrict;
2593 }
2594
2595 /*
2596 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
2597 */
2598 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
2599 {
2600 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
2601 pVCpu->cpum.GstCtx.gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0);
2602 if (rcStrict != VINF_SUCCESS)
2603 {
2604 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2605 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2606 return rcStrict;
2607 }
2608
2609 /* Check that the descriptor indicates the new TSS is available (not busy). */
2610 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2611 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
2612 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
2613
2614 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2615 rcStrict = iemMemCommitAndUnmap(pVCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
2616 if (rcStrict != VINF_SUCCESS)
2617 {
2618 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2619 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2620 return rcStrict;
2621 }
2622 }
2623
2624 /*
2625 * From this point on, we're technically in the new task. We will defer exceptions
2626 * until the completion of the task switch but before executing any instructions in the new task.
2627 */
2628 pVCpu->cpum.GstCtx.tr.Sel = SelTSS;
2629 pVCpu->cpum.GstCtx.tr.ValidSel = SelTSS;
2630 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
2631 pVCpu->cpum.GstCtx.tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
2632 pVCpu->cpum.GstCtx.tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
2633 pVCpu->cpum.GstCtx.tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy);
2634 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
2635
2636 /* Set the busy bit in TR. */
2637 pVCpu->cpum.GstCtx.tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2638
2639 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
2640 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2641 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2642 {
2643 uNewEflags |= X86_EFL_NT;
2644 }
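    /*
     * For illustration only: together with the selPrev back-link written above
     * for CALL/INT_XCPT switches, the NT bit is what allows the new task to
     * return: an IRET executed with EFLAGS.NT=1 performs a task switch back to
     * the TSS named by the previous task link instead of a normal return.
     */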
2645
2646 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
2647 pVCpu->cpum.GstCtx.cr0 |= X86_CR0_TS;
2648 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
2649
2650 pVCpu->cpum.GstCtx.eip = uNewEip;
2651 pVCpu->cpum.GstCtx.eax = uNewEax;
2652 pVCpu->cpum.GstCtx.ecx = uNewEcx;
2653 pVCpu->cpum.GstCtx.edx = uNewEdx;
2654 pVCpu->cpum.GstCtx.ebx = uNewEbx;
2655 pVCpu->cpum.GstCtx.esp = uNewEsp;
2656 pVCpu->cpum.GstCtx.ebp = uNewEbp;
2657 pVCpu->cpum.GstCtx.esi = uNewEsi;
2658 pVCpu->cpum.GstCtx.edi = uNewEdi;
2659
2660 uNewEflags &= X86_EFL_LIVE_MASK;
2661 uNewEflags |= X86_EFL_RA1_MASK;
2662 IEMMISC_SET_EFL(pVCpu, uNewEflags);
2663
2664 /*
2665 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
2666 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
2667 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
2668 */
2669 pVCpu->cpum.GstCtx.es.Sel = uNewES;
2670 pVCpu->cpum.GstCtx.es.Attr.u &= ~X86DESCATTR_P;
2671
2672 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
2673 pVCpu->cpum.GstCtx.cs.Attr.u &= ~X86DESCATTR_P;
2674
2675 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
2676 pVCpu->cpum.GstCtx.ss.Attr.u &= ~X86DESCATTR_P;
2677
2678 pVCpu->cpum.GstCtx.ds.Sel = uNewDS;
2679 pVCpu->cpum.GstCtx.ds.Attr.u &= ~X86DESCATTR_P;
2680
2681 pVCpu->cpum.GstCtx.fs.Sel = uNewFS;
2682 pVCpu->cpum.GstCtx.fs.Attr.u &= ~X86DESCATTR_P;
2683
2684 pVCpu->cpum.GstCtx.gs.Sel = uNewGS;
2685 pVCpu->cpum.GstCtx.gs.Attr.u &= ~X86DESCATTR_P;
2686 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2687
2688 pVCpu->cpum.GstCtx.ldtr.Sel = uNewLdt;
2689 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
2690 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_P;
2691 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
2692
2693 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2694 {
2695 pVCpu->cpum.GstCtx.es.Attr.u |= X86DESCATTR_UNUSABLE;
2696 pVCpu->cpum.GstCtx.cs.Attr.u |= X86DESCATTR_UNUSABLE;
2697 pVCpu->cpum.GstCtx.ss.Attr.u |= X86DESCATTR_UNUSABLE;
2698 pVCpu->cpum.GstCtx.ds.Attr.u |= X86DESCATTR_UNUSABLE;
2699 pVCpu->cpum.GstCtx.fs.Attr.u |= X86DESCATTR_UNUSABLE;
2700 pVCpu->cpum.GstCtx.gs.Attr.u |= X86DESCATTR_UNUSABLE;
2701 pVCpu->cpum.GstCtx.ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
2702 }
2703
2704 /*
2705 * Switch CR3 for the new task.
2706 */
2707 if ( fIsNewTSS386
2708 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG))
2709 {
2710 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
2711 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
2712 AssertRCSuccessReturn(rc, rc);
2713
2714 /* Inform PGM. */
2715 /** @todo Should we raise \#GP(0) here when PAE PDPEs are invalid? */
2716 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PGE));
2717 AssertRCReturn(rc, rc);
2718 /* ignore informational status codes */
2719
2720 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
2721 }
2722
2723 /*
2724 * Switch LDTR for the new task.
2725 */
2726 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
2727 iemHlpLoadNullDataSelectorProt(pVCpu, &pVCpu->cpum.GstCtx.ldtr, uNewLdt);
2728 else
2729 {
2730 Assert(!pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
2731
2732 IEMSELDESC DescNewLdt;
2733 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
2734 if (rcStrict != VINF_SUCCESS)
2735 {
2736 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
2737 uNewLdt, pVCpu->cpum.GstCtx.gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
2738 return rcStrict;
2739 }
2740 if ( !DescNewLdt.Legacy.Gen.u1Present
2741 || DescNewLdt.Legacy.Gen.u1DescType
2742 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
2743 {
2744 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
2745 uNewLdt, DescNewLdt.Legacy.u));
2746 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
2747 }
2748
2749 pVCpu->cpum.GstCtx.ldtr.ValidSel = uNewLdt;
2750 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
2751 pVCpu->cpum.GstCtx.ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
2752 pVCpu->cpum.GstCtx.ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
2753 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
2754 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2755 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
2756 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
2757 }
2758
2759 IEMSELDESC DescSS;
2760 if (IEM_IS_V86_MODE(pVCpu))
2761 {
2762 IEM_SET_CPL(pVCpu, 3);
2763 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.es, uNewES);
2764 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.cs, uNewCS);
2765 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ss, uNewSS);
2766 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ds, uNewDS);
2767 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.fs, uNewFS);
2768 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.gs, uNewGS);
2769
2770 /* Quick fix: fake DescSS. */ /** @todo fix the code further down? */
2771 DescSS.Legacy.u = 0;
2772 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u32Limit;
2773 DescSS.Legacy.Gen.u4LimitHigh = pVCpu->cpum.GstCtx.ss.u32Limit >> 16;
2774 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u64Base;
2775 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 16);
2776 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 24);
2777 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
2778 DescSS.Legacy.Gen.u2Dpl = 3;
2779 }
2780 else
2781 {
2782 uint8_t const uNewCpl = (uNewCS & X86_SEL_RPL);
2783
2784 /*
2785 * Load the stack segment for the new task.
2786 */
2787 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
2788 {
2789 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
2790 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2791 }
2792
2793 /* Fetch the descriptor. */
2794 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
2795 if (rcStrict != VINF_SUCCESS)
2796 {
2797 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
2798 VBOXSTRICTRC_VAL(rcStrict)));
2799 return rcStrict;
2800 }
2801
2802 /* SS must be a data segment and writable. */
2803 if ( !DescSS.Legacy.Gen.u1DescType
2804 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
2805 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
2806 {
2807 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
2808 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
2809 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2810 }
2811
2812 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
2813 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
2814 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
2815 {
2816 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
2817 uNewCpl));
2818 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2819 }
2820
2821 /* Is it there? */
2822 if (!DescSS.Legacy.Gen.u1Present)
2823 {
2824 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
2825 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2826 }
2827
2828 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
2829 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
2830
2831 /* Set the accessed bit before committing the result into SS. */
2832 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2833 {
2834 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
2835 if (rcStrict != VINF_SUCCESS)
2836 return rcStrict;
2837 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2838 }
2839
2840 /* Commit SS. */
2841 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
2842 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSS;
2843 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
2844 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimit;
2845 pVCpu->cpum.GstCtx.ss.u64Base = u64Base;
2846 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
2847 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
2848
2849 /* CPL has changed, update IEM before loading rest of segments. */
2850 IEM_SET_CPL(pVCpu, uNewCpl);
2851
2852 /*
2853 * Load the data segments for the new task.
2854 */
2855 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.es, uNewES);
2856 if (rcStrict != VINF_SUCCESS)
2857 return rcStrict;
2858 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.ds, uNewDS);
2859 if (rcStrict != VINF_SUCCESS)
2860 return rcStrict;
2861 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.fs, uNewFS);
2862 if (rcStrict != VINF_SUCCESS)
2863 return rcStrict;
2864 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.gs, uNewGS);
2865 if (rcStrict != VINF_SUCCESS)
2866 return rcStrict;
2867
2868 /*
2869 * Load the code segment for the new task.
2870 */
2871 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
2872 {
2873 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
2874 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2875 }
2876
2877 /* Fetch the descriptor. */
2878 IEMSELDESC DescCS;
2879 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
2880 if (rcStrict != VINF_SUCCESS)
2881 {
2882 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
2883 return rcStrict;
2884 }
2885
2886 /* CS must be a code segment. */
2887 if ( !DescCS.Legacy.Gen.u1DescType
2888 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
2889 {
2890 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
2891 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
2892 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2893 }
2894
2895 /* For conforming CS, DPL must be less than or equal to the RPL. */
2896 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2897 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
2898 {
2899            Log(("iemTaskSwitch: conforming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
2900 DescCS.Legacy.Gen.u2Dpl));
2901 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2902 }
2903
2904 /* For non-conforming CS, DPL must match RPL. */
2905 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2906 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
2907 {
2908            Log(("iemTaskSwitch: non-conforming CS DPL RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
2909 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
2910 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2911 }
2912
2913 /* Is it there? */
2914 if (!DescCS.Legacy.Gen.u1Present)
2915 {
2916 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
2917 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2918 }
2919
2920 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
2921 u64Base = X86DESC_BASE(&DescCS.Legacy);
2922
2923 /* Set the accessed bit before committing the result into CS. */
2924 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2925 {
2926 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
2927 if (rcStrict != VINF_SUCCESS)
2928 return rcStrict;
2929 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2930 }
2931
2932 /* Commit CS. */
2933 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
2934 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCS;
2935 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
2936 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
2937 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
2938 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
2939 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
2940 }
2941
2942 /* Make sure the CPU mode is correct. */
2943 uint32_t const fExecNew = iemCalcExecFlags(pVCpu) | (pVCpu->iem.s.fExec & IEM_F_USER_OPTS);
2944 if (fExecNew != pVCpu->iem.s.fExec)
2945 Log(("iemTaskSwitch: fExec %#x -> %#x (xor %#x)\n", pVCpu->iem.s.fExec, fExecNew, pVCpu->iem.s.fExec ^ fExecNew));
2946 pVCpu->iem.s.fExec = fExecNew;
2947
2948 /** @todo Debug trap. */
2949 if (fIsNewTSS386 && fNewDebugTrap)
2950 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
2951
2952 /*
2953 * Construct the error code masks based on what caused this task switch.
2954 * See Intel Instruction reference for INT.
2955 */
2956 uint16_t uExt;
2957 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
2958 && ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
2959 || (fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)))
2960 uExt = 1;
2961 else
2962 uExt = 0;
2963
2964 /*
2965 * Push any error code on to the new stack.
2966 */
2967 if (fFlags & IEM_XCPT_FLAGS_ERR)
2968 {
2969 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
2970 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
2971 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
2972
2973 /* Check that there is sufficient space on the stack. */
2974 /** @todo Factor out segment limit checking for normal/expand down segments
2975 * into a separate function. */
2976 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
2977 {
2978 if ( pVCpu->cpum.GstCtx.esp - 1 > cbLimitSS
2979 || pVCpu->cpum.GstCtx.esp < cbStackFrame)
2980 {
2981 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
2982 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
2983 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
2984 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
2985 }
2986 }
2987 else
2988 {
2989 if ( pVCpu->cpum.GstCtx.esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
2990 || pVCpu->cpum.GstCtx.esp - cbStackFrame < cbLimitSS + UINT32_C(1))
2991 {
2992 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
2993 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
2994 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
2995 }
2996 }
2997
2998
2999 if (fIsNewTSS386)
3000 rcStrict = iemMemStackPushU32(pVCpu, uErr);
3001 else
3002 rcStrict = iemMemStackPushU16(pVCpu, uErr);
3003 if (rcStrict != VINF_SUCCESS)
3004 {
3005 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
3006 fIsNewTSS386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
3007 return rcStrict;
3008 }
3009 }
3010
3011 /* Check the new EIP against the new CS limit. */
3012 if (pVCpu->cpum.GstCtx.eip > pVCpu->cpum.GstCtx.cs.u32Limit)
3013 {
3014        Log(("iemTaskSwitch: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
3015 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.cs.u32Limit));
3016 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
3017 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
3018 }
3019
3020 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip,
3021 pVCpu->cpum.GstCtx.ss.Sel));
3022 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3023}
3024
3025
3026/**
3027 * Implements exceptions and interrupts for protected mode.
3028 *
3029 * @returns VBox strict status code.
3030 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3031 * @param cbInstr The number of bytes to offset rIP by in the return
3032 * address.
3033 * @param u8Vector The interrupt / exception vector number.
3034 * @param fFlags The flags.
3035 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3036 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3037 */
3038static VBOXSTRICTRC
3039iemRaiseXcptOrIntInProtMode(PVMCPUCC pVCpu,
3040 uint8_t cbInstr,
3041 uint8_t u8Vector,
3042 uint32_t fFlags,
3043 uint16_t uErr,
3044 uint64_t uCr2) RT_NOEXCEPT
3045{
3046 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3047
3048 /*
3049 * Read the IDT entry.
3050 */
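     /* Note: IDT related faults use an error code of the form (vector << 3) | IDT-bit,
        with the EXT bit or'ed in when the event was delivered externally; that is what
        the X86_TRAP_ERR_IDT / X86_TRAP_ERR_SEL_SHIFT combinations below encode
        (see "Error Code" in the Intel SDM). */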
3051 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
3052 {
3053 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3054 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3055 }
3056 X86DESC Idte;
3057 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
3058 pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(8) * u8Vector);
3059 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3060 {
3061 Log(("iemRaiseXcptOrIntInProtMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3062 return rcStrict;
3063 }
3064 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
3065 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3066 Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
3067
3068 /*
3069 * Check the descriptor type, DPL and such.
3070 * ASSUMES this is done in the same order as described for call-gate calls.
3071 */
3072 if (Idte.Gate.u1DescType)
3073 {
3074 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3075 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3076 }
3077 bool fTaskGate = false;
3078 uint8_t f32BitGate = true;
3079 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
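     /* TF, NT, RF and VM are always cleared when entering the handler; interrupt
        gates (but not trap gates) additionally clear IF in the switch below. */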
3080 switch (Idte.Gate.u4Type)
3081 {
3082 case X86_SEL_TYPE_SYS_UNDEFINED:
3083 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
3084 case X86_SEL_TYPE_SYS_LDT:
3085 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3086 case X86_SEL_TYPE_SYS_286_CALL_GATE:
3087 case X86_SEL_TYPE_SYS_UNDEFINED2:
3088 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
3089 case X86_SEL_TYPE_SYS_UNDEFINED3:
3090 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3091 case X86_SEL_TYPE_SYS_386_CALL_GATE:
3092 case X86_SEL_TYPE_SYS_UNDEFINED4:
3093 {
3094 /** @todo check what actually happens when the type is wrong...
3095 * esp. call gates. */
3096 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3097 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3098 }
3099
3100 case X86_SEL_TYPE_SYS_286_INT_GATE:
3101 f32BitGate = false;
3102 RT_FALL_THRU();
3103 case X86_SEL_TYPE_SYS_386_INT_GATE:
3104 fEflToClear |= X86_EFL_IF;
3105 break;
3106
3107 case X86_SEL_TYPE_SYS_TASK_GATE:
3108 fTaskGate = true;
3109#ifndef IEM_IMPLEMENTS_TASKSWITCH
3110 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
3111#endif
3112 break;
3113
3114 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
3115 f32BitGate = false;
     RT_FALL_THRU();
3116 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
3117 break;
3118
3119 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3120 }
3121
3122 /* Check DPL against CPL if applicable. */
3123 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
3124 {
3125 if (IEM_GET_CPL(pVCpu) > Idte.Gate.u2Dpl)
3126 {
3127 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, IEM_GET_CPL(pVCpu), Idte.Gate.u2Dpl));
3128 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3129 }
3130 }
3131
3132 /* Is it there? */
3133 if (!Idte.Gate.u1Present)
3134 {
3135 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
3136 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3137 }
3138
3139 /* Is it a task-gate? */
3140 if (fTaskGate)
3141 {
3142 /*
3143 * Construct the error code masks based on what caused this task switch.
3144 * See Intel Instruction reference for INT.
3145 */
3146 uint16_t const uExt = ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3147 && !(fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)) ? 0 : 1;
3148 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
3149 RTSEL SelTSS = Idte.Gate.u16Sel;
3150
3151 /*
3152 * Fetch the TSS descriptor in the GDT.
3153 */
3154 IEMSELDESC DescTSS;
3155 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
3156 if (rcStrict != VINF_SUCCESS)
3157 {
3158 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
3159 VBOXSTRICTRC_VAL(rcStrict)));
3160 return rcStrict;
3161 }
3162
3163 /* The TSS descriptor must be a system segment and be available (not busy). */
3164 if ( DescTSS.Legacy.Gen.u1DescType
3165 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
3166 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
3167 {
3168 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
3169 u8Vector, SelTSS, DescTSS.Legacy.au64));
3170 return iemRaiseGeneralProtectionFault(pVCpu, (SelTSS & uSelMask) | uExt);
3171 }
3172
3173 /* The TSS must be present. */
3174 if (!DescTSS.Legacy.Gen.u1Present)
3175 {
3176 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
3177 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTSS & uSelMask) | uExt);
3178 }
3179
3180 /* Do the actual task switch. */
3181 return iemTaskSwitch(pVCpu, IEMTASKSWITCH_INT_XCPT,
3182 (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip,
3183 fFlags, uErr, uCr2, SelTSS, &DescTSS);
3184 }
3185
3186 /* A null CS is bad. */
3187 RTSEL NewCS = Idte.Gate.u16Sel;
3188 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3189 {
3190 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3191 return iemRaiseGeneralProtectionFault0(pVCpu);
3192 }
3193
3194 /* Fetch the descriptor for the new CS. */
3195 IEMSELDESC DescCS;
3196 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
3197 if (rcStrict != VINF_SUCCESS)
3198 {
3199 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3200 return rcStrict;
3201 }
3202
3203 /* Must be a code segment. */
3204 if (!DescCS.Legacy.Gen.u1DescType)
3205 {
3206 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3207 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3208 }
3209 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3210 {
3211 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3212 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3213 }
3214
3215 /* Don't allow lowering the privilege level. */
3216 /** @todo Does the lowering of privileges apply to software interrupts
3217 * only? This has bearings on the more-privileged or
3218 * same-privilege stack behavior further down. A testcase would
3219 * be nice. */
3220 if (DescCS.Legacy.Gen.u2Dpl > IEM_GET_CPL(pVCpu))
3221 {
3222 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3223 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
3224 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3225 }
3226
3227 /* Make sure the selector is present. */
3228 if (!DescCS.Legacy.Gen.u1Present)
3229 {
3230 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3231 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
3232 }
3233
3234 /* Check the new EIP against the new CS limit. */
3235 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
3236 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
3237 ? Idte.Gate.u16OffsetLow
3238 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
3239 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
3240 if (uNewEip > cbLimitCS)
3241 {
3242 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
3243 u8Vector, uNewEip, cbLimitCS, NewCS));
3244 return iemRaiseGeneralProtectionFault(pVCpu, 0);
3245 }
3246 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
3247
3248 /* Calc the flag image to push. */
3249 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3250 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3251 fEfl &= ~X86_EFL_RF;
3252 else
3253 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3254
3255 /* From V8086 mode only go to CPL 0. */
3256 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3257 ? IEM_GET_CPL(pVCpu) : DescCS.Legacy.Gen.u2Dpl;
3258 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
3259 {
3260 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
3261 return iemRaiseGeneralProtectionFault(pVCpu, 0);
3262 }
3263
3264 /*
3265 * If the privilege level changes, we need to get a new stack from the TSS.
3266 * This in turns means validating the new SS and ESP...
3267 */
3268 if (uNewCpl != IEM_GET_CPL(pVCpu))
3269 {
3270 RTSEL NewSS;
3271 uint32_t uNewEsp;
3272 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, uNewCpl, &NewSS, &uNewEsp);
3273 if (rcStrict != VINF_SUCCESS)
3274 return rcStrict;
3275
3276 IEMSELDESC DescSS;
3277 rcStrict = iemMiscValidateNewSS(pVCpu, NewSS, uNewCpl, &DescSS);
3278 if (rcStrict != VINF_SUCCESS)
3279 return rcStrict;
3280 /* If the new SS is 16-bit, we are only going to use SP, not ESP. */
3281 if (!DescSS.Legacy.Gen.u1DefBig)
3282 {
3283 Log(("iemRaiseXcptOrIntInProtMode: Forcing ESP=%#x to 16 bits\n", uNewEsp));
3284 uNewEsp = (uint16_t)uNewEsp;
3285 }
3286
3287 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
3288
3289 /* Check that there is sufficient space for the stack frame. */
3290 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
3291 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
3292 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
3293 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
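     /* I.e. EIP, CS, EFLAGS, old ESP and old SS (5 entries), one more for an error
        code, and four more (ES, DS, FS, GS) when interrupting V8086 code; each entry
        is 2 or 4 bytes depending on the gate size. */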
3294
3295 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3296 {
3297 if ( uNewEsp - 1 > cbLimitSS
3298 || uNewEsp < cbStackFrame)
3299 {
3300 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
3301 u8Vector, NewSS, uNewEsp, cbStackFrame));
3302 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
3303 }
3304 }
3305 else
3306 {
3307 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT16_MAX)
3308 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
3309 {
3310 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
3311 u8Vector, NewSS, uNewEsp, cbStackFrame));
3312 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
3313 }
3314 }
3315
3316 /*
3317 * Start making changes.
3318 */
3319
3320 /* Set the new CPL so that stack accesses use it. */
3321 uint8_t const uOldCpl = IEM_GET_CPL(pVCpu);
3322 IEM_SET_CPL(pVCpu, uNewCpl);
3323
3324 /* Create the stack frame. */
3325 RTPTRUNION uStackFrame;
3326 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
3327 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy),
3328 IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */
3329 if (rcStrict != VINF_SUCCESS)
3330 return rcStrict;
3331 void * const pvStackFrame = uStackFrame.pv;
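     /* Frame layout, lowest address first: [error code,] EIP, CS, EFLAGS, old ESP,
        old SS, and when interrupting V8086 code also ES, DS, FS and GS. */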
3332 if (f32BitGate)
3333 {
3334 if (fFlags & IEM_XCPT_FLAGS_ERR)
3335 *uStackFrame.pu32++ = uErr;
3336 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3337 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
3338 uStackFrame.pu32[2] = fEfl;
3339 uStackFrame.pu32[3] = pVCpu->cpum.GstCtx.esp;
3340 uStackFrame.pu32[4] = pVCpu->cpum.GstCtx.ss.Sel;
3341 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
3342 if (fEfl & X86_EFL_VM)
3343 {
3344 uStackFrame.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel;
3345 uStackFrame.pu32[5] = pVCpu->cpum.GstCtx.es.Sel;
3346 uStackFrame.pu32[6] = pVCpu->cpum.GstCtx.ds.Sel;
3347 uStackFrame.pu32[7] = pVCpu->cpum.GstCtx.fs.Sel;
3348 uStackFrame.pu32[8] = pVCpu->cpum.GstCtx.gs.Sel;
3349 }
3350 }
3351 else
3352 {
3353 if (fFlags & IEM_XCPT_FLAGS_ERR)
3354 *uStackFrame.pu16++ = uErr;
3355 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
3356 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
3357 uStackFrame.pu16[2] = fEfl;
3358 uStackFrame.pu16[3] = pVCpu->cpum.GstCtx.sp;
3359 uStackFrame.pu16[4] = pVCpu->cpum.GstCtx.ss.Sel;
3360 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.sp));
3361 if (fEfl & X86_EFL_VM)
3362 {
3363 uStackFrame.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
3364 uStackFrame.pu16[5] = pVCpu->cpum.GstCtx.es.Sel;
3365 uStackFrame.pu16[6] = pVCpu->cpum.GstCtx.ds.Sel;
3366 uStackFrame.pu16[7] = pVCpu->cpum.GstCtx.fs.Sel;
3367 uStackFrame.pu16[8] = pVCpu->cpum.GstCtx.gs.Sel;
3368 }
3369 }
3370 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
3371 if (rcStrict != VINF_SUCCESS)
3372 return rcStrict;
3373
3374 /* Mark the selectors 'accessed' (hope this is the correct time). */
3375 /** @todo testcase: exactly _when_ are the accessed bits set - before or
3376 * after pushing the stack frame? (Write protect the GDT + stack to
3377 * find out.) */
3378 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3379 {
3380 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3381 if (rcStrict != VINF_SUCCESS)
3382 return rcStrict;
3383 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3384 }
3385
3386 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3387 {
3388 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
3389 if (rcStrict != VINF_SUCCESS)
3390 return rcStrict;
3391 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3392 }
3393
3394 /*
3395 * Start committing the register changes (joins with the DPL=CPL branch).
3396 */
3397 pVCpu->cpum.GstCtx.ss.Sel = NewSS;
3398 pVCpu->cpum.GstCtx.ss.ValidSel = NewSS;
3399 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3400 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimitSS;
3401 pVCpu->cpum.GstCtx.ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
3402 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3403 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
3404 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
3405 * SP is loaded).
3406 * Need to check the other combinations too:
3407 * - 16-bit TSS, 32-bit handler
3408 * - 32-bit TSS, 16-bit handler */
3409 if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
3410 pVCpu->cpum.GstCtx.sp = (uint16_t)(uNewEsp - cbStackFrame);
3411 else
3412 pVCpu->cpum.GstCtx.rsp = uNewEsp - cbStackFrame;
3413
3414 if (fEfl & X86_EFL_VM)
3415 {
3416 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.gs);
3417 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.fs);
3418 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.es);
3419 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.ds);
3420 }
3421 }
3422 /*
3423 * Same privilege, no stack change and smaller stack frame.
3424 */
3425 else
3426 {
3427 uint64_t uNewRsp;
3428 RTPTRUNION uStackFrame;
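     /* Only EFLAGS, CS and EIP (3 entries of 2 or 4 bytes) are pushed here, plus the
        optional error code. */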
3429 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
3430 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, f32BitGate ? 3 : 1, &uStackFrame.pv, &uNewRsp);
3431 if (rcStrict != VINF_SUCCESS)
3432 return rcStrict;
3433 void * const pvStackFrame = uStackFrame.pv;
3434
3435 if (f32BitGate)
3436 {
3437 if (fFlags & IEM_XCPT_FLAGS_ERR)
3438 *uStackFrame.pu32++ = uErr;
3439 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3440 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | IEM_GET_CPL(pVCpu);
3441 uStackFrame.pu32[2] = fEfl;
3442 }
3443 else
3444 {
3445 if (fFlags & IEM_XCPT_FLAGS_ERR)
3446 *uStackFrame.pu16++ = uErr;
3447 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3448 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | IEM_GET_CPL(pVCpu);
3449 uStackFrame.pu16[2] = fEfl;
3450 }
3451 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
3452 if (rcStrict != VINF_SUCCESS)
3453 return rcStrict;
3454
3455 /* Mark the CS selector as 'accessed'. */
3456 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3457 {
3458 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3459 if (rcStrict != VINF_SUCCESS)
3460 return rcStrict;
3461 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3462 }
3463
3464 /*
3465 * Start committing the register changes (joins with the other branch).
3466 */
3467 pVCpu->cpum.GstCtx.rsp = uNewRsp;
3468 }
3469
3470 /* ... register committing continues. */
3471 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3472 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3473 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3474 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCS;
3475 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3476 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3477
3478 pVCpu->cpum.GstCtx.rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
3479 fEfl &= ~fEflToClear;
3480 IEMMISC_SET_EFL(pVCpu, fEfl);
3481
3482 if (fFlags & IEM_XCPT_FLAGS_CR2)
3483 pVCpu->cpum.GstCtx.cr2 = uCr2;
3484
3485 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3486 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3487
3488 /* Make sure the execution flags are correct. */
3489 uint32_t const fExecNew = iemCalcExecFlags(pVCpu) | (pVCpu->iem.s.fExec & IEM_F_USER_OPTS);
3490 if (fExecNew != pVCpu->iem.s.fExec)
3491 Log(("iemRaiseXcptOrIntInProtMode: fExec %#x -> %#x (xor %#x)\n",
3492 pVCpu->iem.s.fExec, fExecNew, pVCpu->iem.s.fExec ^ fExecNew));
3493 pVCpu->iem.s.fExec = fExecNew;
3494 Assert(IEM_GET_CPL(pVCpu) == uNewCpl);
3495
3496 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3497}
3498
3499
3500/**
3501 * Implements exceptions and interrupts for long mode.
3502 *
3503 * @returns VBox strict status code.
3504 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3505 * @param cbInstr The number of bytes to offset rIP by in the return
3506 * address.
3507 * @param u8Vector The interrupt / exception vector number.
3508 * @param fFlags The flags.
3509 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3510 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3511 */
3512static VBOXSTRICTRC
3513iemRaiseXcptOrIntInLongMode(PVMCPUCC pVCpu,
3514 uint8_t cbInstr,
3515 uint8_t u8Vector,
3516 uint32_t fFlags,
3517 uint16_t uErr,
3518 uint64_t uCr2) RT_NOEXCEPT
3519{
3520 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3521
3522 /*
3523 * Read the IDT entry.
3524 */
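     /* In long mode each IDT entry is 16 bytes, hence the shift by 4 and the two
        8-byte fetches below. */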
3525 uint16_t offIdt = (uint16_t)u8Vector << 4;
3526 if (pVCpu->cpum.GstCtx.idtr.cbIdt < offIdt + 7)
3527 {
3528 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3529 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3530 }
3531 X86DESC64 Idte;
3532#ifdef _MSC_VER /* Shut up silly compiler warning. */
3533 Idte.au64[0] = 0;
3534 Idte.au64[1] = 0;
3535#endif
3536 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt);
3537 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3538 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt + 8);
3539 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3540 {
3541 Log(("iemRaiseXcptOrIntInLongMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3542 return rcStrict;
3543 }
3544 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
3545 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3546 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
3547
3548 /*
3549 * Check the descriptor type, DPL and such.
3550 * ASSUMES this is done in the same order as described for call-gate calls.
3551 */
3552 if (Idte.Gate.u1DescType)
3553 {
3554 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3555 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3556 }
3557 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
3558 switch (Idte.Gate.u4Type)
3559 {
3560 case AMD64_SEL_TYPE_SYS_INT_GATE:
3561 fEflToClear |= X86_EFL_IF;
3562 break;
3563 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
3564 break;
3565
3566 default:
3567 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3568 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3569 }
3570
3571 /* Check DPL against CPL if applicable. */
3572 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
3573 {
3574 if (IEM_GET_CPL(pVCpu) > Idte.Gate.u2Dpl)
3575 {
3576 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, IEM_GET_CPL(pVCpu), Idte.Gate.u2Dpl));
3577 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3578 }
3579 }
3580
3581 /* Is it there? */
3582 if (!Idte.Gate.u1Present)
3583 {
3584 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
3585 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3586 }
3587
3588 /* A null CS is bad. */
3589 RTSEL NewCS = Idte.Gate.u16Sel;
3590 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3591 {
3592 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3593 return iemRaiseGeneralProtectionFault0(pVCpu);
3594 }
3595
3596 /* Fetch the descriptor for the new CS. */
3597 IEMSELDESC DescCS;
3598 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
3599 if (rcStrict != VINF_SUCCESS)
3600 {
3601 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3602 return rcStrict;
3603 }
3604
3605 /* Must be a 64-bit code segment. */
3606 if (!DescCS.Long.Gen.u1DescType)
3607 {
3608 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3609 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3610 }
3611 if ( !DescCS.Long.Gen.u1Long
3612 || DescCS.Long.Gen.u1DefBig
3613 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
3614 {
3615 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
3616 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
3617 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3618 }
3619
3620 /* Don't allow lowering the privilege level. For non-conforming CS
3621 selectors, the CS.DPL sets the privilege level the trap/interrupt
3622 handler runs at. For conforming CS selectors, the CPL remains
3623 unchanged, but the CS.DPL must be <= CPL. */
3624 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
3625 * when CPU in Ring-0. Result \#GP? */
3626 if (DescCS.Legacy.Gen.u2Dpl > IEM_GET_CPL(pVCpu))
3627 {
3628 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3629 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
3630 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3631 }
3632
3633
3634 /* Make sure the selector is present. */
3635 if (!DescCS.Legacy.Gen.u1Present)
3636 {
3637 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3638 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
3639 }
3640
3641 /* Check that the new RIP is canonical. */
3642 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
3643 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
3644 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
3645 if (!IEM_IS_CANONICAL(uNewRip))
3646 {
3647 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
3648 return iemRaiseGeneralProtectionFault0(pVCpu);
3649 }
3650
3651 /*
3652 * If the privilege level changes or if the IST isn't zero, we need to get
3653 * a new stack from the TSS.
3654 */
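     /* (A non-zero IST field unconditionally selects one of the seven interrupt
        stack table entries in the 64-bit TSS.) */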
3655 uint64_t uNewRsp;
3656 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3657 ? IEM_GET_CPL(pVCpu) : DescCS.Legacy.Gen.u2Dpl;
3658 if ( uNewCpl != IEM_GET_CPL(pVCpu)
3659 || Idte.Gate.u3IST != 0)
3660 {
3661 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
3662 if (rcStrict != VINF_SUCCESS)
3663 return rcStrict;
3664 }
3665 else
3666 uNewRsp = pVCpu->cpum.GstCtx.rsp;
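     /* The CPU aligns RSP on a 16 byte boundary when dispatching interrupts and
        exceptions in long mode. */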
3667 uNewRsp &= ~(uint64_t)0xf;
3668
3669 /*
3670 * Calc the flag image to push.
3671 */
3672 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3673 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3674 fEfl &= ~X86_EFL_RF;
3675 else
3676 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3677
3678 /*
3679 * Start making changes.
3680 */
3681 /* Set the new CPL so that stack accesses use it. */
3682 uint8_t const uOldCpl = IEM_GET_CPL(pVCpu);
3683 IEM_SET_CPL(pVCpu, uNewCpl);
3684/** @todo Setting CPL this early seems wrong as it would affect any errors we
3685 * raise when accessing the stack and (?) the GDT/LDT... */
3686
3687 /* Create the stack frame. */
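     /* SS, RSP, RFLAGS, CS and RIP are always pushed (5 qwords); an error code
        makes it 6. */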
3688 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
3689 RTPTRUNION uStackFrame;
3690 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
3691 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */
3692 if (rcStrict != VINF_SUCCESS)
3693 return rcStrict;
3694 void * const pvStackFrame = uStackFrame.pv;
3695
3696 if (fFlags & IEM_XCPT_FLAGS_ERR)
3697 *uStackFrame.pu64++ = uErr;
3698 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.rip + cbInstr : pVCpu->cpum.GstCtx.rip;
3699 uStackFrame.pu64[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
3700 uStackFrame.pu64[2] = fEfl;
3701 uStackFrame.pu64[3] = pVCpu->cpum.GstCtx.rsp;
3702 uStackFrame.pu64[4] = pVCpu->cpum.GstCtx.ss.Sel;
3703 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
3704 if (rcStrict != VINF_SUCCESS)
3705 return rcStrict;
3706
3707 /* Mark the CS selector 'accessed' (hope this is the correct time). */
3708 /** @todo testcase: exactly _when_ are the accessed bits set - before or
3709 * after pushing the stack frame? (Write protect the GDT + stack to
3710 * find out.) */
3711 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3712 {
3713 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3714 if (rcStrict != VINF_SUCCESS)
3715 return rcStrict;
3716 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3717 }
3718
3719 /*
3720 * Start committing the register changes.
3721 */
3722 /** @todo research/testcase: Figure out what VT-x and AMD-V loads into the
3723 * hidden registers when interrupting 32-bit or 16-bit code! */
3724 if (uNewCpl != uOldCpl)
3725 {
3726 pVCpu->cpum.GstCtx.ss.Sel = 0 | uNewCpl;
3727 pVCpu->cpum.GstCtx.ss.ValidSel = 0 | uNewCpl;
3728 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3729 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
3730 pVCpu->cpum.GstCtx.ss.u64Base = 0;
3731 pVCpu->cpum.GstCtx.ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
3732 }
3733 pVCpu->cpum.GstCtx.rsp = uNewRsp - cbStackFrame;
3734 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3735 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3736 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3737 pVCpu->cpum.GstCtx.cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
3738 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3739 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3740 pVCpu->cpum.GstCtx.rip = uNewRip;
3741
3742 fEfl &= ~fEflToClear;
3743 IEMMISC_SET_EFL(pVCpu, fEfl);
3744
3745 if (fFlags & IEM_XCPT_FLAGS_CR2)
3746 pVCpu->cpum.GstCtx.cr2 = uCr2;
3747
3748 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3749 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3750
3751 iemRecalcExecModeAndCplFlags(pVCpu);
3752
3753 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3754}
3755
3756
3757/**
3758 * Implements exceptions and interrupts.
3759 *
3760 * All exceptions and interrupts go through this function!
3761 *
3762 * @returns VBox strict status code.
3763 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3764 * @param cbInstr The number of bytes to offset rIP by in the return
3765 * address.
3766 * @param u8Vector The interrupt / exception vector number.
3767 * @param fFlags The flags.
3768 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3769 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
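 *
 * @note    Callers normally use one of the iemRaiseXxx wrappers further down; e.g.
 *          iemRaiseGeneralProtectionFault0() is simply
 *          iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP,
 *          IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0).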
3770 */
3771VBOXSTRICTRC
3772iemRaiseXcptOrInt(PVMCPUCC pVCpu,
3773 uint8_t cbInstr,
3774 uint8_t u8Vector,
3775 uint32_t fFlags,
3776 uint16_t uErr,
3777 uint64_t uCr2) RT_NOEXCEPT
3778{
3779 /*
3780 * Get all the state that we might need here.
3781 */
3782 IEM_CTX_IMPORT_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3783 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3784
3785#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
3786 /*
3787 * Flush prefetch buffer
3788 */
3789 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
3790#endif
3791
3792 /*
3793 * Perform the V8086 IOPL check and upgrade the fault without nesting.
3794 */
3795 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1VM
3796 && pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL != 3
3797 && (fFlags & ( IEM_XCPT_FLAGS_T_SOFT_INT
3798 | IEM_XCPT_FLAGS_BP_INSTR
3799 | IEM_XCPT_FLAGS_ICEBP_INSTR
3800 | IEM_XCPT_FLAGS_OF_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
3801 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) )
3802 {
3803 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
3804 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
3805 u8Vector = X86_XCPT_GP;
3806 uErr = 0;
3807 }
3808#ifdef DBGFTRACE_ENABLED
3809 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
3810 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
3811 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp);
3812#endif
3813
3814 /*
3815 * Evaluate whether NMI blocking should be in effect.
3816 * Normally, NMI blocking is in effect whenever we inject an NMI.
3817 */
3818 bool fBlockNmi = u8Vector == X86_XCPT_NMI
3819 && (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT);
3820
3821#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3822 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3823 {
3824 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEvent(pVCpu, u8Vector, fFlags, uErr, uCr2, cbInstr);
3825 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
3826 return rcStrict0;
3827
3828 /* If virtual-NMI blocking is in effect for the nested-guest, guest NMIs are not blocked. */
3829 if (pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking)
3830 {
3831 Assert(CPUMIsGuestVmxPinCtlsSet(&pVCpu->cpum.GstCtx, VMX_PIN_CTLS_VIRT_NMI));
3832 fBlockNmi = false;
3833 }
3834 }
3835#endif
3836
3837#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
3838 if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
3839 {
3840 /*
3841 * If the event is being injected as part of VMRUN, it isn't subject to event
3842 * intercepts in the nested-guest. However, secondary exceptions that occur
3843 * during injection of any event -are- subject to exception intercepts.
3844 *
3845 * See AMD spec. 15.20 "Event Injection".
3846 */
3847 if (!pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents)
3848 pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents = true;
3849 else
3850 {
3851 /*
3852 * Check and handle if the event being raised is intercepted.
3853 */
3854 VBOXSTRICTRC rcStrict0 = iemHandleSvmEventIntercept(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
3855 if (rcStrict0 != VINF_SVM_INTERCEPT_NOT_ACTIVE)
3856 return rcStrict0;
3857 }
3858 }
3859#endif
3860
3861 /*
3862 * Set NMI blocking if necessary.
3863 */
3864 if (fBlockNmi)
3865 CPUMSetInterruptInhibitingByNmi(&pVCpu->cpum.GstCtx);
3866
3867 /*
3868 * Do recursion accounting.
3869 */
3870 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
3871 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
3872 if (pVCpu->iem.s.cXcptRecursions == 0)
3873 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
3874 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2));
3875 else
3876 {
3877 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
3878 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt,
3879 pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
3880
3881 if (pVCpu->iem.s.cXcptRecursions >= 4)
3882 {
3883#ifdef DEBUG_bird
3884 AssertFailed();
3885#endif
3886 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
3887 }
3888
3889 /*
3890 * Evaluate the sequence of recurring events.
3891 */
3892 IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevXcpt, uPrevXcpt, fFlags, u8Vector,
3893 NULL /* pXcptRaiseInfo */);
3894 if (enmRaise == IEMXCPTRAISE_CURRENT_XCPT)
3895 { /* likely */ }
3896 else if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
3897 {
3898 Log2(("iemRaiseXcptOrInt: Raising double fault. uPrevXcpt=%#x\n", uPrevXcpt));
3899 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
3900 u8Vector = X86_XCPT_DF;
3901 uErr = 0;
3902#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3903 /* VMX nested-guest #DF intercept needs to be checked here. */
3904 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3905 {
3906 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEventDoubleFault(pVCpu);
3907 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
3908 return rcStrict0;
3909 }
3910#endif
3911 /* SVM nested-guest #DF intercepts need to be checked now. See AMD spec. 15.12 "Exception Intercepts". */
3912 if (IEM_SVM_IS_XCPT_INTERCEPT_SET(pVCpu, X86_XCPT_DF))
3913 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
3914 }
3915 else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)
3916 {
3917 Log2(("iemRaiseXcptOrInt: Raising triple fault. uPrevXcpt=%#x\n", uPrevXcpt));
3918 return iemInitiateCpuShutdown(pVCpu);
3919 }
3920 else if (enmRaise == IEMXCPTRAISE_CPU_HANG)
3921 {
3922 /* If a nested-guest enters an endless CPU loop condition, we'll emulate it; otherwise guru. */
3923 Log2(("iemRaiseXcptOrInt: CPU hang condition detected\n"));
3924 if ( !CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu))
3925 && !CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
3926 return VERR_EM_GUEST_CPU_HANG;
3927 }
3928 else
3929 {
3930 AssertMsgFailed(("Unexpected condition! enmRaise=%#x uPrevXcpt=%#x fPrevXcpt=%#x, u8Vector=%#x fFlags=%#x\n",
3931 enmRaise, uPrevXcpt, fPrevXcpt, u8Vector, fFlags));
3932 return VERR_IEM_IPE_9;
3933 }
3934
3935 /*
3936 * The 'EXT' bit is set when an exception occurs during delivery of an external
3937 * event (such as an interrupt or an earlier exception)[1]. The privileged software
3938 * exception (INT1) also sets the EXT bit[2]. For exceptions generated by software
3939 * interrupts and the INTO and INT3 instructions, the 'EXT' bit is not set[3].
3940 *
3941 * [1] - Intel spec. 6.13 "Error Code"
3942 * [2] - Intel spec. 26.5.1.1 "Details of Vectored-Event Injection".
3943 * [3] - Intel Instruction reference for INT n.
3944 */
3945 if ( (fPrevXcpt & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR))
3946 && (fFlags & IEM_XCPT_FLAGS_ERR)
3947 && u8Vector != X86_XCPT_PF
3948 && u8Vector != X86_XCPT_DF)
3949 {
3950 uErr |= X86_TRAP_ERR_EXTERNAL;
3951 }
3952 }
3953
3954 pVCpu->iem.s.cXcptRecursions++;
3955 pVCpu->iem.s.uCurXcpt = u8Vector;
3956 pVCpu->iem.s.fCurXcpt = fFlags;
3957 pVCpu->iem.s.uCurXcptErr = uErr;
3958 pVCpu->iem.s.uCurXcptCr2 = uCr2;
3959
3960 /*
3961 * Extensive logging.
3962 */
3963#if defined(LOG_ENABLED) && defined(IN_RING3)
3964 if (LogIs3Enabled())
3965 {
3966 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR_MASK);
3967 PVM pVM = pVCpu->CTX_SUFF(pVM);
3968 char szRegs[4096];
3969 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
3970 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
3971 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
3972 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
3973 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
3974 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
3975 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
3976 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
3977 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
3978 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
3979 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
3980 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
3981 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
3982 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
3983 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
3984 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
3985 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
3986 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
3987 " efer=%016VR{efer}\n"
3988 " pat=%016VR{pat}\n"
3989 " sf_mask=%016VR{sf_mask}\n"
3990 "krnl_gs_base=%016VR{krnl_gs_base}\n"
3991 " lstar=%016VR{lstar}\n"
3992 " star=%016VR{star} cstar=%016VR{cstar}\n"
3993 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
3994 );
3995
3996 char szInstr[256];
3997 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
3998 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
3999 szInstr, sizeof(szInstr), NULL);
4000 Log3(("%s%s\n", szRegs, szInstr));
4001 }
4002#endif /* LOG_ENABLED */
4003
4004 /*
4005 * Stats.
4006 */
4007 if (!(fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT))
4008 STAM_REL_STATS({ pVCpu->iem.s.aStatInts[u8Vector] += 1; });
4009 else if (u8Vector <= X86_XCPT_LAST)
4010 {
4011 STAM_REL_COUNTER_INC(&pVCpu->iem.s.aStatXcpts[u8Vector]);
4012 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_XCPT, u8Vector),
4013 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base, ASMReadTSC());
4014 }
4015
4016 /*
4017 * A #PF implies an INVLPG for the CR2 value (see 4.10.1.1 in Intel SDM Vol 3)
4018 * to ensure that a stale TLB or paging cache entry will only cause one
4019 * spurious #PF.
4020 */
4021 if ( u8Vector == X86_XCPT_PF
4022 && (fFlags & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_CR2)) == (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_CR2))
4023 IEMTlbInvalidatePage(pVCpu, uCr2);
4024
4025 /*
4026 * Call the mode specific worker function.
4027 */
4028 VBOXSTRICTRC rcStrict;
4029 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
4030 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4031 else if (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LMA)
4032 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4033 else
4034 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4035
4036 /* Flush the prefetch buffer. */
4037#ifdef IEM_WITH_CODE_TLB
4038 pVCpu->iem.s.pbInstrBuf = NULL;
4039#else
4040 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4041#endif
4042
4043 /*
4044 * Unwind.
4045 */
4046 pVCpu->iem.s.cXcptRecursions--;
4047 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
4048 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
4049 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u depth=%d\n",
4050 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel,
4051 pVCpu->cpum.GstCtx.esp, IEM_GET_CPL(pVCpu), pVCpu->iem.s.cXcptRecursions + 1));
4052 return rcStrict;
4053}
4054
4055#ifdef IEM_WITH_SETJMP
4056/**
4057 * See iemRaiseXcptOrInt. Will not return.
4058 */
4059DECL_NO_RETURN(void)
4060iemRaiseXcptOrIntJmp(PVMCPUCC pVCpu,
4061 uint8_t cbInstr,
4062 uint8_t u8Vector,
4063 uint32_t fFlags,
4064 uint16_t uErr,
4065 uint64_t uCr2) IEM_NOEXCEPT_MAY_LONGJMP
4066{
4067 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4068 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
4069}
4070#endif
4071
4072
4073/** \#DE - 00. */
4074VBOXSTRICTRC iemRaiseDivideError(PVMCPUCC pVCpu) RT_NOEXCEPT
4075{
4076 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4077}
4078
4079
4080/** \#DB - 01.
4081 * @note This automatically clears DR7.GD. */
4082VBOXSTRICTRC iemRaiseDebugException(PVMCPUCC pVCpu) RT_NOEXCEPT
4083{
4084 /* This always clears RF (via IEM_XCPT_FLAGS_DRx_INSTR_BP). */
4085 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
4086 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_DRx_INSTR_BP, 0, 0);
4087}
4088
4089
4090/** \#BR - 05. */
4091VBOXSTRICTRC iemRaiseBoundRangeExceeded(PVMCPUCC pVCpu) RT_NOEXCEPT
4092{
4093 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_BR, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4094}
4095
4096
4097/** \#UD - 06. */
4098VBOXSTRICTRC iemRaiseUndefinedOpcode(PVMCPUCC pVCpu) RT_NOEXCEPT
4099{
4100 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4101}
4102
4103
4104/** \#NM - 07. */
4105VBOXSTRICTRC iemRaiseDeviceNotAvailable(PVMCPUCC pVCpu) RT_NOEXCEPT
4106{
4107 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4108}
4109
4110
4111/** \#TS(err) - 0a. */
4112VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4113{
4114 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4115}
4116
4117
4118/** \#TS(tr) - 0a. */
4119VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPUCC pVCpu) RT_NOEXCEPT
4120{
4121 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4122 pVCpu->cpum.GstCtx.tr.Sel, 0);
4123}
4124
4125
4126/** \#TS(0) - 0a. */
4127VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPUCC pVCpu) RT_NOEXCEPT
4128{
4129 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4130 0, 0);
4131}
4132
4133
4134/** \#TS(err) - 0a. */
4135VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4136{
4137 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4138 uSel & X86_SEL_MASK_OFF_RPL, 0);
4139}
4140
4141
4142/** \#NP(err) - 0b. */
4143VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4144{
4145 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4146}
4147
4148
4149/** \#NP(sel) - 0b. */
4150VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4151{
4152 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4153 uSel & ~X86_SEL_RPL, 0);
4154}
4155
4156
4157/** \#SS(seg) - 0c. */
4158VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4159{
4160 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4161 uSel & ~X86_SEL_RPL, 0);
4162}
4163
4164
4165/** \#SS(err) - 0c. */
4166VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4167{
4168 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4169}
4170
4171
4172/** \#GP(n) - 0d. */
4173VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4174{
4175 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4176}
4177
4178
4179/** \#GP(0) - 0d. */
4180VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPUCC pVCpu) RT_NOEXCEPT
4181{
4182 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4183}
4184
4185#ifdef IEM_WITH_SETJMP
4186/** \#GP(0) - 0d. */
4187DECL_NO_RETURN(void) iemRaiseGeneralProtectionFault0Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4188{
4189 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4190}
4191#endif
4192
4193
4194/** \#GP(sel) - 0d. */
4195VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
4196{
4197 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4198 Sel & ~X86_SEL_RPL, 0);
4199}
4200
4201
4202/** \#GP(0) - 0d. */
4203VBOXSTRICTRC iemRaiseNotCanonical(PVMCPUCC pVCpu) RT_NOEXCEPT
4204{
4205 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4206}
4207
4208
4209/** \#GP(sel) - 0d. */
4210VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4211{
4212 NOREF(iSegReg); NOREF(fAccess);
4213 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4214 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4215}
4216
4217#ifdef IEM_WITH_SETJMP
4218/** \#GP(sel) - 0d, longjmp. */
4219DECL_NO_RETURN(void) iemRaiseSelectorBoundsJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP
4220{
4221 NOREF(iSegReg); NOREF(fAccess);
4222 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4223 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4224}
4225#endif
4226
4227/** \#GP(sel) - 0d. */
4228VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
4229{
4230 NOREF(Sel);
4231 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4232}
4233
4234#ifdef IEM_WITH_SETJMP
4235/** \#GP(sel) - 0d, longjmp. */
4236DECL_NO_RETURN(void) iemRaiseSelectorBoundsBySelectorJmp(PVMCPUCC pVCpu, RTSEL Sel) IEM_NOEXCEPT_MAY_LONGJMP
4237{
4238 NOREF(Sel);
4239 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4240}
4241#endif
4242
4243
4244/** \#GP(sel) - 0d. */
4245VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4246{
4247 NOREF(iSegReg); NOREF(fAccess);
4248 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4249}
4250
4251#ifdef IEM_WITH_SETJMP
4252/** \#GP(sel) - 0d, longjmp. */
4253DECL_NO_RETURN(void) iemRaiseSelectorInvalidAccessJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP
4254{
4255 NOREF(iSegReg); NOREF(fAccess);
4256 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4257}
4258#endif
4259
4260
4261/** \#PF(n) - 0e. */
4262VBOXSTRICTRC iemRaisePageFault(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess, uint32_t fAccess, int rc) RT_NOEXCEPT
4263{
4264 uint16_t uErr;
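     /* Translate the paging status code into the architectural #PF error code bits
        assembled below: P (protection violation), US (CPL 3 access), ID (instruction
        fetch with NX enabled) and RW (write access). Reserved-bit faults are not
        handled here yet (see the @todo). */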
4265 switch (rc)
4266 {
4267 case VERR_PAGE_NOT_PRESENT:
4268 case VERR_PAGE_TABLE_NOT_PRESENT:
4269 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
4270 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
4271 uErr = 0;
4272 break;
4273
4274 default:
4275 AssertMsgFailed(("%Rrc\n", rc));
4276 RT_FALL_THRU();
4277 case VERR_ACCESS_DENIED:
4278 uErr = X86_TRAP_PF_P;
4279 break;
4280
4281 /** @todo reserved */
4282 }
4283
4284 if (IEM_GET_CPL(pVCpu) == 3)
4285 uErr |= X86_TRAP_PF_US;
4286
4287 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
4288 && ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
4289 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) ) )
4290 uErr |= X86_TRAP_PF_ID;
4291
4292#if 0 /* This is so much nonsense, really. Why was it done like that? */
4293 /* Note! RW access callers reporting a WRITE protection fault will clear
4294 the READ flag before calling. So, read-modify-write accesses (RW)
4295 can safely be reported as READ faults. */
4296 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
4297 uErr |= X86_TRAP_PF_RW;
4298#else
4299 if (fAccess & IEM_ACCESS_TYPE_WRITE)
4300 {
4301 /// @todo r=bird: bs3-cpu-basic-2 wants X86_TRAP_PF_RW for xchg and cmpxchg
4302 /// (regardless of outcome of the comparison in the latter case).
4303 //if (!(fAccess & IEM_ACCESS_TYPE_READ))
4304 uErr |= X86_TRAP_PF_RW;
4305 }
4306#endif
4307
4308 /* For FXSAVE and FRSTOR the #PF is typically reported at the max address
4309 of the memory operand rather than at the start of it. (Not sure what
4310 happens if it crosses a page boundary.) The current heuristic for
4311 this is to report the #PF for the last byte if the access is more than
4312 64 bytes. This is probably not correct, but we can work that out later,
4313 main objective now is to get FXSAVE to work like for real hardware and
4314 make bs3-cpu-basic2 work. */
4315 if (cbAccess <= 64)
4316 { /* likely */ }
4317 else
4318 GCPtrWhere += cbAccess - 1;
4319
4320 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
4321 uErr, GCPtrWhere);
4322}
4323
4324#ifdef IEM_WITH_SETJMP
4325/** \#PF(n) - 0e, longjmp. */
4326DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess,
4327 uint32_t fAccess, int rc) IEM_NOEXCEPT_MAY_LONGJMP
4328{
4329 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, cbAccess, fAccess, rc)));
4330}
4331#endif
4332
4333
4334/** \#MF(0) - 10. */
4335VBOXSTRICTRC iemRaiseMathFault(PVMCPUCC pVCpu) RT_NOEXCEPT
4336{
4337 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_NE)
4338 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4339
4340 /* Convert a #MF into a FERR -> IRQ 13. See @bugref{6117}. */
4341 PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13 /* u8Irq */, 1 /* u8Level */, 0 /* uTagSrc */);
4342 return iemRegUpdateRipAndFinishClearingRF(pVCpu);
4343}
4344
4345
4346/** \#AC(0) - 11. */
4347VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPUCC pVCpu) RT_NOEXCEPT
4348{
4349 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4350}
4351
4352#ifdef IEM_WITH_SETJMP
4353/** \#AC(0) - 11, longjmp. */
4354DECL_NO_RETURN(void) iemRaiseAlignmentCheckExceptionJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4355{
4356 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaiseAlignmentCheckException(pVCpu)));
4357}
4358#endif
4359
4360
4361/** \#XF(0)/\#XM(0) - 19. */
4362VBOXSTRICTRC iemRaiseSimdFpException(PVMCPUCC pVCpu) RT_NOEXCEPT
4363{
4364 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_XF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4365}
4366
4367
4368/** Accessed via IEMOP_RAISE_DIVIDE_ERROR. */
4369IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
4370{
4371 NOREF(cbInstr);
4372 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4373}
4374
4375
4376/** Accessed via IEMOP_RAISE_INVALID_LOCK_PREFIX. */
4377IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
4378{
4379 NOREF(cbInstr);
4380 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4381}
4382
4383
4384/** Accessed via IEMOP_RAISE_INVALID_OPCODE. */
4385IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
4386{
4387 NOREF(cbInstr);
4388 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4389}
4390
4391
4392/** @} */
4393
4394/** @name Common opcode decoders.
4395 * @{
4396 */
4397//#include <iprt/mem.h>
4398
4399/**
4400 * Used to add extra details about a stub case.
4401 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4402 */
4403void iemOpStubMsg2(PVMCPUCC pVCpu) RT_NOEXCEPT
4404{
4405#if defined(LOG_ENABLED) && defined(IN_RING3)
4406 PVM pVM = pVCpu->CTX_SUFF(pVM);
4407 char szRegs[4096];
4408 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
4409 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
4410 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
4411 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
4412 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
4413 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
4414 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
4415 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
4416 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
4417 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
4418 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
4419 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4420 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4421 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4422 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4423 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4424 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4425 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4426 " efer=%016VR{efer}\n"
4427 " pat=%016VR{pat}\n"
4428 " sf_mask=%016VR{sf_mask}\n"
4429 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4430 " lstar=%016VR{lstar}\n"
4431 " star=%016VR{star} cstar=%016VR{cstar}\n"
4432 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4433 );
4434
4435 char szInstr[256];
4436 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4437 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4438 szInstr, sizeof(szInstr), NULL);
4439
4440 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
4441#else
4442 RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs, pVCpu->cpum.GstCtx.rip);
4443#endif
4444}
4445
4446/** @} */
4447
4448
4449
4450/** @name Register Access.
4451 * @{
4452 */
4453
4454/**
4455 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
4456 *
4457 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4458 * segment limit.
4459 *
 * @returns Strict VBox status code.
4460 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4461 * @param cbInstr Instruction size.
4462 * @param offNextInstr The offset of the next instruction.
4463 * @param enmEffOpSize Effective operand size.
4464 */
4465VBOXSTRICTRC iemRegRipRelativeJumpS8AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int8_t offNextInstr,
4466 IEMMODE enmEffOpSize) RT_NOEXCEPT
4467{
4468 switch (enmEffOpSize)
4469 {
4470 case IEMMODE_16BIT:
4471 {
4472 uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + (int16_t)offNextInstr;
4473 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
4474 || IEM_IS_64BIT_CODE(pVCpu) /* no CS limit checks in 64-bit mode */))
4475 pVCpu->cpum.GstCtx.rip = uNewIp;
4476 else
4477 return iemRaiseGeneralProtectionFault0(pVCpu);
4478 break;
4479 }
4480
4481 case IEMMODE_32BIT:
4482 {
4483 Assert(!IEM_IS_64BIT_CODE(pVCpu));
4484 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
4485
4486 uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + (int32_t)offNextInstr;
4487 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
4488 pVCpu->cpum.GstCtx.rip = uNewEip;
4489 else
4490 return iemRaiseGeneralProtectionFault0(pVCpu);
4491 break;
4492 }
4493
4494 case IEMMODE_64BIT:
4495 {
4496 Assert(IEM_IS_64BIT_CODE(pVCpu));
4497
4498 uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
4499 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
4500 pVCpu->cpum.GstCtx.rip = uNewRip;
4501 else
4502 return iemRaiseGeneralProtectionFault0(pVCpu);
4503 break;
4504 }
4505
4506 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4507 }
4508
4509#ifndef IEM_WITH_CODE_TLB
4510 /* Flush the prefetch buffer. */
4511 pVCpu->iem.s.cbOpcode = cbInstr;
4512#endif
4513
4514 /*
4515 * Clear RF and finish the instruction (maybe raise #DB).
4516 */
4517 return iemRegFinishClearingRF(pVCpu);
4518}
4519
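/*
 * A minimal standalone sketch of the 16-bit relative-jump arithmetic used
 * above: the 8-bit displacement is sign extended and added to IP together
 * with the instruction length, and the sum wraps naturally at 64K because it
 * is computed in a uint16_t.  CalcNewIp16 is a hypothetical helper kept out
 * of the build; it is not part of the IEM code.
 */
#if 0 /* illustration only */
# include <assert.h>
# include <stdint.h>

static uint16_t CalcNewIp16(uint16_t uIp, uint8_t cbInstr, int8_t offRel)
{
    /* uint16_t arithmetic wraps modulo 64K, mirroring real 16-bit code. */
    return (uint16_t)(uIp + cbInstr + (int16_t)offRel);
}

int main(void)
{
    assert(CalcNewIp16(0x0100, 2, 0x10) == 0x0112); /* forward jump  */
    assert(CalcNewIp16(0x0100, 2, -4)   == 0x00fe); /* backward jump */
    assert(CalcNewIp16(0xfffe, 2, 0)    == 0x0000); /* wraps at 64K  */
    return 0;
}
#endif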
4520
4521/**
4522 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
4523 *
4524 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4525 * segment limit.
4526 *
4527 * @returns Strict VBox status code.
4528 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4529 * @param cbInstr Instruction size.
4530 * @param offNextInstr The offset of the next instruction.
4531 */
4532VBOXSTRICTRC iemRegRipRelativeJumpS16AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int16_t offNextInstr) RT_NOEXCEPT
4533{
4534 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
4535
4536 uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + offNextInstr;
4537 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
4538 || IEM_IS_64BIT_CODE(pVCpu) /* no limit checking in 64-bit mode */))
4539 pVCpu->cpum.GstCtx.rip = uNewIp;
4540 else
4541 return iemRaiseGeneralProtectionFault0(pVCpu);
4542
4543#ifndef IEM_WITH_CODE_TLB
4544 /* Flush the prefetch buffer. */
4545 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4546#endif
4547
4548 /*
4549 * Clear RF and finish the instruction (maybe raise #DB).
4550 */
4551 return iemRegFinishClearingRF(pVCpu);
4552}
4553
4554
4555/**
4556 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
4557 *
4558 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4559 * segment limit.
4560 *
4561 * @returns Strict VBox status code.
4562 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4563 * @param cbInstr Instruction size.
4564 * @param offNextInstr The offset of the next instruction.
4565 * @param enmEffOpSize Effective operand size.
4566 */
4567VBOXSTRICTRC iemRegRipRelativeJumpS32AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int32_t offNextInstr,
4568 IEMMODE enmEffOpSize) RT_NOEXCEPT
4569{
4570 if (enmEffOpSize == IEMMODE_32BIT)
4571 {
4572 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX); Assert(!IEM_IS_64BIT_CODE(pVCpu));
4573
4574 uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + offNextInstr;
4575 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
4576 pVCpu->cpum.GstCtx.rip = uNewEip;
4577 else
4578 return iemRaiseGeneralProtectionFault0(pVCpu);
4579 }
4580 else
4581 {
4582 Assert(enmEffOpSize == IEMMODE_64BIT);
4583
4584 uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
4585 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
4586 pVCpu->cpum.GstCtx.rip = uNewRip;
4587 else
4588 return iemRaiseGeneralProtectionFault0(pVCpu);
4589 }
4590
4591#ifndef IEM_WITH_CODE_TLB
4592 /* Flush the prefetch buffer. */
4593 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4594#endif
4595
4596 /*
4597 * Clear RF and finish the instruction (maybe raise #DB).
4598 */
4599 return iemRegFinishClearingRF(pVCpu);
4600}
4601
4602
4603/**
4604 * Performs a near jump to the specified address.
4605 *
4606 * May raise a \#GP(0) if the new IP is outside the code segment limit.
4607 *
4608 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4609 * @param uNewIp The new IP value.
4610 */
4611VBOXSTRICTRC iemRegRipJumpU16AndFinishClearningRF(PVMCPUCC pVCpu, uint16_t uNewIp) RT_NOEXCEPT
4612{
4613 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
4614 || IEM_IS_64BIT_CODE(pVCpu) /* no limit checks in 64-bit mode */))
4615 pVCpu->cpum.GstCtx.rip = uNewIp;
4616 else
4617 return iemRaiseGeneralProtectionFault0(pVCpu);
4618 /** @todo Test 16-bit jump in 64-bit mode. */
4619
4620#ifndef IEM_WITH_CODE_TLB
4621 /* Flush the prefetch buffer. */
4622 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4623#endif
4624
4625 /*
4626 * Clear RF and finish the instruction (maybe raise #DB).
4627 */
4628 return iemRegFinishClearingRF(pVCpu);
4629}
4630
4631
4632/**
4633 * Performs a near jump to the specified address.
4634 *
4635 * May raise a \#GP(0) if the new RIP is outside the code segment limit.
4636 *
4637 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4638 * @param uNewEip The new EIP value.
4639 */
4640VBOXSTRICTRC iemRegRipJumpU32AndFinishClearningRF(PVMCPUCC pVCpu, uint32_t uNewEip) RT_NOEXCEPT
4641{
4642 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
4643 Assert(!IEM_IS_64BIT_CODE(pVCpu));
4644
4645 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
4646 pVCpu->cpum.GstCtx.rip = uNewEip;
4647 else
4648 return iemRaiseGeneralProtectionFault0(pVCpu);
4649
4650#ifndef IEM_WITH_CODE_TLB
4651 /* Flush the prefetch buffer. */
4652 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4653#endif
4654
4655 /*
4656 * Clear RF and finish the instruction (maybe raise #DB).
4657 */
4658 return iemRegFinishClearingRF(pVCpu);
4659}
4660
4661
4662/**
4663 * Performs a near jump to the specified address.
4664 *
4665 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4666 * segment limit.
4667 *
4668 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4669 * @param uNewRip The new RIP value.
4670 */
4671VBOXSTRICTRC iemRegRipJumpU64AndFinishClearningRF(PVMCPUCC pVCpu, uint64_t uNewRip) RT_NOEXCEPT
4672{
4673 Assert(IEM_IS_64BIT_CODE(pVCpu));
4674
4675 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
4676 pVCpu->cpum.GstCtx.rip = uNewRip;
4677 else
4678 return iemRaiseGeneralProtectionFault0(pVCpu);
4679
4680#ifndef IEM_WITH_CODE_TLB
4681 /* Flush the prefetch buffer. */
4682 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4683#endif
4684
4685 /*
4686 * Clear RF and finish the instruction (maybe raise #DB).
4687 */
4688 return iemRegFinishClearingRF(pVCpu);
4689}
4690
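/*
 * Standalone sketch of the canonical address test applied to new RIP values
 * above: bits 63:47 must all be copies of bit 47.  IsCanonical is a
 * hypothetical helper, not the IEM_IS_CANONICAL macro itself.
 */
#if 0 /* illustration only */
# include <assert.h>
# include <stdbool.h>
# include <stdint.h>

static bool IsCanonical(uint64_t uAddr)
{
    uint64_t const uHigh17 = uAddr >> 47; /* bits 63:47 */
    return uHigh17 == 0 || uHigh17 == UINT64_C(0x1ffff);
}

int main(void)
{
    assert( IsCanonical(UINT64_C(0x00007fffffffffff))); /* top of the lower half    */
    assert( IsCanonical(UINT64_C(0xffff800000000000))); /* bottom of the upper half */
    assert(!IsCanonical(UINT64_C(0x0000800000000000))); /* inside the hole -> #GP   */
    return 0;
}
#endif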
4691/** @} */
4692
4693
4694/** @name FPU access and helpers.
4695 *
4696 * @{
4697 */
4698
4699/**
4700 * Updates the x87.DS and FPUDP registers.
4701 *
4702 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4703 * @param pFpuCtx The FPU context.
4704 * @param iEffSeg The effective segment register.
4705 * @param GCPtrEff The effective address relative to @a iEffSeg.
4706 */
4707DECLINLINE(void) iemFpuUpdateDP(PVMCPUCC pVCpu, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
4708{
4709 RTSEL sel;
4710 switch (iEffSeg)
4711 {
4712 case X86_SREG_DS: sel = pVCpu->cpum.GstCtx.ds.Sel; break;
4713 case X86_SREG_SS: sel = pVCpu->cpum.GstCtx.ss.Sel; break;
4714 case X86_SREG_CS: sel = pVCpu->cpum.GstCtx.cs.Sel; break;
4715 case X86_SREG_ES: sel = pVCpu->cpum.GstCtx.es.Sel; break;
4716 case X86_SREG_FS: sel = pVCpu->cpum.GstCtx.fs.Sel; break;
4717 case X86_SREG_GS: sel = pVCpu->cpum.GstCtx.gs.Sel; break;
4718 default:
4719 AssertMsgFailed(("%d\n", iEffSeg));
4720 sel = pVCpu->cpum.GstCtx.ds.Sel;
4721 }
4722 /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
4723 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
4724 {
4725 pFpuCtx->DS = 0;
4726 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
4727 }
4728 else if (!IEM_IS_LONG_MODE(pVCpu)) /** @todo this is weird. explain. */
4729 {
4730 pFpuCtx->DS = sel;
4731 pFpuCtx->FPUDP = GCPtrEff;
4732 }
4733 else
4734 *(uint64_t *)&pFpuCtx->FPUDP = GCPtrEff;
4735}
4736
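/*
 * Sketch of the real/V86-mode FPUDP value computed above: the data pointer is
 * stored as a linear address, i.e. selector * 16 plus offset, while the DS
 * field in the FPU image is left at zero.  RealModeFpuDp is a hypothetical
 * standalone helper, not the IEM code.
 */
#if 0 /* illustration only */
# include <assert.h>
# include <stdint.h>

static uint32_t RealModeFpuDp(uint16_t uSel, uint32_t offEff)
{
    return offEff + ((uint32_t)uSel << 4);
}

int main(void)
{
    /* DS=0x1234, offset 0x0010 -> linear address 0x12350. */
    assert(RealModeFpuDp(0x1234, 0x0010) == 0x12350);
    return 0;
}
#endif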
4737
4738/**
4739 * Rotates the stack registers in the push direction.
4740 *
4741 * @param pFpuCtx The FPU context.
4742 * @remarks This is a complete waste of time, but fxsave stores the registers in
4743 * stack order.
4744 */
4745DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
4746{
4747 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
4748 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
4749 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
4750 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
4751 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
4752 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
4753 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
4754 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
4755 pFpuCtx->aRegs[0].r80 = r80Tmp;
4756}
4757
4758
4759/**
4760 * Rotates the stack registers in the pop direction.
4761 *
4762 * @param pFpuCtx The FPU context.
4763 * @remarks This is a complete waste of time, but fxsave stores the registers in
4764 * stack order.
4765 */
4766DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
4767{
4768 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
4769 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
4770 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
4771 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
4772 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
4773 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
4774 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
4775 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
4776 pFpuCtx->aRegs[7].r80 = r80Tmp;
4777}
4778
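/*
 * Why the rotations above are needed, as a standalone sketch with
 * hypothetical names: aRegs[] is kept in ST(i) order, so when TOP moves down
 * by one (a push) every element must shift up by one so that the old ST(0)
 * becomes the new ST(1); popping shifts the other way.
 */
#if 0 /* illustration only */
# include <assert.h>
# include <stdint.h>

static void RotateForPush(uint32_t aSt[8])
{
    uint32_t const uTmp = aSt[7];
    for (unsigned i = 7; i > 0; i--)
        aSt[i] = aSt[i - 1];
    aSt[0] = uTmp;
}

int main(void)
{
    uint32_t aSt[8] = { 10, 11, 12, 13, 14, 15, 16, 17 }; /* aSt[i] = ST(i) */
    RotateForPush(aSt);
    assert(aSt[1] == 10); /* the old ST(0) is now ST(1)            */
    assert(aSt[0] == 17); /* slot for the value about to be pushed */
    return 0;
}
#endif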
4779
4780/**
4781 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
4782 * exception prevents it.
4783 *
4784 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4785 * @param pResult The FPU operation result to push.
4786 * @param pFpuCtx The FPU context.
4787 */
4788static void iemFpuMaybePushResult(PVMCPU pVCpu, PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx) RT_NOEXCEPT
4789{
4790 /* Update FSW and bail if there are pending exceptions afterwards. */
4791 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
4792 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
4793 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4794 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4795 {
4796 if ((fFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4797 Log11(("iemFpuMaybePushResult: %04x:%08RX64: FSW %#x -> %#x\n",
4798 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fFsw));
4799 pFpuCtx->FSW = fFsw;
4800 return;
4801 }
4802
4803 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
4804 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
4805 {
4806 /* All is fine, push the actual value. */
4807 pFpuCtx->FTW |= RT_BIT(iNewTop);
4808 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
4809 }
4810 else if (pFpuCtx->FCW & X86_FCW_IM)
4811 {
4812 /* Masked stack overflow, push QNaN. */
4813 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
4814 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
4815 }
4816 else
4817 {
4818 /* Raise stack overflow, don't push anything. */
4819 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
4820 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
4821 Log11(("iemFpuMaybePushResult: %04x:%08RX64: stack overflow (FSW=%#x)\n",
4822 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
4823 return;
4824 }
4825
4826 fFsw &= ~X86_FSW_TOP_MASK;
4827 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
4828 pFpuCtx->FSW = fFsw;
4829
4830 iemFpuRotateStackPush(pFpuCtx);
4831 RT_NOREF(pVCpu);
4832}
4833
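/*
 * The "pending exception" test above relies on the IE/DE/ZE flags in FSW
 * (bits 0, 1 and 2) sharing bit positions with the IM/DM/ZM mask bits in
 * FCW, so "flag set while its mask is clear" reduces to two ANDs.  The
 * constants and helper below are a hypothetical standalone sketch, not the
 * X86_FSW_XXX/X86_FCW_XXX definitions.
 */
#if 0 /* illustration only */
# include <assert.h>
# include <stdint.h>

# define F_IE  UINT16_C(0x0001) /* FSW.IE / FCW.IM */
# define F_DE  UINT16_C(0x0002) /* FSW.DE / FCW.DM */
# define F_ZE  UINT16_C(0x0004) /* FSW.ZE / FCW.ZM */

static int HasUnmaskedPendingXcpt(uint16_t fFsw, uint16_t fFcw)
{
    return ((fFsw & (F_IE | F_ZE | F_DE)) & ~(fFcw & (F_IE | F_ZE | F_DE))) != 0;
}

int main(void)
{
    assert(!HasUnmaskedPendingXcpt(F_IE, F_IE));        /* IE pending but masked */
    assert( HasUnmaskedPendingXcpt(F_IE, 0));           /* IE pending, unmasked  */
    assert(!HasUnmaskedPendingXcpt(0, 0));              /* nothing pending       */
    assert( HasUnmaskedPendingXcpt(F_ZE | F_IE, F_IE)); /* ZE unmasked           */
    return 0;
}
#endif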
4834
4835/**
4836 * Stores a result in a FPU register and updates the FSW and FTW.
4837 *
4838 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4839 * @param pFpuCtx The FPU context.
4840 * @param pResult The result to store.
4841 * @param iStReg Which FPU register to store it in.
4842 */
4843static void iemFpuStoreResultOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg) RT_NOEXCEPT
4844{
4845 Assert(iStReg < 8);
4846 uint16_t fNewFsw = pFpuCtx->FSW;
4847 uint16_t const iReg = (X86_FSW_TOP_GET(fNewFsw) + iStReg) & X86_FSW_TOP_SMASK;
4848 fNewFsw &= ~X86_FSW_C_MASK;
4849 fNewFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
4850 if ((fNewFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4851 Log11(("iemFpuStoreResultOnly: %04x:%08RX64: FSW %#x -> %#x\n",
4852 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fNewFsw));
4853 pFpuCtx->FSW = fNewFsw;
4854 pFpuCtx->FTW |= RT_BIT(iReg);
4855 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
4856 RT_NOREF(pVCpu);
4857}
4858
4859
4860/**
4861 * Only updates the FPU status word (FSW) with the result of the current
4862 * instruction.
4863 *
4864 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4865 * @param pFpuCtx The FPU context.
4866 * @param u16FSW The FSW output of the current instruction.
4867 */
4868static void iemFpuUpdateFSWOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint16_t u16FSW) RT_NOEXCEPT
4869{
4870 uint16_t fNewFsw = pFpuCtx->FSW;
4871 fNewFsw &= ~X86_FSW_C_MASK;
4872 fNewFsw |= u16FSW & ~X86_FSW_TOP_MASK;
4873 if ((fNewFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4874 Log11(("iemFpuUpdateFSWOnly: %04x:%08RX64: FSW %#x -> %#x\n",
4875 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fNewFsw));
4876 pFpuCtx->FSW = fNewFsw;
4877 RT_NOREF(pVCpu);
4878}
4879
4880
4881/**
4882 * Pops one item off the FPU stack if no pending exception prevents it.
4883 *
4884 * @param pFpuCtx The FPU context.
4885 */
4886static void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx) RT_NOEXCEPT
4887{
4888 /* Check pending exceptions. */
4889 uint16_t uFSW = pFpuCtx->FSW;
4890 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4891 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4892 return;
4893
4894 /* TOP--. */
4895 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
4896 uFSW &= ~X86_FSW_TOP_MASK;
4897 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
4898 pFpuCtx->FSW = uFSW;
4899
4900 /* Mark the previous ST0 as empty. */
4901 iOldTop >>= X86_FSW_TOP_SHIFT;
4902 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
4903
4904 /* Rotate the registers. */
4905 iemFpuRotateStackPop(pFpuCtx);
4906}
4907
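/*
 * TOP arithmetic in the helpers above, as a standalone sketch with
 * hypothetical constants: the push paths compute the new TOP as
 * (TOP + 7) & 7, i.e. TOP - 1 modulo 8, while iemFpuMaybePopOne adds
 * 9 << X86_FSW_TOP_SHIFT to the in-place TOP field, which is the same as
 * adding 1 modulo 8 since 9 == 1 (mod 8).
 */
#if 0 /* illustration only */
# include <assert.h>
# include <stdint.h>

# define TOP_SHIFT 11                         /* TOP lives in FSW bits 13:11 */
# define TOP_MASK  (UINT16_C(7) << TOP_SHIFT)

/* New TOP for a push: (TOP + 7) & 7 == (TOP - 1) mod 8. */
static uint16_t DecTopModulo8(uint16_t iTop)
{
    return (uint16_t)((iTop + 7) & 7);
}

/* TOP++ done in place on the FSW field. */
static uint16_t IncTopInFsw(uint16_t fFsw)
{
    uint16_t const iOldTop = fFsw & TOP_MASK;
    fFsw &= ~TOP_MASK;
    fFsw |= (uint16_t)((iOldTop + (UINT16_C(9) << TOP_SHIFT)) & TOP_MASK);
    return fFsw;
}

int main(void)
{
    assert(DecTopModulo8(0) == 7);                                        /* push wraps 0 -> 7 */
    assert(DecTopModulo8(3) == 2);
    assert((IncTopInFsw((uint16_t)(3u << TOP_SHIFT)) >> TOP_SHIFT) == 4); /* pop: 3 -> 4       */
    assert((IncTopInFsw((uint16_t)(7u << TOP_SHIFT)) >> TOP_SHIFT) == 0); /* pop wraps 7 -> 0  */
    return 0;
}
#endif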
4908
4909/**
4910 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
4911 *
4912 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4913 * @param pResult The FPU operation result to push.
4914 */
4915void iemFpuPushResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult) RT_NOEXCEPT
4916{
4917 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4918 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4919 iemFpuMaybePushResult(pVCpu, pResult, pFpuCtx);
4920}
4921
4922
4923/**
4924 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
4925 * and sets FPUDP and FPUDS.
4926 *
4927 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4928 * @param pResult The FPU operation result to push.
4929 * @param iEffSeg The effective segment register.
4930 * @param GCPtrEff The effective address relative to @a iEffSeg.
4931 */
4932void iemFpuPushResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
4933{
4934 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4935 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
4936 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4937 iemFpuMaybePushResult(pVCpu, pResult, pFpuCtx);
4938}
4939
4940
4941/**
4942 * Replace ST0 with the first value and push the second onto the FPU stack,
4943 * unless a pending exception prevents it.
4944 *
4945 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4946 * @param pResult The FPU operation result to store and push.
4947 */
4948void iemFpuPushResultTwo(PVMCPUCC pVCpu, PIEMFPURESULTTWO pResult) RT_NOEXCEPT
4949{
4950 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4951 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4952
4953 /* Update FSW and bail if there are pending exceptions afterwards. */
4954 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
4955 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
4956 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4957 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4958 {
4959 if ((fFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4960 Log11(("iemFpuPushResultTwo: %04x:%08RX64: FSW %#x -> %#x\n",
4961 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fFsw));
4962 pFpuCtx->FSW = fFsw;
4963 return;
4964 }
4965
4966 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
4967 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
4968 {
4969 /* All is fine, push the actual value. */
4970 pFpuCtx->FTW |= RT_BIT(iNewTop);
4971 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
4972 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
4973 }
4974 else if (pFpuCtx->FCW & X86_FCW_IM)
4975 {
4976 /* Masked stack overflow, push QNaN. */
4977 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
4978 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
4979 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
4980 }
4981 else
4982 {
4983 /* Raise stack overflow, don't push anything. */
4984 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
4985 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
4986 Log11(("iemFpuPushResultTwo: %04x:%08RX64: stack overflow (FSW=%#x)\n",
4987 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
4988 return;
4989 }
4990
4991 fFsw &= ~X86_FSW_TOP_MASK;
4992 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
4993 pFpuCtx->FSW = fFsw;
4994
4995 iemFpuRotateStackPush(pFpuCtx);
4996}
4997
4998
4999/**
5000 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
5001 * FOP.
5002 *
5003 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5004 * @param pResult The result to store.
5005 * @param iStReg Which FPU register to store it in.
5006 */
5007void iemFpuStoreResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg) RT_NOEXCEPT
5008{
5009 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5010 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5011 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5012}
5013
5014
5015/**
5016 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
5017 * FOP, and then pops the stack.
5018 *
5019 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5020 * @param pResult The result to store.
5021 * @param iStReg Which FPU register to store it in.
5022 */
5023void iemFpuStoreResultThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg) RT_NOEXCEPT
5024{
5025 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5026 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5027 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5028 iemFpuMaybePopOne(pFpuCtx);
5029}
5030
5031
5032/**
5033 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
5034 * FPUDP, and FPUDS.
5035 *
5036 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5037 * @param pResult The result to store.
5038 * @param iStReg Which FPU register to store it in.
5039 * @param iEffSeg The effective memory operand selector register.
5040 * @param GCPtrEff The effective memory operand offset.
5041 */
5042void iemFpuStoreResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
5043 uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
5044{
5045 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5046 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5047 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5048 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5049}
5050
5051
5052/**
5053 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
5054 * FPUDP, and FPUDS, and then pops the stack.
5055 *
5056 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5057 * @param pResult The result to store.
5058 * @param iStReg Which FPU register to store it in.
5059 * @param iEffSeg The effective memory operand selector register.
5060 * @param GCPtrEff The effective memory operand offset.
5061 */
5062void iemFpuStoreResultWithMemOpThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult,
5063 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
5064{
5065 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5066 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5067 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5068 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5069 iemFpuMaybePopOne(pFpuCtx);
5070}
5071
5072
5073/**
5074 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
5075 *
5076 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5077 */
5078void iemFpuUpdateOpcodeAndIp(PVMCPUCC pVCpu) RT_NOEXCEPT
5079{
5080 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5081 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5082}
5083
5084
5085/**
5086 * Updates the FSW, FOP, FPUIP, and FPUCS.
5087 *
5088 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5089 * @param u16FSW The FSW from the current instruction.
5090 */
5091void iemFpuUpdateFSW(PVMCPUCC pVCpu, uint16_t u16FSW) RT_NOEXCEPT
5092{
5093 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5094 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5095 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5096}
5097
5098
5099/**
5100 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
5101 *
5102 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5103 * @param u16FSW The FSW from the current instruction.
5104 */
5105void iemFpuUpdateFSWThenPop(PVMCPUCC pVCpu, uint16_t u16FSW) RT_NOEXCEPT
5106{
5107 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5108 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5109 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5110 iemFpuMaybePopOne(pFpuCtx);
5111}
5112
5113
5114/**
5115 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
5116 *
5117 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5118 * @param u16FSW The FSW from the current instruction.
5119 * @param iEffSeg The effective memory operand selector register.
5120 * @param GCPtrEff The effective memory operand offset.
5121 */
5122void iemFpuUpdateFSWWithMemOp(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
5123{
5124 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5125 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5126 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5127 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5128}
5129
5130
5131/**
5132 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
5133 *
5134 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5135 * @param u16FSW The FSW from the current instruction.
5136 */
5137void iemFpuUpdateFSWThenPopPop(PVMCPUCC pVCpu, uint16_t u16FSW) RT_NOEXCEPT
5138{
5139 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5140 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5141 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5142 iemFpuMaybePopOne(pFpuCtx);
5143 iemFpuMaybePopOne(pFpuCtx);
5144}
5145
5146
5147/**
5148 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
5149 *
5150 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5151 * @param u16FSW The FSW from the current instruction.
5152 * @param iEffSeg The effective memory operand selector register.
5153 * @param GCPtrEff The effective memory operand offset.
5154 */
5155void iemFpuUpdateFSWWithMemOpThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
5156{
5157 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5158 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5159 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5160 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5161 iemFpuMaybePopOne(pFpuCtx);
5162}
5163
5164
5165/**
5166 * Worker routine for raising an FPU stack underflow exception.
5167 *
5168 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5169 * @param pFpuCtx The FPU context.
5170 * @param iStReg The stack register being accessed.
5171 */
5172static void iemFpuStackUnderflowOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint8_t iStReg)
5173{
5174 Assert(iStReg < 8 || iStReg == UINT8_MAX);
5175 if (pFpuCtx->FCW & X86_FCW_IM)
5176 {
5177 /* Masked underflow. */
5178 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5179 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5180 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5181 if (iStReg != UINT8_MAX)
5182 {
5183 pFpuCtx->FTW |= RT_BIT(iReg);
5184 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
5185 }
5186 }
5187 else
5188 {
5189 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5190 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5191 Log11(("iemFpuStackUnderflowOnly: %04x:%08RX64: underflow (FSW=%#x)\n",
5192 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5193 }
5194 RT_NOREF(pVCpu);
5195}
5196
5197
5198/**
5199 * Raises a FPU stack underflow exception.
5200 *
5201 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5202 * @param iStReg The destination register that should be loaded
5203 * with QNaN if \#IS is not masked. Specify
5204 * UINT8_MAX if none (like for fcom).
5205 */
5206void iemFpuStackUnderflow(PVMCPUCC pVCpu, uint8_t iStReg) RT_NOEXCEPT
5207{
5208 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5209 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5210 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5211}
5212
5213
5214void iemFpuStackUnderflowWithMemOp(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
5215{
5216 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5217 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5218 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5219 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5220}
5221
5222
5223void iemFpuStackUnderflowThenPop(PVMCPUCC pVCpu, uint8_t iStReg) RT_NOEXCEPT
5224{
5225 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5226 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5227 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5228 iemFpuMaybePopOne(pFpuCtx);
5229}
5230
5231
5232void iemFpuStackUnderflowWithMemOpThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
5233{
5234 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5235 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5236 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5237 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5238 iemFpuMaybePopOne(pFpuCtx);
5239}
5240
5241
5242void iemFpuStackUnderflowThenPopPop(PVMCPUCC pVCpu) RT_NOEXCEPT
5243{
5244 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5245 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5246 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, UINT8_MAX);
5247 iemFpuMaybePopOne(pFpuCtx);
5248 iemFpuMaybePopOne(pFpuCtx);
5249}
5250
5251
5252void iemFpuStackPushUnderflow(PVMCPUCC pVCpu) RT_NOEXCEPT
5253{
5254 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5255 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5256
5257 if (pFpuCtx->FCW & X86_FCW_IM)
5258 {
5259 /* Masked underflow - Push QNaN. */
5260 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5261 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5262 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5263 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5264 pFpuCtx->FTW |= RT_BIT(iNewTop);
5265 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5266 iemFpuRotateStackPush(pFpuCtx);
5267 }
5268 else
5269 {
5270 /* Exception pending - don't change TOP or the register stack. */
5271 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5272 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5273 Log11(("iemFpuStackPushUnderflow: %04x:%08RX64: underflow (FSW=%#x)\n",
5274 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5275 }
5276}
5277
5278
5279void iemFpuStackPushUnderflowTwo(PVMCPUCC pVCpu) RT_NOEXCEPT
5280{
5281 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5282 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5283
5284 if (pFpuCtx->FCW & X86_FCW_IM)
5285 {
5286 /* Masked underflow - Push QNaN. */
5287 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5288 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5289 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5290 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5291 pFpuCtx->FTW |= RT_BIT(iNewTop);
5292 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
5293 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5294 iemFpuRotateStackPush(pFpuCtx);
5295 }
5296 else
5297 {
5298 /* Exception pending - don't change TOP or the register stack. */
5299 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5300 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5301 Log11(("iemFpuStackPushUnderflowTwo: %04x:%08RX64: underflow (FSW=%#x)\n",
5302 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5303 }
5304}
5305
5306
5307/**
5308 * Worker routine for raising an FPU stack overflow exception on a push.
5309 *
5310 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5311 * @param pFpuCtx The FPU context.
5312 */
5313static void iemFpuStackPushOverflowOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx) RT_NOEXCEPT
5314{
5315 if (pFpuCtx->FCW & X86_FCW_IM)
5316 {
5317 /* Masked overflow. */
5318 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5319 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5320 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
5321 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5322 pFpuCtx->FTW |= RT_BIT(iNewTop);
5323 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5324 iemFpuRotateStackPush(pFpuCtx);
5325 }
5326 else
5327 {
5328 /* Exception pending - don't change TOP or the register stack. */
5329 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5330 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5331 Log11(("iemFpuStackPushOverflowOnly: %04x:%08RX64: overflow (FSW=%#x)\n",
5332 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5333 }
5334 RT_NOREF(pVCpu);
5335}
5336
5337
5338/**
5339 * Raises a FPU stack overflow exception on a push.
5340 *
5341 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5342 */
5343void iemFpuStackPushOverflow(PVMCPUCC pVCpu) RT_NOEXCEPT
5344{
5345 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5346 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5347 iemFpuStackPushOverflowOnly(pVCpu, pFpuCtx);
5348}
5349
5350
5351/**
5352 * Raises a FPU stack overflow exception on a push with a memory operand.
5353 *
5354 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5355 * @param iEffSeg The effective memory operand selector register.
5356 * @param GCPtrEff The effective memory operand offset.
5357 */
5358void iemFpuStackPushOverflowWithMemOp(PVMCPUCC pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
5359{
5360 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5361 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5362 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5363 iemFpuStackPushOverflowOnly(pVCpu, pFpuCtx);
5364}
5365
5366/** @} */
5367
5368
5369/** @name SSE+AVX SIMD access and helpers.
5370 *
5371 * @{
5372 */
5373/**
5374 * Stores a result in a SIMD XMM register, updates the MXCSR.
5375 *
5376 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5377 * @param pResult The result to store.
5378 * @param iXmmReg Which SIMD XMM register to store the result in.
5379 */
5380void iemSseStoreResult(PVMCPUCC pVCpu, PCIEMSSERESULT pResult, uint8_t iXmmReg) RT_NOEXCEPT
5381{
5382 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5383 pFpuCtx->MXCSR |= pResult->MXCSR & X86_MXCSR_XCPT_FLAGS;
5384
5385 /* The result is only updated if there is no unmasked exception pending. */
5386 if (( ~((pFpuCtx->MXCSR & X86_MXCSR_XCPT_MASK) >> X86_MXCSR_XCPT_MASK_SHIFT)
5387 & (pFpuCtx->MXCSR & X86_MXCSR_XCPT_FLAGS)) == 0)
5388 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXmmReg] = pResult->uResult;
5389}
5390
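/*
 * The MXCSR test above shifts the exception mask bits (MXCSR[12:7]) down so
 * they line up with the exception flag bits (MXCSR[5:0]); a flag that is set
 * while its mask bit is clear means an unmasked SIMD exception is pending and
 * the destination register is left untouched.  The constants and helper below
 * are a hypothetical standalone sketch, not the X86_MXCSR_XXX definitions.
 */
#if 0 /* illustration only */
# include <assert.h>
# include <stdint.h>

# define MXCSR_XCPT_FLAGS      UINT32_C(0x003f) /* IE DE ZE OE UE PE */
# define MXCSR_XCPT_MASK       UINT32_C(0x1f80) /* IM DM ZM OM UM PM */
# define MXCSR_XCPT_MASK_SHIFT 7

static int MxcsrHasUnmaskedXcpt(uint32_t fMxcsr)
{
    return (  ~((fMxcsr & MXCSR_XCPT_MASK) >> MXCSR_XCPT_MASK_SHIFT)
            & (fMxcsr & MXCSR_XCPT_FLAGS)) != 0;
}

int main(void)
{
    assert(!MxcsrHasUnmaskedXcpt(UINT32_C(0x1f80))); /* all masked, no flags   */
    assert(!MxcsrHasUnmaskedXcpt(UINT32_C(0x1f81))); /* IE flag set but masked */
    assert( MxcsrHasUnmaskedXcpt(UINT32_C(0x1f01))); /* IE flag set, IM clear  */
    return 0;
}
#endif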
5391
5392/**
5393 * Updates the MXCSR.
5394 *
5395 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5396 * @param fMxcsr The new MXCSR value.
5397 */
5398void iemSseUpdateMxcsr(PVMCPUCC pVCpu, uint32_t fMxcsr) RT_NOEXCEPT
5399{
5400 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5401 pFpuCtx->MXCSR |= fMxcsr & X86_MXCSR_XCPT_FLAGS;
5402}
5403/** @} */
5404
5405
5406/** @name Memory access.
5407 *
5408 * @{
5409 */
5410
5411
5412/**
5413 * Updates the IEMCPU::cbWritten counter if applicable.
5414 *
5415 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5416 * @param fAccess The access being accounted for.
5417 * @param cbMem The access size.
5418 */
5419DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPUCC pVCpu, uint32_t fAccess, size_t cbMem)
5420{
5421 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
5422 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
5423 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
5424}
5425
5426
5427/**
5428 * Applies the segment limit, base and attributes.
5429 *
5430 * This may raise a \#GP or \#SS.
5431 *
5432 * @returns VBox strict status code.
5433 *
5434 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5435 * @param fAccess The kind of access which is being performed.
5436 * @param iSegReg The index of the segment register to apply.
5437 * This is UINT8_MAX if none (for IDT, GDT, LDT,
5438 * TSS, ++).
5439 * @param cbMem The access size.
5440 * @param pGCPtrMem Pointer to the guest memory address to apply
5441 * segmentation to. Input and output parameter.
5442 */
5443VBOXSTRICTRC iemMemApplySegment(PVMCPUCC pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem) RT_NOEXCEPT
5444{
5445 if (iSegReg == UINT8_MAX)
5446 return VINF_SUCCESS;
5447
5448 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
5449 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
5450 switch (IEM_GET_CPU_MODE(pVCpu))
5451 {
5452 case IEMMODE_16BIT:
5453 case IEMMODE_32BIT:
5454 {
5455 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
5456 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
5457
5458 if ( pSel->Attr.n.u1Present
5459 && !pSel->Attr.n.u1Unusable)
5460 {
5461 Assert(pSel->Attr.n.u1DescType);
5462 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
5463 {
5464 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5465 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
5466 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
5467
5468 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
5469 {
5470 /** @todo CPL check. */
5471 }
5472
5473 /*
5474 * There are two kinds of data selectors, normal and expand down.
5475 */
5476 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
5477 {
5478 if ( GCPtrFirst32 > pSel->u32Limit
5479 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
5480 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5481 }
5482 else
5483 {
5484 /*
5485 * The upper boundary is defined by the B bit, not the G bit!
5486 */
5487 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
5488 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
5489 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5490 }
5491 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
5492 }
5493 else
5494 {
5495 /*
5496 * A code selector can usually be used to read through; writing is
5497 * only permitted in real and V8086 mode.
5498 */
5499 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5500 || ( (fAccess & IEM_ACCESS_TYPE_READ)
5501 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
5502 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
5503 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
5504
5505 if ( GCPtrFirst32 > pSel->u32Limit
5506 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
5507 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5508
5509 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
5510 {
5511 /** @todo CPL check. */
5512 }
5513
5514 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
5515 }
5516 }
5517 else
5518 return iemRaiseGeneralProtectionFault0(pVCpu);
5519 return VINF_SUCCESS;
5520 }
5521
5522 case IEMMODE_64BIT:
5523 {
5524 RTGCPTR GCPtrMem = *pGCPtrMem;
5525 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
5526 *pGCPtrMem = GCPtrMem + pSel->u64Base;
5527
5528 Assert(cbMem >= 1);
5529 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
5530 return VINF_SUCCESS;
5531 /** @todo We should probably raise \#SS(0) here if segment is SS; see AMD spec.
5532 * 4.12.2 "Data Limit Checks in 64-bit Mode". */
5533 return iemRaiseGeneralProtectionFault0(pVCpu);
5534 }
5535
5536 default:
5537 AssertFailedReturn(VERR_IEM_IPE_7);
5538 }
5539}
5540
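/*
 * Standalone sketch of the expand-down data segment check above: valid
 * offsets lie strictly above the limit and no higher than 0xffff (B=0) or
 * 0xffffffff (B=1), the opposite of a normal segment where offsets must not
 * exceed the limit.  ExpandDownAccessOk is a hypothetical helper, not the
 * IEM code.
 */
#if 0 /* illustration only */
# include <assert.h>
# include <stdbool.h>
# include <stdint.h>

static bool ExpandDownAccessOk(uint32_t offFirst, uint32_t offLast, uint32_t uLimit, bool fBigSeg)
{
    uint32_t const offMax = fBigSeg ? UINT32_MAX : UINT32_C(0xffff);
    return offFirst > uLimit && offLast <= offMax;
}

int main(void)
{
    /* A typical expand-down stack segment: limit 0x0fff, 16-bit (B=0). */
    assert( ExpandDownAccessOk(0x8000, 0x8003, 0x0fff, false)); /* above the limit: OK       */
    assert(!ExpandDownAccessOk(0x0ffc, 0x0fff, 0x0fff, false)); /* at/below the limit: fault */
    assert(!ExpandDownAccessOk(0xfffd, 0x10000, 0x0fff, false));/* past 64K with B=0: fault  */
    return 0;
}
#endif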
5541
5542/**
5543 * Translates a virtual address to a physical address and checks if we
5544 * can access the page as specified.
5545 *
5546 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5547 * @param GCPtrMem The virtual address.
5548 * @param cbAccess The access size, for raising \#PF correctly for
5549 * FXSAVE and such.
5550 * @param fAccess The intended access.
5551 * @param pGCPhysMem Where to return the physical address.
5552 */
5553VBOXSTRICTRC iemMemPageTranslateAndCheckAccess(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint32_t cbAccess,
5554 uint32_t fAccess, PRTGCPHYS pGCPhysMem) RT_NOEXCEPT
5555{
5556 /** @todo Need a different PGM interface here. We're currently using
5557 * generic / REM interfaces. This won't cut it for R0. */
5558 /** @todo If/when PGM handles paged real-mode, we can remove the hack in
5559 * iemSvmWorldSwitch/iemVmxWorldSwitch to work around raising a page-fault
5560 * here. */
5561 PGMPTWALK Walk;
5562 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
5563 if (RT_FAILURE(rc))
5564 {
5565 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
5566 /** @todo Check unassigned memory in unpaged mode. */
5567 /** @todo Reserved bits in page tables. Requires new PGM interface. */
5568#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5569 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5570 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
5571#endif
5572 *pGCPhysMem = NIL_RTGCPHYS;
5573 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess, rc);
5574 }
5575
5576 /* If the page is writable and does not have the no-exec bit set, all
5577 access is allowed. Otherwise we'll have to check more carefully... */
5578 if ((Walk.fEffective & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
5579 {
5580 /* Write to read only memory? */
5581 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5582 && !(Walk.fEffective & X86_PTE_RW)
5583 && ( ( IEM_GET_CPL(pVCpu) == 3
5584 && !(fAccess & IEM_ACCESS_WHAT_SYS))
5585 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
5586 {
5587 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
5588 *pGCPhysMem = NIL_RTGCPHYS;
5589#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5590 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5591 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5592#endif
5593 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
5594 }
5595
5596 /* Kernel memory accessed by userland? */
5597 if ( !(Walk.fEffective & X86_PTE_US)
5598 && IEM_GET_CPL(pVCpu) == 3
5599 && !(fAccess & IEM_ACCESS_WHAT_SYS))
5600 {
5601 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
5602 *pGCPhysMem = NIL_RTGCPHYS;
5603#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5604 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5605 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5606#endif
5607 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess, VERR_ACCESS_DENIED);
5608 }
5609
5610 /* Executing non-executable memory? */
5611 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
5612 && (Walk.fEffective & X86_PTE_PAE_NX)
5613 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) )
5614 {
5615 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
5616 *pGCPhysMem = NIL_RTGCPHYS;
5617#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5618 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5619 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5620#endif
5621 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
5622 VERR_ACCESS_DENIED);
5623 }
5624 }
5625
5626 /*
5627 * Set the dirty / access flags.
5628 * ASSUMES this is set when the address is translated rather than on commit...
5629 */
5630 /** @todo testcase: check when A and D bits are actually set by the CPU. */
5631 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
5632 if ((Walk.fEffective & fAccessedDirty) != fAccessedDirty)
5633 {
5634 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
5635 AssertRC(rc2);
5636 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
5637 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
5638 }
5639
5640 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
5641 *pGCPhysMem = GCPhys;
5642 return VINF_SUCCESS;
5643}
5644
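/*
 * Standalone sketch of the write-permission decision above: ring-3
 * (non-system) writes always require PTE.RW, while supervisor writes to a
 * read-only page only fault when CR0.WP is set.  PageWriteFaults is a
 * hypothetical helper, not the IEM code.
 */
#if 0 /* illustration only */
# include <assert.h>
# include <stdbool.h>

static bool PageWriteFaults(bool fPteRw, unsigned uCpl, bool fSysAccess, bool fCr0Wp)
{
    return !fPteRw && ((uCpl == 3 && !fSysAccess) || fCr0Wp);
}

int main(void)
{
    assert( PageWriteFaults(false, 3, false, false)); /* user write, R/O page        */
    assert(!PageWriteFaults(false, 0, false, false)); /* kernel write, WP=0: allowed */
    assert( PageWriteFaults(false, 0, false, true));  /* kernel write, WP=1: faults  */
    assert(!PageWriteFaults(true,  3, false, true));  /* writable page: fine         */
    return 0;
}
#endif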
5645
5646/**
5647 * Looks up a memory mapping entry.
5648 *
5649 * @returns The mapping index (non-negative) or VERR_NOT_FOUND (negative).
5650 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5651 * @param pvMem The memory address.
5652 * @param fAccess The access flags to match (IEM_ACCESS_WHAT_XXX and IEM_ACCESS_TYPE_XXX).
5653 */
5654DECLINLINE(int) iemMapLookup(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess)
5655{
5656 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
5657 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
5658 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
5659 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5660 return 0;
5661 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
5662 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5663 return 1;
5664 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
5665 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5666 return 2;
5667 return VERR_NOT_FOUND;
5668}
5669
5670
5671/**
5672 * Finds a free memmap entry when using iNextMapping doesn't work.
5673 *
5674 * @returns Memory mapping index, 1024 on failure.
5675 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5676 */
5677static unsigned iemMemMapFindFree(PVMCPUCC pVCpu)
5678{
5679 /*
5680 * The easy case.
5681 */
5682 if (pVCpu->iem.s.cActiveMappings == 0)
5683 {
5684 pVCpu->iem.s.iNextMapping = 1;
5685 return 0;
5686 }
5687
5688 /* There should be enough mappings for all instructions. */
5689 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
5690
5691 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
5692 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
5693 return i;
5694
5695 AssertFailedReturn(1024);
5696}
5697
5698
5699/**
5700 * Commits a bounce buffer that needs writing back and unmaps it.
5701 *
5702 * @returns Strict VBox status code.
5703 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5704 * @param iMemMap The index of the buffer to commit.
5705 * @param fPostponeFail Whether we can postpone writer failures to ring-3.
5706 * Always false in ring-3, obviously.
5707 */
5708static VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPUCC pVCpu, unsigned iMemMap, bool fPostponeFail)
5709{
5710 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
5711 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
5712#ifdef IN_RING3
5713 Assert(!fPostponeFail);
5714 RT_NOREF_PV(fPostponeFail);
5715#endif
5716
5717 /*
5718 * Do the writing.
5719 */
5720 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
5721 if (!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned)
5722 {
5723 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
5724 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
5725 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
5726 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
5727 {
5728 /*
5729 * Carefully and efficiently dealing with access handler return
5730 * codes makes this a little bloated.
5731 */
5732 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
5733 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
5734 pbBuf,
5735 cbFirst,
5736 PGMACCESSORIGIN_IEM);
5737 if (rcStrict == VINF_SUCCESS)
5738 {
5739 if (cbSecond)
5740 {
5741 rcStrict = PGMPhysWrite(pVM,
5742 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5743 pbBuf + cbFirst,
5744 cbSecond,
5745 PGMACCESSORIGIN_IEM);
5746 if (rcStrict == VINF_SUCCESS)
5747 { /* nothing */ }
5748 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5749 {
5750 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
5751 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5752 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5753 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5754 }
5755#ifndef IN_RING3
5756 else if (fPostponeFail)
5757 {
5758 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5759 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5760 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5761 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
5762 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5763 return iemSetPassUpStatus(pVCpu, rcStrict);
5764 }
5765#endif
5766 else
5767 {
5768 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5769 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5770 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5771 return rcStrict;
5772 }
5773 }
5774 }
5775 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5776 {
5777 if (!cbSecond)
5778 {
5779 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
5780 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
5781 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5782 }
5783 else
5784 {
5785 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
5786 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5787 pbBuf + cbFirst,
5788 cbSecond,
5789 PGMACCESSORIGIN_IEM);
5790 if (rcStrict2 == VINF_SUCCESS)
5791 {
5792 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
5793 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5794 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5795 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5796 }
5797 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
5798 {
5799 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
5800 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5801 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
5802 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
5803 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5804 }
5805#ifndef IN_RING3
5806 else if (fPostponeFail)
5807 {
5808 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5809 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5810 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5811 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
5812 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5813 return iemSetPassUpStatus(pVCpu, rcStrict);
5814 }
5815#endif
5816 else
5817 {
5818 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5819 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5820 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
5821 return rcStrict2;
5822 }
5823 }
5824 }
5825#ifndef IN_RING3
5826 else if (fPostponeFail)
5827 {
5828 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5829 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5830 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5831 if (!cbSecond)
5832 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
5833 else
5834 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
5835 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5836 return iemSetPassUpStatus(pVCpu, rcStrict);
5837 }
5838#endif
5839 else
5840 {
5841 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
5842 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5843 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5844 return rcStrict;
5845 }
5846 }
5847 else
5848 {
5849 /*
5850 * No access handlers, much simpler.
5851 */
5852 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
5853 if (RT_SUCCESS(rc))
5854 {
5855 if (cbSecond)
5856 {
5857 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
5858 if (RT_SUCCESS(rc))
5859 { /* likely */ }
5860 else
5861 {
5862 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5863 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5864 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
5865 return rc;
5866 }
5867 }
5868 }
5869 else
5870 {
5871 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
5872 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
5873 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5874 return rc;
5875 }
5876 }
5877 }
5878
5879#if defined(IEM_LOG_MEMORY_WRITES)
5880 Log(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
5881 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
5882 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
5883 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5884 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
5885 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
5886
5887 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
5888 g_cbIemWrote = cbWrote;
5889 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
5890#endif
5891
5892 /*
5893 * Free the mapping entry.
5894 */
5895 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
5896 Assert(pVCpu->iem.s.cActiveMappings != 0);
5897 pVCpu->iem.s.cActiveMappings--;
5898 return VINF_SUCCESS;
5899}
5900
5901
5902/**
5903 * iemMemMap worker that deals with a request crossing pages.
5904 */
5905static VBOXSTRICTRC
5906iemMemBounceBufferMapCrossPage(PVMCPUCC pVCpu, int iMemMap, void **ppvMem, size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
5907{
5908 Assert(cbMem <= GUEST_PAGE_SIZE);
5909
5910 /*
5911 * Do the address translations.
5912 */
5913 uint32_t const cbFirstPage = GUEST_PAGE_SIZE - (uint32_t)(GCPtrFirst & GUEST_PAGE_OFFSET_MASK);
5914 RTGCPHYS GCPhysFirst;
5915 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, cbFirstPage, fAccess, &GCPhysFirst);
5916 if (rcStrict != VINF_SUCCESS)
5917 return rcStrict;
5918 Assert((GCPhysFirst & GUEST_PAGE_OFFSET_MASK) == (GCPtrFirst & GUEST_PAGE_OFFSET_MASK));
5919
5920 uint32_t const cbSecondPage = (uint32_t)cbMem - cbFirstPage;
5921 RTGCPHYS GCPhysSecond;
5922 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK,
5923 cbSecondPage, fAccess, &GCPhysSecond);
5924 if (rcStrict != VINF_SUCCESS)
5925 return rcStrict;
5926 Assert((GCPhysSecond & GUEST_PAGE_OFFSET_MASK) == 0);
5927 GCPhysSecond &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK; /** @todo why? */
5928
5929 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
5930
5931 /*
5932 * Read in the current memory content if it's a read, execute or partial
5933 * write access.
5934 */
5935 uint8_t * const pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
5936
5937 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
5938 {
5939 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
5940 {
5941 /*
5942 * Must carefully deal with access handler status codes here,
5943 * which makes the code a bit bloated.
5944 */
5945 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
5946 if (rcStrict == VINF_SUCCESS)
5947 {
5948 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
5949 if (rcStrict == VINF_SUCCESS)
5950 { /*likely */ }
5951 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5952 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5953 else
5954 {
5955 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (!!)\n",
5956 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5957 return rcStrict;
5958 }
5959 }
5960 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5961 {
5962 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
5963 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
5964 {
5965 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
5966 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5967 }
5968 else
5969 {
5970 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
5971 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
5972 return rcStrict2;
5973 }
5974 }
5975 else
5976 {
5977 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
5978 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
5979 return rcStrict;
5980 }
5981 }
5982 else
5983 {
5984 /*
5985 * No informational status codes here, much more straightforward.
5986 */
5987 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
5988 if (RT_SUCCESS(rc))
5989 {
5990 Assert(rc == VINF_SUCCESS);
5991 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
5992 if (RT_SUCCESS(rc))
5993 Assert(rc == VINF_SUCCESS);
5994 else
5995 {
5996 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
5997 return rc;
5998 }
5999 }
6000 else
6001 {
6002 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
6003 return rc;
6004 }
6005 }
6006 }
6007#ifdef VBOX_STRICT
6008 else
6009 memset(pbBuf, 0xcc, cbMem);
6010 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
6011 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
6012#endif
6013 AssertCompileMemberAlignment(VMCPU, iem.s.aBounceBuffers, 64);
6014
6015 /*
6016 * Commit the bounce buffer entry.
6017 */
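    /* Record both physical halves so the commit/unmap code knows where to
       write the bounce buffer back and how much of it goes to each page. */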
6018 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
6019 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
6020 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
6021 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
6022 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
6023 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
6024 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
6025 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6026 pVCpu->iem.s.cActiveMappings++;
6027
6028 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6029 *ppvMem = pbBuf;
6030 return VINF_SUCCESS;
6031}
6032
6033
6034/**
 6035 * iemMemMap worker that deals with iemMemPageMap failures.
6036 */
6037static VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPUCC pVCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
6038 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
6039{
6040 /*
6041 * Filter out conditions we can handle and the ones which shouldn't happen.
6042 */
6043 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
6044 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
6045 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
6046 {
6047 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
6048 return rcMap;
6049 }
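    /* What's left: an access handler covering the page (catch-all or
       catch-write) or wholly unassigned memory, which reads as all 0xff. */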
6050 pVCpu->iem.s.cPotentialExits++;
6051
6052 /*
6053 * Read in the current memory content if it's a read, execute or partial
6054 * write access.
6055 */
6056 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
6057 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
6058 {
6059 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
6060 memset(pbBuf, 0xff, cbMem);
6061 else
6062 {
6063 int rc;
6064 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
6065 {
6066 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
6067 if (rcStrict == VINF_SUCCESS)
6068 { /* nothing */ }
6069 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6070 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6071 else
6072 {
6073 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6074 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6075 return rcStrict;
6076 }
6077 }
6078 else
6079 {
6080 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
6081 if (RT_SUCCESS(rc))
6082 { /* likely */ }
6083 else
6084 {
6085 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6086 GCPhysFirst, rc));
6087 return rc;
6088 }
6089 }
6090 }
6091 }
 6092#ifdef VBOX_STRICT
 6093 else
 6094 memset(pbBuf, 0xcc, cbMem);
 6097 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
 6098 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
 6099#endif
6100
6101 /*
6102 * Commit the bounce buffer entry.
6103 */
6104 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
6105 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
6106 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
6107 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
6108 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
6109 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
6110 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
6111 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6112 pVCpu->iem.s.cActiveMappings++;
6113
6114 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6115 *ppvMem = pbBuf;
6116 return VINF_SUCCESS;
6117}
6118
6119
6120
6121/**
6122 * Maps the specified guest memory for the given kind of access.
6123 *
6124 * This may be using bounce buffering of the memory if it's crossing a page
6125 * boundary or if there is an access handler installed for any of it. Because
6126 * of lock prefix guarantees, we're in for some extra clutter when this
6127 * happens.
6128 *
6129 * This may raise a \#GP, \#SS, \#PF or \#AC.
6130 *
6131 * @returns VBox strict status code.
6132 *
6133 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6134 * @param ppvMem Where to return the pointer to the mapped memory.
6135 * @param cbMem The number of bytes to map. This is usually 1, 2, 4, 6,
6136 * 8, 12, 16, 32 or 512. When used by string operations
6137 * it can be up to a page.
6138 * @param iSegReg The index of the segment register to use for this
6139 * access. The base and limits are checked. Use UINT8_MAX
6140 * to indicate that no segmentation is required (for IDT,
6141 * GDT and LDT accesses).
6142 * @param GCPtrMem The address of the guest memory.
6143 * @param fAccess How the memory is being accessed. The
6144 * IEM_ACCESS_TYPE_XXX bit is used to figure out how to map
6145 * the memory, while the IEM_ACCESS_WHAT_XXX bit is used
6146 * when raising exceptions.
6147 * @param uAlignCtl Alignment control:
6148 * - Bits 15:0 is the alignment mask.
6149 * - Bits 31:16 for flags like IEM_MEMMAP_F_ALIGN_GP,
6150 * IEM_MEMMAP_F_ALIGN_SSE, and
6151 * IEM_MEMMAP_F_ALIGN_GP_OR_AC.
6152 * Pass zero to skip alignment.
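 *
 * A minimal usage sketch (illustrative only; GCPtrEff and u16Value are
 * placeholder names): map a word for reading through DS with natural
 * alignment, then commit the mapping again:
 * @code
 *     uint16_t const *pu16Src;
 *     VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_DS,
 *                                       GCPtrEff, IEM_ACCESS_DATA_R, sizeof(*pu16Src) - 1);
 *     if (rcStrict == VINF_SUCCESS)
 *     {
 *         uint16_t const u16Value = *pu16Src;
 *         rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
 *     }
 * @endcode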
6153 */
6154VBOXSTRICTRC iemMemMap(PVMCPUCC pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem,
6155 uint32_t fAccess, uint32_t uAlignCtl) RT_NOEXCEPT
6156{
6157 /*
6158 * Check the input and figure out which mapping entry to use.
6159 */
6160 Assert(cbMem <= sizeof(pVCpu->iem.s.aBounceBuffers[0]));
6161 Assert( cbMem <= 64 || cbMem == 512 || cbMem == 256 || cbMem == 108 || cbMem == 104 || cbMem == 102 || cbMem == 94
6162 || (iSegReg == UINT8_MAX && uAlignCtl == 0 && fAccess == IEM_ACCESS_DATA_R /* for the CPUID logging interface */) );
6163 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
6164 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
6165
6166 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
6167 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6168 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
6169 {
6170 iMemMap = iemMemMapFindFree(pVCpu);
6171 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
6172 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
6173 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
6174 pVCpu->iem.s.aMemMappings[2].fAccess),
6175 VERR_IEM_IPE_9);
6176 }
6177
6178 /*
6179 * Map the memory, checking that we can actually access it. If something
6180 * slightly complicated happens, fall back on bounce buffering.
6181 */
6182 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
6183 if (rcStrict == VINF_SUCCESS)
6184 { /* likely */ }
6185 else
6186 return rcStrict;
6187
 6188 if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE) /* Not crossing a page boundary? */
6189 { /* likely */ }
6190 else
6191 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
6192
6193 /*
6194 * Alignment check.
6195 */
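    /* Bits 15:0 of uAlignCtl is the alignment mask; a zero mask disables the
       check. For misaligned non-system accesses the IEM_MEMMAP_F_ALIGN_XXX
       flags pick between #AC (when alignment checks are enabled) and #GP(0). */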
6196 if ( (GCPtrMem & (uAlignCtl & UINT16_MAX)) == 0 )
6197 { /* likelyish */ }
6198 else
6199 {
6200 /* Misaligned access. */
6201 if ((fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6202 {
6203 if ( !(uAlignCtl & IEM_MEMMAP_F_ALIGN_GP)
6204 || ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_SSE)
6205 && (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) )
6206 {
6207 AssertCompile(X86_CR0_AM == X86_EFL_AC);
6208
6209 if (iemMemAreAlignmentChecksEnabled(pVCpu))
6210 return iemRaiseAlignmentCheckException(pVCpu);
6211 }
6212 else if ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_GP_OR_AC)
6213 && (GCPtrMem & 3) /* The value 4 matches 10980xe's FXSAVE and helps make bs3-cpu-basic2 work. */
6214 /** @todo may only apply to 2, 4 or 8 byte misalignments depending on the CPU
6215 * implementation. See FXSAVE/FRSTOR/XSAVE/XRSTOR/++. Using 4 for now as
6216 * that's what FXSAVE does on a 10980xe. */
6217 && iemMemAreAlignmentChecksEnabled(pVCpu))
6218 return iemRaiseAlignmentCheckException(pVCpu);
6219 else
6220 return iemRaiseGeneralProtectionFault0(pVCpu);
6221 }
6222 }
6223
6224#ifdef IEM_WITH_DATA_TLB
6225 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
6226
6227 /*
6228 * Get the TLB entry for this page.
6229 */
6230 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
6231 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
6232 if (pTlbe->uTag == uTag)
6233 {
6234# ifdef VBOX_WITH_STATISTICS
6235 pVCpu->iem.s.DataTlb.cTlbHits++;
6236# endif
6237 }
6238 else
6239 {
6240 pVCpu->iem.s.DataTlb.cTlbMisses++;
6241 PGMPTWALK Walk;
6242 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
6243 if (RT_FAILURE(rc))
6244 {
6245 Log(("iemMemMap: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
6246# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6247 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6248 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
6249# endif
6250 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, rc);
6251 }
6252
6253 Assert(Walk.fSucceeded);
6254 pTlbe->uTag = uTag;
6255 pTlbe->fFlagsAndPhysRev = ~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A); /* skipping NX */
6256 pTlbe->GCPhys = Walk.GCPhys;
6257 pTlbe->pbMappingR3 = NULL;
6258 }
6259
6260 /*
6261 * Check TLB page table level access flags.
6262 */
6263 /* If the page is either supervisor only or non-writable, we need to do
6264 more careful access checks. */
6265 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_WRITE))
6266 {
6267 /* Write to read only memory? */
6268 if ( (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_WRITE)
6269 && (fAccess & IEM_ACCESS_TYPE_WRITE)
6270 && ( ( IEM_GET_CPL(pVCpu) == 3
6271 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6272 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
6273 {
6274 Log(("iemMemMap: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
6275# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6276 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6277 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6278# endif
6279 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
6280 }
6281
6282 /* Kernel memory accessed by userland? */
6283 if ( (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER)
6284 && IEM_GET_CPL(pVCpu) == 3
6285 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6286 {
6287 Log(("iemMemMap: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
6288# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6289 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6290 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6291# endif
6292 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, VERR_ACCESS_DENIED);
6293 }
6294 }
6295
6296 /*
6297 * Set the dirty / access flags.
6298 * ASSUMES this is set when the address is translated rather than on commit...
6299 */
6300 /** @todo testcase: check when A and D bits are actually set by the CPU. */
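    /* The IEMTLBE_F_PT_NO_ACCESSED / NO_DIRTY bits are the inverted A/D page
       table bits, so a set bit here means the PTE bit still needs setting. */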
6301 uint64_t const fTlbAccessedDirty = (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PT_NO_DIRTY : 0) | IEMTLBE_F_PT_NO_ACCESSED;
6302 if (pTlbe->fFlagsAndPhysRev & fTlbAccessedDirty)
6303 {
6304 uint32_t const fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
6305 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
6306 AssertRC(rc2);
6307 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
6308 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
6309 pTlbe->fFlagsAndPhysRev &= ~fTlbAccessedDirty;
6310 }
6311
6312 /*
6313 * Look up the physical page info if necessary.
6314 */
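    /* The cached ring-3 mapping and the IEMTLBE_F_PG_XXX bits are only valid for
       the physical TLB revision they were fetched under; on a mismatch they are
       refreshed via PGMPhysIemGCPhys2PtrNoLock below. */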
6315 uint8_t *pbMem = NULL;
6316 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6317# ifdef IN_RING3
6318 pbMem = pTlbe->pbMappingR3;
6319# else
6320 pbMem = NULL;
6321# endif
6322 else
6323 {
6324 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
6325 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
6326 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
6327 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
6328 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
6329 { /* likely */ }
6330 else
6331 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
6332 pTlbe->pbMappingR3 = NULL;
6333 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
6334 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_UNASSIGNED);
6335 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
6336 &pbMem, &pTlbe->fFlagsAndPhysRev);
6337 AssertRCReturn(rc, rc);
6338# ifdef IN_RING3
6339 pTlbe->pbMappingR3 = pbMem;
6340# endif
6341 }
6342
6343 /*
6344 * Check the physical page level access and mapping.
6345 */
6346 if ( !(pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ))
6347 || !(pTlbe->fFlagsAndPhysRev & ( (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PG_NO_WRITE : 0)
6348 | (fAccess & IEM_ACCESS_TYPE_READ ? IEMTLBE_F_PG_NO_READ : 0))) )
6349 { /* probably likely */ }
6350 else
6351 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem,
6352 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,
6353 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED
6354 : pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? VERR_PGM_PHYS_TLB_CATCH_ALL
6355 : VERR_PGM_PHYS_TLB_CATCH_WRITE);
6356 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3)); /* ASSUMPTIONS about PGMPhysIemGCPhys2PtrNoLock behaviour. */
6357
6358 if (pbMem)
6359 {
6360 Assert(!((uintptr_t)pbMem & GUEST_PAGE_OFFSET_MASK));
6361 pbMem = pbMem + (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6362 fAccess |= IEM_ACCESS_NOT_LOCKED;
6363 }
6364 else
6365 {
6366 Assert(!(fAccess & IEM_ACCESS_NOT_LOCKED));
6367 RTGCPHYS const GCPhysFirst = pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6368 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6369 if (rcStrict != VINF_SUCCESS)
6370 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
6371 }
6372
6373 void * const pvMem = pbMem;
6374
6375 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6376 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6377 if (fAccess & IEM_ACCESS_TYPE_READ)
6378 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6379
6380#else /* !IEM_WITH_DATA_TLB */
6381
6382 RTGCPHYS GCPhysFirst;
6383 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, &GCPhysFirst);
6384 if (rcStrict != VINF_SUCCESS)
6385 return rcStrict;
6386
6387 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6388 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6389 if (fAccess & IEM_ACCESS_TYPE_READ)
6390 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6391
6392 void *pvMem;
6393 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6394 if (rcStrict != VINF_SUCCESS)
6395 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
6396
6397#endif /* !IEM_WITH_DATA_TLB */
6398
6399 /*
6400 * Fill in the mapping table entry.
6401 */
6402 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
6403 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
6404 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6405 pVCpu->iem.s.cActiveMappings += 1;
6406
6407 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6408 *ppvMem = pvMem;
6409
6410 return VINF_SUCCESS;
6411}
6412
6413
6414/**
6415 * Commits the guest memory if bounce buffered and unmaps it.
6416 *
6417 * @returns Strict VBox status code.
6418 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6419 * @param pvMem The mapping.
6420 * @param fAccess The kind of access.
6421 */
6422VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess) RT_NOEXCEPT
6423{
6424 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
6425 AssertReturn(iMemMap >= 0, iMemMap);
6426
6427 /* If it's bounce buffered, we may need to write back the buffer. */
6428 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6429 {
6430 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6431 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
6432 }
6433 /* Otherwise unlock it. */
6434 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6435 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6436
6437 /* Free the entry. */
6438 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6439 Assert(pVCpu->iem.s.cActiveMappings != 0);
6440 pVCpu->iem.s.cActiveMappings--;
6441 return VINF_SUCCESS;
6442}
6443
6444#ifdef IEM_WITH_SETJMP
6445
6446/**
6447 * Maps the specified guest memory for the given kind of access, longjmp on
6448 * error.
6449 *
6450 * This may be using bounce buffering of the memory if it's crossing a page
6451 * boundary or if there is an access handler installed for any of it. Because
6452 * of lock prefix guarantees, we're in for some extra clutter when this
6453 * happens.
6454 *
6455 * This may raise a \#GP, \#SS, \#PF or \#AC.
6456 *
6457 * @returns Pointer to the mapped memory.
6458 *
6459 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6460 * @param cbMem The number of bytes to map. This is usually 1,
6461 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
6462 * string operations it can be up to a page.
6463 * @param iSegReg The index of the segment register to use for
6464 * this access. The base and limits are checked.
6465 * Use UINT8_MAX to indicate that no segmentation
6466 * is required (for IDT, GDT and LDT accesses).
6467 * @param GCPtrMem The address of the guest memory.
6468 * @param fAccess How the memory is being accessed. The
6469 * IEM_ACCESS_TYPE_XXX bit is used to figure out
6470 * how to map the memory, while the
6471 * IEM_ACCESS_WHAT_XXX bit is used when raising
6472 * exceptions.
6473 * @param uAlignCtl Alignment control:
6474 * - Bits 15:0 is the alignment mask.
6475 * - Bits 31:16 for flags like IEM_MEMMAP_F_ALIGN_GP,
6476 * IEM_MEMMAP_F_ALIGN_SSE, and
6477 * IEM_MEMMAP_F_ALIGN_GP_OR_AC.
6478 * Pass zero to skip alignment.
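 *
 * A minimal usage sketch (illustrative only; GCPtrEff is a placeholder for the
 * effective address): fetch a word through DS, letting any failure longjmp:
 * @code
 *     uint16_t const *pu16Src = (uint16_t const *)iemMemMapJmp(pVCpu, sizeof(*pu16Src), X86_SREG_DS, GCPtrEff,
 *                                                              IEM_ACCESS_DATA_R, sizeof(*pu16Src) - 1);
 *     uint16_t const u16Value = *pu16Src;
 *     iemMemCommitAndUnmapJmp(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
 * @endcode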
6479 */
6480void *iemMemMapJmp(PVMCPUCC pVCpu, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess,
6481 uint32_t uAlignCtl) IEM_NOEXCEPT_MAY_LONGJMP
6482{
6483 /*
6484 * Check the input, check segment access and adjust address
6485 * with segment base.
6486 */
6487 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
6488 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
6489 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
6490
6491 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
6492 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
6493 else IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6494
6495 /*
6496 * Alignment check.
6497 */
6498 if ( (GCPtrMem & (uAlignCtl & UINT16_MAX)) == 0 )
6499 { /* likelyish */ }
6500 else
6501 {
6502 /* Misaligned access. */
6503 if ((fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6504 {
6505 if ( !(uAlignCtl & IEM_MEMMAP_F_ALIGN_GP)
6506 || ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_SSE)
6507 && (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) )
6508 {
6509 AssertCompile(X86_CR0_AM == X86_EFL_AC);
6510
6511 if (iemMemAreAlignmentChecksEnabled(pVCpu))
6512 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
6513 }
6514 else if ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_GP_OR_AC)
6515 && (GCPtrMem & 3) /* The value 4 matches 10980xe's FXSAVE and helps make bs3-cpu-basic2 work. */
6516 /** @todo may only apply to 2, 4 or 8 byte misalignments depending on the CPU
6517 * implementation. See FXSAVE/FRSTOR/XSAVE/XRSTOR/++. Using 4 for now as
6518 * that's what FXSAVE does on a 10980xe. */
6519 && iemMemAreAlignmentChecksEnabled(pVCpu))
6520 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
6521 else
6522 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
6523 }
6524 }
6525
6526 /*
6527 * Figure out which mapping entry to use.
6528 */
6529 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
6530 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6531 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
6532 {
6533 iMemMap = iemMemMapFindFree(pVCpu);
6534 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
6535 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
6536 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
6537 pVCpu->iem.s.aMemMappings[2].fAccess),
6538 IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_9));
6539 }
6540
6541 /*
6542 * Crossing a page boundary?
6543 */
6544 if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE)
6545 { /* No (likely). */ }
6546 else
6547 {
6548 void *pvMem;
6549 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, cbMem, GCPtrMem, fAccess);
6550 if (rcStrict == VINF_SUCCESS)
6551 return pvMem;
6552 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6553 }
6554
6555#ifdef IEM_WITH_DATA_TLB
6556 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
6557
6558 /*
6559 * Get the TLB entry for this page.
6560 */
6561 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
6562 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
6563 if (pTlbe->uTag == uTag)
6564 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
6565 else
6566 {
6567 pVCpu->iem.s.DataTlb.cTlbMisses++;
6568 PGMPTWALK Walk;
6569 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
6570 if (RT_FAILURE(rc))
6571 {
6572 Log(("iemMemMap: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
6573# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6574 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6575 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
6576# endif
6577 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, rc);
6578 }
6579
6580 Assert(Walk.fSucceeded);
6581 pTlbe->uTag = uTag;
6582 pTlbe->fFlagsAndPhysRev = ~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A); /* skipping NX */
6583 pTlbe->GCPhys = Walk.GCPhys;
6584 pTlbe->pbMappingR3 = NULL;
6585 }
6586
6587 /*
6588 * Check the flags and physical revision.
6589 */
6590 /** @todo make the caller pass these in with fAccess. */
6591 uint64_t const fNoUser = (fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS && IEM_GET_CPL(pVCpu) == 3
6592 ? IEMTLBE_F_PT_NO_USER : 0;
6593 uint64_t const fNoWriteNoDirty = fAccess & IEM_ACCESS_TYPE_WRITE
6594 ? IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PT_NO_DIRTY
6595 | ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)
6596 || (IEM_GET_CPL(pVCpu) == 3 && (fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6597 ? IEMTLBE_F_PT_NO_WRITE : 0)
6598 : 0;
6599 uint64_t const fNoRead = fAccess & IEM_ACCESS_TYPE_READ ? IEMTLBE_F_PG_NO_READ : 0;
6600 uint8_t *pbMem = NULL;
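    /* A single compare does all the checks: the physical revision must match and
       none of the disqualifying IEMTLBE_F_XXX bits may be set (the revision value
       never has bits in the flag positions). */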
6601 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PT_NO_ACCESSED | fNoRead | fNoWriteNoDirty | fNoUser))
6602 == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6603# ifdef IN_RING3
6604 pbMem = pTlbe->pbMappingR3;
6605# else
6606 pbMem = NULL;
6607# endif
6608 else
6609 {
6610 /*
6611 * Okay, something isn't quite right or needs refreshing.
6612 */
6613 /* Write to read only memory? */
6614 if (pTlbe->fFlagsAndPhysRev & fNoWriteNoDirty & IEMTLBE_F_PT_NO_WRITE)
6615 {
6616 Log(("iemMemMapJmp: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
6617# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6618 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6619 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6620# endif
6621 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
6622 }
6623
6624 /* Kernel memory accessed by userland? */
6625 if (pTlbe->fFlagsAndPhysRev & fNoUser & IEMTLBE_F_PT_NO_USER)
6626 {
6627 Log(("iemMemMapJmp: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
6628# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6629 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6630 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6631# endif
6632 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, VERR_ACCESS_DENIED);
6633 }
6634
6635 /* Set the dirty / access flags.
6636 ASSUMES this is set when the address is translated rather than on commit... */
6637 /** @todo testcase: check when A and D bits are actually set by the CPU. */
6638 if (pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY) | IEMTLBE_F_PT_NO_ACCESSED))
6639 {
6640 uint32_t const fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
6641 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
6642 AssertRC(rc2);
6643 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
6644 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
6645 pTlbe->fFlagsAndPhysRev &= ~((fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY) | IEMTLBE_F_PT_NO_ACCESSED);
6646 }
6647
6648 /*
6649 * Check if the physical page info needs updating.
6650 */
6651 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6652# ifdef IN_RING3
6653 pbMem = pTlbe->pbMappingR3;
6654# else
6655 pbMem = NULL;
6656# endif
6657 else
6658 {
6659 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
6660 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
6661 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
6662 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
6663 pTlbe->pbMappingR3 = NULL;
6664 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
6665 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_UNASSIGNED);
6666 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
6667 &pbMem, &pTlbe->fFlagsAndPhysRev);
6668 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
6669# ifdef IN_RING3
6670 pTlbe->pbMappingR3 = pbMem;
6671# endif
6672 }
6673
6674 /*
6675 * Check the physical page level access and mapping.
6676 */
6677 if (!(pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty | fNoRead) & (IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ))))
6678 { /* probably likely */ }
6679 else
6680 {
6681 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, (void **)&pbMem, cbMem,
6682 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,
6683 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED
6684 : pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? VERR_PGM_PHYS_TLB_CATCH_ALL
6685 : VERR_PGM_PHYS_TLB_CATCH_WRITE);
6686 if (rcStrict == VINF_SUCCESS)
6687 return pbMem;
6688 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6689 }
6690 }
6691 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3)); /* ASSUMPTIONS about PGMPhysIemGCPhys2PtrNoLock behaviour. */
6692
6693 if (pbMem)
6694 {
6695 Assert(!((uintptr_t)pbMem & GUEST_PAGE_OFFSET_MASK));
6696 pbMem = pbMem + (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6697 fAccess |= IEM_ACCESS_NOT_LOCKED;
6698 }
6699 else
6700 {
6701 Assert(!(fAccess & IEM_ACCESS_NOT_LOCKED));
6702 RTGCPHYS const GCPhysFirst = pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6703 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6704 if (rcStrict == VINF_SUCCESS)
6705 return pbMem;
6706 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6707 }
6708
6709 void * const pvMem = pbMem;
6710
6711 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6712 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6713 if (fAccess & IEM_ACCESS_TYPE_READ)
6714 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6715
6716#else /* !IEM_WITH_DATA_TLB */
6717
6718
6719 RTGCPHYS GCPhysFirst;
6720 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, &GCPhysFirst);
6721 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
6722 else IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6723
6724 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6725 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6726 if (fAccess & IEM_ACCESS_TYPE_READ)
6727 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6728
6729 void *pvMem;
6730 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6731 if (rcStrict == VINF_SUCCESS)
6732 { /* likely */ }
6733 else
6734 {
6735 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
6736 if (rcStrict == VINF_SUCCESS)
6737 return pvMem;
6738 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6739 }
6740
6741#endif /* !IEM_WITH_DATA_TLB */
6742
6743 /*
6744 * Fill in the mapping table entry.
6745 */
6746 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
6747 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
6748 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6749 pVCpu->iem.s.cActiveMappings++;
6750
6751 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6752 return pvMem;
6753}
6754
6755
6756/**
6757 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
6758 *
6759 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6760 * @param pvMem The mapping.
6761 * @param fAccess The kind of access.
6762 */
6763void iemMemCommitAndUnmapJmp(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP
6764{
6765 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
6766 AssertStmt(iMemMap >= 0, IEM_DO_LONGJMP(pVCpu, iMemMap));
6767
6768 /* If it's bounce buffered, we may need to write back the buffer. */
6769 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6770 {
6771 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6772 {
6773 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
6774 if (rcStrict == VINF_SUCCESS)
6775 return;
6776 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6777 }
6778 }
6779 /* Otherwise unlock it. */
6780 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6781 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6782
6783 /* Free the entry. */
6784 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6785 Assert(pVCpu->iem.s.cActiveMappings != 0);
6786 pVCpu->iem.s.cActiveMappings--;
6787}
6788
6789#endif /* IEM_WITH_SETJMP */
6790
6791#ifndef IN_RING3
6792/**
 6793 * Commits the guest memory if bounce buffered and unmaps it; if any bounce
 6794 * buffer part runs into trouble, the write back is postponed to ring-3 (force flags are set).
6795 *
6796 * Allows the instruction to be completed and retired, while the IEM user will
6797 * return to ring-3 immediately afterwards and do the postponed writes there.
6798 *
6799 * @returns VBox status code (no strict statuses). Caller must check
6800 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
6801 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6802 * @param pvMem The mapping.
6803 * @param fAccess The kind of access.
6804 */
6805VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess) RT_NOEXCEPT
6806{
6807 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
6808 AssertReturn(iMemMap >= 0, iMemMap);
6809
6810 /* If it's bounce buffered, we may need to write back the buffer. */
6811 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6812 {
6813 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6814 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
6815 }
6816 /* Otherwise unlock it. */
6817 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6818 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6819
6820 /* Free the entry. */
6821 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6822 Assert(pVCpu->iem.s.cActiveMappings != 0);
6823 pVCpu->iem.s.cActiveMappings--;
6824 return VINF_SUCCESS;
6825}
6826#endif
6827
6828
6829/**
 6830 * Rolls back mappings, releasing page locks and such.
6831 *
6832 * The caller shall only call this after checking cActiveMappings.
6833 *
6834 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6835 */
6836void iemMemRollback(PVMCPUCC pVCpu) RT_NOEXCEPT
6837{
6838 Assert(pVCpu->iem.s.cActiveMappings > 0);
6839
6840 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
6841 while (iMemMap-- > 0)
6842 {
6843 uint32_t const fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
6844 if (fAccess != IEM_ACCESS_INVALID)
6845 {
6846 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
6847 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6848 if (!(fAccess & (IEM_ACCESS_BOUNCE_BUFFERED | IEM_ACCESS_NOT_LOCKED)))
6849 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6850 AssertMsg(pVCpu->iem.s.cActiveMappings > 0,
6851 ("iMemMap=%u fAccess=%#x pv=%p GCPhysFirst=%RGp GCPhysSecond=%RGp\n",
6852 iMemMap, fAccess, pVCpu->iem.s.aMemMappings[iMemMap].pv,
6853 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond));
6854 pVCpu->iem.s.cActiveMappings--;
6855 }
6856 }
6857}
6858
6859
6860/**
6861 * Fetches a data byte.
6862 *
6863 * @returns Strict VBox status code.
6864 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6865 * @param pu8Dst Where to return the byte.
6866 * @param iSegReg The index of the segment register to use for
6867 * this access. The base and limits are checked.
6868 * @param GCPtrMem The address of the guest memory.
6869 */
6870VBOXSTRICTRC iemMemFetchDataU8(PVMCPUCC pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6871{
6872 /* The lazy approach for now... */
6873 uint8_t const *pu8Src;
6874 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R, 0);
6875 if (rc == VINF_SUCCESS)
6876 {
6877 *pu8Dst = *pu8Src;
6878 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
6879 }
6880 return rc;
6881}
6882
6883
6884#ifdef IEM_WITH_SETJMP
6885/**
6886 * Fetches a data byte, longjmp on error.
6887 *
6888 * @returns The byte.
6889 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6890 * @param iSegReg The index of the segment register to use for
6891 * this access. The base and limits are checked.
6892 * @param GCPtrMem The address of the guest memory.
6893 */
6894uint8_t iemMemFetchDataU8Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
6895{
6896 /* The lazy approach for now... */
6897 uint8_t const *pu8Src = (uint8_t const *)iemMemMapJmp(pVCpu, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R, 0);
6898 uint8_t const bRet = *pu8Src;
6899 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
6900 return bRet;
6901}
6902#endif /* IEM_WITH_SETJMP */
6903
6904
6905/**
6906 * Fetches a data word.
6907 *
6908 * @returns Strict VBox status code.
6909 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6910 * @param pu16Dst Where to return the word.
6911 * @param iSegReg The index of the segment register to use for
6912 * this access. The base and limits are checked.
6913 * @param GCPtrMem The address of the guest memory.
6914 */
6915VBOXSTRICTRC iemMemFetchDataU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6916{
6917 /* The lazy approach for now... */
6918 uint16_t const *pu16Src;
6919 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem,
6920 IEM_ACCESS_DATA_R, sizeof(*pu16Src) - 1);
6921 if (rc == VINF_SUCCESS)
6922 {
6923 *pu16Dst = *pu16Src;
6924 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
6925 }
6926 return rc;
6927}
6928
6929
6930#ifdef IEM_WITH_SETJMP
6931/**
6932 * Fetches a data word, longjmp on error.
6933 *
6934 * @returns The word
6935 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6936 * @param iSegReg The index of the segment register to use for
6937 * this access. The base and limits are checked.
6938 * @param GCPtrMem The address of the guest memory.
6939 */
6940uint16_t iemMemFetchDataU16Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
6941{
6942 /* The lazy approach for now... */
6943 uint16_t const *pu16Src = (uint16_t const *)iemMemMapJmp(pVCpu, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
6944 sizeof(*pu16Src) - 1);
6945 uint16_t const u16Ret = *pu16Src;
6946 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
6947 return u16Ret;
6948}
6949#endif
6950
6951
6952/**
6953 * Fetches a data dword.
6954 *
6955 * @returns Strict VBox status code.
6956 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6957 * @param pu32Dst Where to return the dword.
6958 * @param iSegReg The index of the segment register to use for
6959 * this access. The base and limits are checked.
6960 * @param GCPtrMem The address of the guest memory.
6961 */
6962VBOXSTRICTRC iemMemFetchDataU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6963{
6964 /* The lazy approach for now... */
6965 uint32_t const *pu32Src;
6966 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem,
6967 IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
6968 if (rc == VINF_SUCCESS)
6969 {
6970 *pu32Dst = *pu32Src;
6971 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
6972 }
6973 return rc;
6974}
6975
6976
6977/**
6978 * Fetches a data dword and zero extends it to a qword.
6979 *
6980 * @returns Strict VBox status code.
6981 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6982 * @param pu64Dst Where to return the qword.
6983 * @param iSegReg The index of the segment register to use for
6984 * this access. The base and limits are checked.
6985 * @param GCPtrMem The address of the guest memory.
6986 */
6987VBOXSTRICTRC iemMemFetchDataU32_ZX_U64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6988{
6989 /* The lazy approach for now... */
6990 uint32_t const *pu32Src;
6991 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem,
6992 IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
6993 if (rc == VINF_SUCCESS)
6994 {
6995 *pu64Dst = *pu32Src;
6996 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
6997 }
6998 return rc;
6999}
7000
7001
7002#ifdef IEM_WITH_SETJMP
7003
7004/**
7005 * Fetches a data dword, longjmp on error, fallback/safe version.
7006 *
7007 * @returns The dword
7008 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7009 * @param iSegReg The index of the segment register to use for
7010 * this access. The base and limits are checked.
7011 * @param GCPtrMem The address of the guest memory.
7012 */
7013uint32_t iemMemFetchDataU32SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7014{
7015 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
7016 sizeof(*pu32Src) - 1);
7017 uint32_t const u32Ret = *pu32Src;
7018 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
7019 return u32Ret;
7020}
7021
7022
7023/**
7024 * Fetches a data dword, longjmp on error.
7025 *
7026 * @returns The dword
7027 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7028 * @param iSegReg The index of the segment register to use for
7029 * this access. The base and limits are checked.
7030 * @param GCPtrMem The address of the guest memory.
7031 */
7032uint32_t iemMemFetchDataU32Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7033{
7034# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
7035 /*
 7036 * Convert from segmented to flat address and check that it doesn't cross a page boundary.
7037 */
7038 RTGCPTR GCPtrEff = iemMemApplySegmentToReadJmp(pVCpu, iSegReg, sizeof(uint32_t), GCPtrMem);
7039 if (RT_LIKELY((GCPtrEff & GUEST_PAGE_OFFSET_MASK) <= GUEST_PAGE_SIZE - sizeof(uint32_t)))
7040 {
7041 /*
7042 * TLB lookup.
7043 */
7044 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrEff);
7045 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
7046 if (pTlbe->uTag == uTag)
7047 {
7048 /*
7049 * Check TLB page table level access flags.
7050 */
7051 uint64_t const fNoUser = IEM_GET_CPL(pVCpu) == 3 ? IEMTLBE_F_PT_NO_USER : 0;
7052 if ( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_READ
7053 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_NO_MAPPINGR3 | fNoUser))
7054 == pVCpu->iem.s.DataTlb.uTlbPhysRev)
7055 {
7056 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
7057
7058 /*
7059 * Alignment check:
7060 */
7061 /** @todo check priority \#AC vs \#PF */
7062 if ( !(GCPtrEff & (sizeof(uint32_t) - 1))
7063 || !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_AM)
7064 || !pVCpu->cpum.GstCtx.eflags.Bits.u1AC
7065 || IEM_GET_CPL(pVCpu) != 3)
7066 {
7067 /*
7068 * Fetch and return the dword
7069 */
7070 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
7071 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
7072 return *(uint32_t const *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK];
7073 }
7074 Log10(("iemMemFetchDataU32Jmp: Raising #AC for %RGv\n", GCPtrEff));
7075 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
7076 }
7077 }
7078 }
7079
 7080 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
 7081 outdated page pointer, or other troubles. */
7082 Log10(("iemMemFetchDataU32Jmp: %u:%RGv fallback\n", iSegReg, GCPtrMem));
7083 return iemMemFetchDataU32SafeJmp(pVCpu, iSegReg, GCPtrMem);
7084
7085# else
7086 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem,
7087 IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
7088 uint32_t const u32Ret = *pu32Src;
7089 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
7090 return u32Ret;
7091# endif
7092}
7093#endif
7094
7095
7096#ifdef SOME_UNUSED_FUNCTION
7097/**
7098 * Fetches a data dword and sign extends it to a qword.
7099 *
7100 * @returns Strict VBox status code.
7101 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7102 * @param pu64Dst Where to return the sign extended value.
7103 * @param iSegReg The index of the segment register to use for
7104 * this access. The base and limits are checked.
7105 * @param GCPtrMem The address of the guest memory.
7106 */
7107VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7108{
7109 /* The lazy approach for now... */
7110 int32_t const *pi32Src;
7111 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem,
7112 IEM_ACCESS_DATA_R, sizeof(*pi32Src) - 1);
7113 if (rc == VINF_SUCCESS)
7114 {
7115 *pu64Dst = *pi32Src;
7116 rc = iemMemCommitAndUnmap(pVCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
7117 }
7118#ifdef __GNUC__ /* warning: GCC may be a royal pain */
7119 else
7120 *pu64Dst = 0;
7121#endif
7122 return rc;
7123}
7124#endif
7125
7126
7127/**
7128 * Fetches a data qword.
7129 *
7130 * @returns Strict VBox status code.
7131 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7132 * @param pu64Dst Where to return the qword.
7133 * @param iSegReg The index of the segment register to use for
7134 * this access. The base and limits are checked.
7135 * @param GCPtrMem The address of the guest memory.
7136 */
7137VBOXSTRICTRC iemMemFetchDataU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7138{
7139 /* The lazy approach for now... */
7140 uint64_t const *pu64Src;
7141 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem,
7142 IEM_ACCESS_DATA_R, sizeof(*pu64Src) - 1);
7143 if (rc == VINF_SUCCESS)
7144 {
7145 *pu64Dst = *pu64Src;
7146 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
7147 }
7148 return rc;
7149}
7150
7151
7152#ifdef IEM_WITH_SETJMP
7153/**
7154 * Fetches a data qword, longjmp on error.
7155 *
7156 * @returns The qword.
7157 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7158 * @param iSegReg The index of the segment register to use for
7159 * this access. The base and limits are checked.
7160 * @param GCPtrMem The address of the guest memory.
7161 */
7162uint64_t iemMemFetchDataU64Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7163{
7164 /* The lazy approach for now... */
7165 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem,
7166 IEM_ACCESS_DATA_R, sizeof(*pu64Src) - 1);
7167 uint64_t const u64Ret = *pu64Src;
7168 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
7169 return u64Ret;
7170}
7171#endif
7172
7173
7174/**
 7175 * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
7176 *
7177 * @returns Strict VBox status code.
7178 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7179 * @param pu64Dst Where to return the qword.
7180 * @param iSegReg The index of the segment register to use for
7181 * this access. The base and limits are checked.
7182 * @param GCPtrMem The address of the guest memory.
7183 */
7184VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7185{
7186 /* The lazy approach for now... */
7187 uint64_t const *pu64Src;
7188 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem,
7189 IEM_ACCESS_DATA_R, 15 | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7190 if (rc == VINF_SUCCESS)
7191 {
7192 *pu64Dst = *pu64Src;
7193 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
7194 }
7195 return rc;
7196}
7197
7198
7199#ifdef IEM_WITH_SETJMP
7200/**
 7201 * Fetches a data qword, aligned at a 16 byte boundary (for SSE), longjmp on error.
7202 *
7203 * @returns The qword.
7204 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7205 * @param iSegReg The index of the segment register to use for
7206 * this access. The base and limits are checked.
7207 * @param GCPtrMem The address of the guest memory.
7208 */
7209uint64_t iemMemFetchDataU64AlignedU128Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7210{
7211 /* The lazy approach for now... */
7212 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
7213 15 | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7214 uint64_t const u64Ret = *pu64Src;
7215 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
7216 return u64Ret;
7217}
7218#endif
7219
7220
7221/**
7222 * Fetches a data tword.
7223 *
7224 * @returns Strict VBox status code.
7225 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7226 * @param pr80Dst Where to return the tword.
7227 * @param iSegReg The index of the segment register to use for
7228 * this access. The base and limits are checked.
7229 * @param GCPtrMem The address of the guest memory.
7230 */
7231VBOXSTRICTRC iemMemFetchDataR80(PVMCPUCC pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7232{
7233 /* The lazy approach for now... */
7234 PCRTFLOAT80U pr80Src;
7235 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R, 7);
7236 if (rc == VINF_SUCCESS)
7237 {
7238 *pr80Dst = *pr80Src;
7239 rc = iemMemCommitAndUnmap(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
7240 }
7241 return rc;
7242}
7243
7244
7245#ifdef IEM_WITH_SETJMP
7246/**
7247 * Fetches a data tword, longjmp on error.
7248 *
7249 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7250 * @param pr80Dst Where to return the tword.
7251 * @param iSegReg The index of the segment register to use for
7252 * this access. The base and limits are checked.
7253 * @param GCPtrMem The address of the guest memory.
7254 */
7255void iemMemFetchDataR80Jmp(PVMCPUCC pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7256{
7257 /* The lazy approach for now... */
7258 PCRTFLOAT80U pr80Src = (PCRTFLOAT80U)iemMemMapJmp(pVCpu, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R, 7);
7259 *pr80Dst = *pr80Src;
7260 iemMemCommitAndUnmapJmp(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
7261}
7262#endif
7263
7264
7265/**
7266 * Fetches a data decimal tword.
7267 *
7268 * @returns Strict VBox status code.
7269 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7270 * @param pd80Dst Where to return the tword.
7271 * @param iSegReg The index of the segment register to use for
7272 * this access. The base and limits are checked.
7273 * @param GCPtrMem The address of the guest memory.
7274 */
7275VBOXSTRICTRC iemMemFetchDataD80(PVMCPUCC pVCpu, PRTPBCD80U pd80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7276{
7277 /* The lazy approach for now... */
7278 PCRTPBCD80U pd80Src;
7279 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pd80Src, sizeof(*pd80Src), iSegReg, GCPtrMem,
7280 IEM_ACCESS_DATA_R, 7 /** @todo FBLD alignment check */);
7281 if (rc == VINF_SUCCESS)
7282 {
7283 *pd80Dst = *pd80Src;
7284 rc = iemMemCommitAndUnmap(pVCpu, (void *)pd80Src, IEM_ACCESS_DATA_R);
7285 }
7286 return rc;
7287}
7288
7289
7290#ifdef IEM_WITH_SETJMP
7291/**
7292 * Fetches a data decimal tword, longjmp on error.
7293 *
7294 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7295 * @param pd80Dst Where to return the tword.
7296 * @param iSegReg The index of the segment register to use for
7297 * this access. The base and limits are checked.
7298 * @param GCPtrMem The address of the guest memory.
7299 */
7300void iemMemFetchDataD80Jmp(PVMCPUCC pVCpu, PRTPBCD80U pd80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7301{
7302 /* The lazy approach for now... */
7303 PCRTPBCD80U pd80Src = (PCRTPBCD80U)iemMemMapJmp(pVCpu, sizeof(*pd80Src), iSegReg, GCPtrMem,
7304 IEM_ACCESS_DATA_R, 7 /** @todo FBSTP alignment check */);
7305 *pd80Dst = *pd80Src;
7306 iemMemCommitAndUnmapJmp(pVCpu, (void *)pd80Src, IEM_ACCESS_DATA_R);
7307}
7308#endif
7309
7310
7311/**
7312 * Fetches a data dqword (double qword), generally SSE related.
7313 *
7314 * @returns Strict VBox status code.
7315 * @param pVCpu The cross context virtual CPU structure of the calling thread.
 7316 * @param pu128Dst Where to return the dqword.
7317 * @param iSegReg The index of the segment register to use for
7318 * this access. The base and limits are checked.
7319 * @param GCPtrMem The address of the guest memory.
7320 */
7321VBOXSTRICTRC iemMemFetchDataU128(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7322{
7323 /* The lazy approach for now... */
7324 PCRTUINT128U pu128Src;
7325 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem,
7326 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7327 if (rc == VINF_SUCCESS)
7328 {
7329 pu128Dst->au64[0] = pu128Src->au64[0];
7330 pu128Dst->au64[1] = pu128Src->au64[1];
7331 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7332 }
7333 return rc;
7334}
7335
7336
7337#ifdef IEM_WITH_SETJMP
7338/**
 7339 * Fetches a data dqword (double qword), generally SSE related, longjmp on error.
 7340 *
 7341 * @param pVCpu The cross context virtual CPU structure of the calling thread.
 7342 * @param pu128Dst Where to return the dqword.
7343 * @param iSegReg The index of the segment register to use for
7344 * this access. The base and limits are checked.
7345 * @param GCPtrMem The address of the guest memory.
7346 */
7347void iemMemFetchDataU128Jmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7348{
7349 /* The lazy approach for now... */
7350 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem,
7351 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7352 pu128Dst->au64[0] = pu128Src->au64[0];
7353 pu128Dst->au64[1] = pu128Src->au64[1];
7354 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7355}
7356#endif
7357
7358
7359/**
7360 * Fetches a data dqword (double qword) at an aligned address, generally SSE
7361 * related.
7362 *
7363 * Raises \#GP(0) if not aligned.
7364 *
7365 * @returns Strict VBox status code.
7366 * @param pVCpu The cross context virtual CPU structure of the calling thread.
 7367 * @param pu128Dst Where to return the dqword.
7368 * @param iSegReg The index of the segment register to use for
7369 * this access. The base and limits are checked.
7370 * @param GCPtrMem The address of the guest memory.
7371 */
7372VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7373{
7374 /* The lazy approach for now... */
7375 PCRTUINT128U pu128Src;
7376 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem,
7377 IEM_ACCESS_DATA_R, (sizeof(*pu128Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7378 if (rc == VINF_SUCCESS)
7379 {
7380 pu128Dst->au64[0] = pu128Src->au64[0];
7381 pu128Dst->au64[1] = pu128Src->au64[1];
7382 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7383 }
7384 return rc;
7385}
7386
7387
7388#ifdef IEM_WITH_SETJMP
7389/**
7390 * Fetches a data dqword (double qword) at an aligned address, generally SSE
7391 * related, longjmp on error.
7392 *
7393 * Raises \#GP(0) if not aligned.
7394 *
7395 * @param pVCpu The cross context virtual CPU structure of the calling thread.
 7396 * @param pu128Dst Where to return the dqword.
7397 * @param iSegReg The index of the segment register to use for
7398 * this access. The base and limits are checked.
7399 * @param GCPtrMem The address of the guest memory.
7400 */
7401void iemMemFetchDataU128AlignedSseJmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg,
7402 RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7403{
7404 /* The lazy approach for now... */
7405 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
7406 (sizeof(*pu128Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7407 pu128Dst->au64[0] = pu128Src->au64[0];
7408 pu128Dst->au64[1] = pu128Src->au64[1];
7409 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7410}
7411#endif
7412
7413
7414/**
7415 * Fetches a data oword (octo word), generally AVX related.
7416 *
7417 * @returns Strict VBox status code.
7418 * @param pVCpu The cross context virtual CPU structure of the calling thread.
 7419 * @param pu256Dst Where to return the oword.
7420 * @param iSegReg The index of the segment register to use for
7421 * this access. The base and limits are checked.
7422 * @param GCPtrMem The address of the guest memory.
7423 */
7424VBOXSTRICTRC iemMemFetchDataU256(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7425{
7426 /* The lazy approach for now... */
7427 PCRTUINT256U pu256Src;
7428 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem,
7429 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7430 if (rc == VINF_SUCCESS)
7431 {
7432 pu256Dst->au64[0] = pu256Src->au64[0];
7433 pu256Dst->au64[1] = pu256Src->au64[1];
7434 pu256Dst->au64[2] = pu256Src->au64[2];
7435 pu256Dst->au64[3] = pu256Src->au64[3];
7436 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
7437 }
7438 return rc;
7439}
7440
7441
7442#ifdef IEM_WITH_SETJMP
7443/**
7444 * Fetches a data oword (octo word), generally AVX related, longjmp on error.
7445 *
7446 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7447 * @param pu256Dst Where to return the oword.
7448 * @param iSegReg The index of the segment register to use for
7449 * this access. The base and limits are checked.
7450 * @param GCPtrMem The address of the guest memory.
7451 */
7452void iemMemFetchDataU256Jmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7453{
7454 /* The lazy approach for now... */
7455 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem,
7456 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7457 pu256Dst->au64[0] = pu256Src->au64[0];
7458 pu256Dst->au64[1] = pu256Src->au64[1];
7459 pu256Dst->au64[2] = pu256Src->au64[2];
7460 pu256Dst->au64[3] = pu256Src->au64[3];
7461 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
7462}
7463#endif
7464
7465
7466/**
7467 * Fetches a data oword (octo word) at an aligned address, generally AVX
7468 * related.
7469 *
7470 * Raises \#GP(0) if not aligned.
7471 *
7472 * @returns Strict VBox status code.
7473 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7474 * @param pu256Dst Where to return the oword.
7475 * @param iSegReg The index of the segment register to use for
7476 * this access. The base and limits are checked.
7477 * @param GCPtrMem The address of the guest memory.
7478 */
7479VBOXSTRICTRC iemMemFetchDataU256AlignedSse(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7480{
7481 /* The lazy approach for now... */
7482 PCRTUINT256U pu256Src;
7483 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem,
7484 IEM_ACCESS_DATA_R, (sizeof(*pu256Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7485 if (rc == VINF_SUCCESS)
7486 {
7487 pu256Dst->au64[0] = pu256Src->au64[0];
7488 pu256Dst->au64[1] = pu256Src->au64[1];
7489 pu256Dst->au64[2] = pu256Src->au64[2];
7490 pu256Dst->au64[3] = pu256Src->au64[3];
7491 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
7492 }
7493 return rc;
7494}
7495
7496
7497#ifdef IEM_WITH_SETJMP
7498/**
7499 * Fetches a data oword (octo word) at an aligned address, generally AVX
7500 * related, longjmp on error.
7501 *
7502 * Raises \#GP(0) if not aligned.
7503 *
7504 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7505 * @param pu256Dst Where to return the oword.
7506 * @param iSegReg The index of the segment register to use for
7507 * this access. The base and limits are checked.
7508 * @param GCPtrMem The address of the guest memory.
7509 */
7510void iemMemFetchDataU256AlignedSseJmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg,
7511 RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7512{
7513 /* The lazy approach for now... */
7514 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
7515 (sizeof(*pu256Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7516 pu256Dst->au64[0] = pu256Src->au64[0];
7517 pu256Dst->au64[1] = pu256Src->au64[1];
7518 pu256Dst->au64[2] = pu256Src->au64[2];
7519 pu256Dst->au64[3] = pu256Src->au64[3];
7520 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
7521}
7522#endif
7523
7524
7525
7526/**
7527 * Fetches a descriptor register (lgdt, lidt).
7528 *
7529 * @returns Strict VBox status code.
7530 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7531 * @param pcbLimit Where to return the limit.
7532 * @param pGCPtrBase Where to return the base.
7533 * @param iSegReg The index of the segment register to use for
7534 * this access. The base and limits are checked.
7535 * @param GCPtrMem The address of the guest memory.
7536 * @param enmOpSize The effective operand size.
7537 */
7538VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPUCC pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
7539 RTGCPTR GCPtrMem, IEMMODE enmOpSize) RT_NOEXCEPT
7540{
7541 /*
7542 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
7543 * little special:
7544 * - The two reads are done separately.
7545 * - Operand size override works in 16-bit and 32-bit code, but not in 64-bit code.
7546 * - We suspect the 386 to actually commit the limit before the base in
7547 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
7548 * don't try to emulate this eccentric behavior, because it's not well
7549 * enough understood and rather hard to trigger.
7550 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
7551 */
7552 VBOXSTRICTRC rcStrict;
7553 if (IEM_IS_64BIT_CODE(pVCpu))
7554 {
7555 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7556 if (rcStrict == VINF_SUCCESS)
7557 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
7558 }
7559 else
7560 {
7561 uint32_t uTmp = 0; /* (silences a Visual C++ 'potentially uninitialized' warning) */
7562 if (enmOpSize == IEMMODE_32BIT)
7563 {
7564 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
7565 {
7566 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7567 if (rcStrict == VINF_SUCCESS)
7568 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7569 }
7570 else
7571 {
7572 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
7573 if (rcStrict == VINF_SUCCESS)
7574 {
7575 *pcbLimit = (uint16_t)uTmp;
7576 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7577 }
7578 }
7579 if (rcStrict == VINF_SUCCESS)
7580 *pGCPtrBase = uTmp;
7581 }
7582 else
7583 {
7584 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7585 if (rcStrict == VINF_SUCCESS)
7586 {
7587 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7588 if (rcStrict == VINF_SUCCESS)
7589 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
7590 }
7591 }
7592 }
7593 return rcStrict;
7594}
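/*
 * Illustrative sketch only (not compiled): roughly how an LGDT/LIDT style caller
 * would be expected to use iemMemFetchDataXdtr.  The decoder state used here
 * (iEffSeg, GCPtrEffSrc, enmEffOpSize) is assumed to come from the instruction
 * emulation and is not defined in this sketch.
 *
 *     uint16_t     cbLimit;
 *     RTGCPTR      GCPtrBase;
 *     VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(pVCpu, &cbLimit, &GCPtrBase,
 *                                                 pVCpu->iem.s.iEffSeg, GCPtrEffSrc,
 *                                                 pVCpu->iem.s.enmEffOpSize);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 *     // ...then hand cbLimit and GCPtrBase to CPUM to load GDTR or IDTR...
 */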
7595
7596
7597
7598/**
7599 * Stores a data byte.
7600 *
7601 * @returns Strict VBox status code.
7602 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7603 * @param iSegReg The index of the segment register to use for
7604 * this access. The base and limits are checked.
7605 * @param GCPtrMem The address of the guest memory.
7606 * @param u8Value The value to store.
7607 */
7608VBOXSTRICTRC iemMemStoreDataU8(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value) RT_NOEXCEPT
7609{
7610 /* The lazy approach for now... */
7611 uint8_t *pu8Dst;
7612 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W, 0);
7613 if (rc == VINF_SUCCESS)
7614 {
7615 *pu8Dst = u8Value;
7616 rc = iemMemCommitAndUnmap(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
7617 }
7618 return rc;
7619}
7620
7621
7622#ifdef IEM_WITH_SETJMP
7623/**
7624 * Stores a data byte, longjmp on error.
7625 *
7626 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7627 * @param iSegReg The index of the segment register to use for
7628 * this access. The base and limits are checked.
7629 * @param GCPtrMem The address of the guest memory.
7630 * @param u8Value The value to store.
7631 */
7632void iemMemStoreDataU8Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value) IEM_NOEXCEPT_MAY_LONGJMP
7633{
7634 /* The lazy approach for now... */
7635 uint8_t *pu8Dst = (uint8_t *)iemMemMapJmp(pVCpu, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W, 0);
7636 *pu8Dst = u8Value;
7637 iemMemCommitAndUnmapJmp(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
7638}
7639#endif
7640
7641
7642/**
7643 * Stores a data word.
7644 *
7645 * @returns Strict VBox status code.
7646 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7647 * @param iSegReg The index of the segment register to use for
7648 * this access. The base and limits are checked.
7649 * @param GCPtrMem The address of the guest memory.
7650 * @param u16Value The value to store.
7651 */
7652VBOXSTRICTRC iemMemStoreDataU16(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value) RT_NOEXCEPT
7653{
7654 /* The lazy approach for now... */
7655 uint16_t *pu16Dst;
7656 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem,
7657 IEM_ACCESS_DATA_W, sizeof(*pu16Dst) - 1);
7658 if (rc == VINF_SUCCESS)
7659 {
7660 *pu16Dst = u16Value;
7661 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
7662 }
7663 return rc;
7664}
7665
7666
7667#ifdef IEM_WITH_SETJMP
7668/**
7669 * Stores a data word, longjmp on error.
7670 *
7671 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7672 * @param iSegReg The index of the segment register to use for
7673 * this access. The base and limits are checked.
7674 * @param GCPtrMem The address of the guest memory.
7675 * @param u16Value The value to store.
7676 */
7677void iemMemStoreDataU16Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value) IEM_NOEXCEPT_MAY_LONGJMP
7678{
7679 /* The lazy approach for now... */
7680 uint16_t *pu16Dst = (uint16_t *)iemMemMapJmp(pVCpu, sizeof(*pu16Dst), iSegReg, GCPtrMem,
7681 IEM_ACCESS_DATA_W, sizeof(*pu16Dst) - 1);
7682 *pu16Dst = u16Value;
7683 iemMemCommitAndUnmapJmp(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
7684}
7685#endif
7686
7687
7688/**
7689 * Stores a data dword.
7690 *
7691 * @returns Strict VBox status code.
7692 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7693 * @param iSegReg The index of the segment register to use for
7694 * this access. The base and limits are checked.
7695 * @param GCPtrMem The address of the guest memory.
7696 * @param u32Value The value to store.
7697 */
7698VBOXSTRICTRC iemMemStoreDataU32(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value) RT_NOEXCEPT
7699{
7700 /* The lazy approach for now... */
7701 uint32_t *pu32Dst;
7702 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem,
7703 IEM_ACCESS_DATA_W, sizeof(*pu32Dst) - 1);
7704 if (rc == VINF_SUCCESS)
7705 {
7706 *pu32Dst = u32Value;
7707 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
7708 }
7709 return rc;
7710}
7711
7712
7713#ifdef IEM_WITH_SETJMP
7714/**
7715 * Stores a data dword, longjmp on error.
7716 *
7718 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7719 * @param iSegReg The index of the segment register to use for
7720 * this access. The base and limits are checked.
7721 * @param GCPtrMem The address of the guest memory.
7722 * @param u32Value The value to store.
7723 */
7724void iemMemStoreDataU32Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value) IEM_NOEXCEPT_MAY_LONGJMP
7725{
7726 /* The lazy approach for now... */
7727 uint32_t *pu32Dst = (uint32_t *)iemMemMapJmp(pVCpu, sizeof(*pu32Dst), iSegReg, GCPtrMem,
7728 IEM_ACCESS_DATA_W, sizeof(*pu32Dst) - 1);
7729 *pu32Dst = u32Value;
7730 iemMemCommitAndUnmapJmp(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
7731}
7732#endif
7733
7734
7735/**
7736 * Stores a data qword.
7737 *
7738 * @returns Strict VBox status code.
7739 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7740 * @param iSegReg The index of the segment register to use for
7741 * this access. The base and limits are checked.
7742 * @param GCPtrMem The address of the guest memory.
7743 * @param u64Value The value to store.
7744 */
7745VBOXSTRICTRC iemMemStoreDataU64(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value) RT_NOEXCEPT
7746{
7747 /* The lazy approach for now... */
7748 uint64_t *pu64Dst;
7749 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem,
7750 IEM_ACCESS_DATA_W, sizeof(*pu64Dst) - 1);
7751 if (rc == VINF_SUCCESS)
7752 {
7753 *pu64Dst = u64Value;
7754 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
7755 }
7756 return rc;
7757}
7758
7759
7760#ifdef IEM_WITH_SETJMP
7761/**
7762 * Stores a data qword, longjmp on error.
7763 *
7764 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7765 * @param iSegReg The index of the segment register to use for
7766 * this access. The base and limits are checked.
7767 * @param GCPtrMem The address of the guest memory.
7768 * @param u64Value The value to store.
7769 */
7770void iemMemStoreDataU64Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value) IEM_NOEXCEPT_MAY_LONGJMP
7771{
7772 /* The lazy approach for now... */
7773 uint64_t *pu64Dst = (uint64_t *)iemMemMapJmp(pVCpu, sizeof(*pu64Dst), iSegReg, GCPtrMem,
7774 IEM_ACCESS_DATA_W, sizeof(*pu64Dst) - 1);
7775 *pu64Dst = u64Value;
7776 iemMemCommitAndUnmapJmp(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
7777}
7778#endif
7779
7780
7781/**
7782 * Stores a data dqword.
7783 *
7784 * @returns Strict VBox status code.
7785 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7786 * @param iSegReg The index of the segment register to use for
7787 * this access. The base and limits are checked.
7788 * @param GCPtrMem The address of the guest memory.
7789 * @param u128Value The value to store.
7790 */
7791VBOXSTRICTRC iemMemStoreDataU128(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT
7792{
7793 /* The lazy approach for now... */
7794 PRTUINT128U pu128Dst;
7795 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem,
7796 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7797 if (rc == VINF_SUCCESS)
7798 {
7799 pu128Dst->au64[0] = u128Value.au64[0];
7800 pu128Dst->au64[1] = u128Value.au64[1];
7801 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
7802 }
7803 return rc;
7804}
7805
7806
7807#ifdef IEM_WITH_SETJMP
7808/**
7809 * Stores a data dqword, longjmp on error.
7810 *
7811 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7812 * @param iSegReg The index of the segment register to use for
7813 * this access. The base and limits are checked.
7814 * @param GCPtrMem The address of the guest memory.
7815 * @param u128Value The value to store.
7816 */
7817void iemMemStoreDataU128Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) IEM_NOEXCEPT_MAY_LONGJMP
7818{
7819 /* The lazy approach for now... */
7820 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem,
7821 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7822 pu128Dst->au64[0] = u128Value.au64[0];
7823 pu128Dst->au64[1] = u128Value.au64[1];
7824 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
7825}
7826#endif
7827
7828
7829/**
7830 * Stores a data dqword, SSE aligned.
7831 *
7832 * @returns Strict VBox status code.
7833 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7834 * @param iSegReg The index of the segment register to use for
7835 * this access. The base and limits are checked.
7836 * @param GCPtrMem The address of the guest memory.
7837 * @param u128Value The value to store.
7838 */
7839VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT
7840{
7841 /* The lazy approach for now... */
7842 PRTUINT128U pu128Dst;
7843 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W,
7844 (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7845 if (rc == VINF_SUCCESS)
7846 {
7847 pu128Dst->au64[0] = u128Value.au64[0];
7848 pu128Dst->au64[1] = u128Value.au64[1];
7849 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
7850 }
7851 return rc;
7852}
7853
7854
7855#ifdef IEM_WITH_SETJMP
7856/**
7857 * Stores a data dqword, SSE aligned, longjmp on error.
7858 *
7860 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7861 * @param iSegReg The index of the segment register to use for
7862 * this access. The base and limits are checked.
7863 * @param GCPtrMem The address of the guest memory.
7864 * @param u128Value The value to store.
7865 */
7866void iemMemStoreDataU128AlignedSseJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem,
7867 RTUINT128U u128Value) IEM_NOEXCEPT_MAY_LONGJMP
7868{
7869 /* The lazy approach for now... */
7870 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W,
7871 (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7872 pu128Dst->au64[0] = u128Value.au64[0];
7873 pu128Dst->au64[1] = u128Value.au64[1];
7874 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
7875}
7876#endif
7877
7878
7879/**
7880 * Stores a data oword (octo word), generally AVX related.
7881 *
7882 * @returns Strict VBox status code.
7883 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7884 * @param iSegReg The index of the segment register to use for
7885 * this access. The base and limits are checked.
7886 * @param GCPtrMem The address of the guest memory.
7887 * @param pu256Value Pointer to the value to store.
7888 */
7889VBOXSTRICTRC iemMemStoreDataU256(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
7890{
7891 /* The lazy approach for now... */
7892 PRTUINT256U pu256Dst;
7893 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7894 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7895 if (rc == VINF_SUCCESS)
7896 {
7897 pu256Dst->au64[0] = pu256Value->au64[0];
7898 pu256Dst->au64[1] = pu256Value->au64[1];
7899 pu256Dst->au64[2] = pu256Value->au64[2];
7900 pu256Dst->au64[3] = pu256Value->au64[3];
7901 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
7902 }
7903 return rc;
7904}
7905
7906
7907#ifdef IEM_WITH_SETJMP
7908/**
7909 * Stores a data oword (octo word), longjmp on error.
7910 *
7911 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7912 * @param iSegReg The index of the segment register to use for
7913 * this access. The base and limits are checked.
7914 * @param GCPtrMem The address of the guest memory.
7915 * @param pu256Value Pointer to the value to store.
7916 */
7917void iemMemStoreDataU256Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP
7918{
7919 /* The lazy approach for now... */
7920 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7921 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7922 pu256Dst->au64[0] = pu256Value->au64[0];
7923 pu256Dst->au64[1] = pu256Value->au64[1];
7924 pu256Dst->au64[2] = pu256Value->au64[2];
7925 pu256Dst->au64[3] = pu256Value->au64[3];
7926 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
7927}
7928#endif
7929
7930
7931/**
7932 * Stores a data oword (octo word), AVX \#GP(0) aligned.
7933 *
7934 * @returns Strict VBox status code.
7935 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7936 * @param iSegReg The index of the segment register to use for
7937 * this access. The base and limits are checked.
7938 * @param GCPtrMem The address of the guest memory.
7939 * @param pu256Value Pointer to the value to store.
7940 */
7941VBOXSTRICTRC iemMemStoreDataU256AlignedAvx(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
7942{
7943 /* The lazy approach for now... */
7944 PRTUINT256U pu256Dst;
7945 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7946 IEM_ACCESS_DATA_W, (sizeof(*pu256Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP);
7947 if (rc == VINF_SUCCESS)
7948 {
7949 pu256Dst->au64[0] = pu256Value->au64[0];
7950 pu256Dst->au64[1] = pu256Value->au64[1];
7951 pu256Dst->au64[2] = pu256Value->au64[2];
7952 pu256Dst->au64[3] = pu256Value->au64[3];
7953 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
7954 }
7955 return rc;
7956}
7957
7958
7959#ifdef IEM_WITH_SETJMP
7960/**
7961 * Stores a data oword (octo word), AVX aligned, longjmp on error.
7962 *
7964 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7965 * @param iSegReg The index of the segment register to use for
7966 * this access. The base and limits are checked.
7967 * @param GCPtrMem The address of the guest memory.
7968 * @param pu256Value Pointer to the value to store.
7969 */
7970void iemMemStoreDataU256AlignedAvxJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem,
7971 PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP
7972{
7973 /* The lazy approach for now... */
7974 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7975 IEM_ACCESS_DATA_W, (sizeof(*pu256Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP);
7976 pu256Dst->au64[0] = pu256Value->au64[0];
7977 pu256Dst->au64[1] = pu256Value->au64[1];
7978 pu256Dst->au64[2] = pu256Value->au64[2];
7979 pu256Dst->au64[3] = pu256Value->au64[3];
7980 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
7981}
7982#endif
7983
7984
7985/**
7986 * Stores a descriptor register (sgdt, sidt).
7987 *
7988 * @returns Strict VBox status code.
7989 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7990 * @param cbLimit The limit.
7991 * @param GCPtrBase The base address.
7992 * @param iSegReg The index of the segment register to use for
7993 * this access. The base and limits are checked.
7994 * @param GCPtrMem The address of the guest memory.
7995 */
7996VBOXSTRICTRC iemMemStoreDataXdtr(PVMCPUCC pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7997{
7998 /*
7999 * The SIDT and SGDT instructions actually store the data using two
8000 * independent writes (see bs3CpuBasic2_sidt_sgdt_One). The instructions
8001 * do not respond to opsize prefixes.
8002 */
8003 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
8004 if (rcStrict == VINF_SUCCESS)
8005 {
8006 if (IEM_IS_16BIT_CODE(pVCpu))
8007 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
8008 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
8009 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
8010 else if (IEM_IS_32BIT_CODE(pVCpu))
8011 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
8012 else
8013 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
8014 }
8015 return rcStrict;
8016}
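/*
 * Illustrative sketch only (not compiled): an SGDT style caller would typically
 * pull the limit and base straight out of the guest GDTR and pass them here;
 * GCPtrEffDst and the effective segment are assumed to come from the decoded
 * instruction and are not defined in this sketch.
 *
 *     VBOXSTRICTRC rcStrict = iemMemStoreDataXdtr(pVCpu,
 *                                                 pVCpu->cpum.GstCtx.gdtr.cbGdt,
 *                                                 pVCpu->cpum.GstCtx.gdtr.pGdt,
 *                                                 pVCpu->iem.s.iEffSeg, GCPtrEffDst);
 */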
8017
8018
8019/**
8020 * Pushes a word onto the stack.
8021 *
8022 * @returns Strict VBox status code.
8023 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8024 * @param u16Value The value to push.
8025 */
8026VBOXSTRICTRC iemMemStackPushU16(PVMCPUCC pVCpu, uint16_t u16Value) RT_NOEXCEPT
8027{
8028 /* Decrement the stack pointer. */
8029 uint64_t uNewRsp;
8030 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 2, &uNewRsp);
8031
8032 /* Write the word the lazy way. */
8033 uint16_t *pu16Dst;
8034 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop,
8035 IEM_ACCESS_STACK_W, sizeof(*pu16Dst) - 1);
8036 if (rc == VINF_SUCCESS)
8037 {
8038 *pu16Dst = u16Value;
8039 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
8040 }
8041
8042 /* Commit the new RSP value unless an access handler made trouble. */
8043 if (rc == VINF_SUCCESS)
8044 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8045
8046 return rc;
8047}
8048
8049
8050/**
8051 * Pushes a dword onto the stack.
8052 *
8053 * @returns Strict VBox status code.
8054 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8055 * @param u32Value The value to push.
8056 */
8057VBOXSTRICTRC iemMemStackPushU32(PVMCPUCC pVCpu, uint32_t u32Value) RT_NOEXCEPT
8058{
8059 /* Decrement the stack pointer. */
8060 uint64_t uNewRsp;
8061 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
8062
8063 /* Write the dword the lazy way. */
8064 uint32_t *pu32Dst;
8065 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop,
8066 IEM_ACCESS_STACK_W, sizeof(*pu32Dst) - 1);
8067 if (rc == VINF_SUCCESS)
8068 {
8069 *pu32Dst = u32Value;
8070 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
8071 }
8072
8073 /* Commit the new RSP value unless an access handler made trouble. */
8074 if (rc == VINF_SUCCESS)
8075 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8076
8077 return rc;
8078}
8079
8080
8081/**
8082 * Pushes a dword segment register value onto the stack.
8083 *
8084 * @returns Strict VBox status code.
8085 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8086 * @param u32Value The value to push.
8087 */
8088VBOXSTRICTRC iemMemStackPushU32SReg(PVMCPUCC pVCpu, uint32_t u32Value) RT_NOEXCEPT
8089{
8090 /* Decrement the stack pointer. */
8091 uint64_t uNewRsp;
8092 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
8093
8094 /* The Intel docs talk about zero extending the selector register
8095 value. My actual Intel CPU here might be zero extending the value,
8096 but it still only writes the lower word... */
8097 /** @todo Test this on new HW and on AMD and in 64-bit mode. Also test what
8098 * happens when crossing a page boundary: is the high word checked
8099 * for write accessibility or not? Probably it is. What about segment limits?
8100 * It appears this behavior is also shared with trap error codes.
8101 *
8102 * Docs indicate the behavior changed maybe in Pentium or Pentium Pro. Check
8103 * ancient hardware when it actually did change. */
8104 uint16_t *pu16Dst;
8105 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop,
8106 IEM_ACCESS_STACK_RW, sizeof(*pu16Dst) - 1); /** @todo 2 or 4 alignment check for PUSH SS? */
8107 if (rc == VINF_SUCCESS)
8108 {
8109 *pu16Dst = (uint16_t)u32Value;
8110 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_RW);
8111 }
8112
8113 /* Commit the new RSP value unless an access handler made trouble. */
8114 if (rc == VINF_SUCCESS)
8115 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8116
8117 return rc;
8118}
8119
8120
8121/**
8122 * Pushes a qword onto the stack.
8123 *
8124 * @returns Strict VBox status code.
8125 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8126 * @param u64Value The value to push.
8127 */
8128VBOXSTRICTRC iemMemStackPushU64(PVMCPUCC pVCpu, uint64_t u64Value) RT_NOEXCEPT
8129{
8130 /* Decrement the stack pointer. */
8131 uint64_t uNewRsp;
8132 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 8, &uNewRsp);
8133
8134 /* Write the qword the lazy way. */
8135 uint64_t *pu64Dst;
8136 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop,
8137 IEM_ACCESS_STACK_W, sizeof(*pu64Dst) - 1);
8138 if (rc == VINF_SUCCESS)
8139 {
8140 *pu64Dst = u64Value;
8141 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
8142 }
8143
8144 /* Commit the new RSP value unless an access handler made trouble. */
8145 if (rc == VINF_SUCCESS)
8146 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8147
8148 return rc;
8149}
8150
8151
8152/**
8153 * Pops a word from the stack.
8154 *
8155 * @returns Strict VBox status code.
8156 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8157 * @param pu16Value Where to store the popped value.
8158 */
8159VBOXSTRICTRC iemMemStackPopU16(PVMCPUCC pVCpu, uint16_t *pu16Value) RT_NOEXCEPT
8160{
8161 /* Increment the stack pointer. */
8162 uint64_t uNewRsp;
8163 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 2, &uNewRsp);
8164
8165 /* Read the word the lazy way. */
8166 uint16_t const *pu16Src;
8167 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop,
8168 IEM_ACCESS_STACK_R, sizeof(*pu16Src) - 1);
8169 if (rc == VINF_SUCCESS)
8170 {
8171 *pu16Value = *pu16Src;
8172 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
8173
8174 /* Commit the new RSP value. */
8175 if (rc == VINF_SUCCESS)
8176 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8177 }
8178
8179 return rc;
8180}
8181
8182
8183/**
8184 * Pops a dword from the stack.
8185 *
8186 * @returns Strict VBox status code.
8187 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8188 * @param pu32Value Where to store the popped value.
8189 */
8190VBOXSTRICTRC iemMemStackPopU32(PVMCPUCC pVCpu, uint32_t *pu32Value) RT_NOEXCEPT
8191{
8192 /* Increment the stack pointer. */
8193 uint64_t uNewRsp;
8194 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 4, &uNewRsp);
8195
8196 /* Read the dword the lazy way. */
8197 uint32_t const *pu32Src;
8198 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop,
8199 IEM_ACCESS_STACK_R, sizeof(*pu32Src) - 1);
8200 if (rc == VINF_SUCCESS)
8201 {
8202 *pu32Value = *pu32Src;
8203 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
8204
8205 /* Commit the new RSP value. */
8206 if (rc == VINF_SUCCESS)
8207 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8208 }
8209
8210 return rc;
8211}
8212
8213
8214/**
8215 * Pops a qword from the stack.
8216 *
8217 * @returns Strict VBox status code.
8218 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8219 * @param pu64Value Where to store the popped value.
8220 */
8221VBOXSTRICTRC iemMemStackPopU64(PVMCPUCC pVCpu, uint64_t *pu64Value) RT_NOEXCEPT
8222{
8223 /* Increment the stack pointer. */
8224 uint64_t uNewRsp;
8225 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 8, &uNewRsp);
8226
8227 /* Read the qword the lazy way. */
8228 uint64_t const *pu64Src;
8229 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop,
8230 IEM_ACCESS_STACK_R, sizeof(*pu64Src) - 1);
8231 if (rc == VINF_SUCCESS)
8232 {
8233 *pu64Value = *pu64Src;
8234 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
8235
8236 /* Commit the new RSP value. */
8237 if (rc == VINF_SUCCESS)
8238 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8239 }
8240
8241 return rc;
8242}
8243
8244
8245/**
8246 * Pushes a word onto the stack, using a temporary stack pointer.
8247 *
8248 * @returns Strict VBox status code.
8249 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8250 * @param u16Value The value to push.
8251 * @param pTmpRsp Pointer to the temporary stack pointer.
8252 */
8253VBOXSTRICTRC iemMemStackPushU16Ex(PVMCPUCC pVCpu, uint16_t u16Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8254{
8255 /* Decrement the stack pointer. */
8256 RTUINT64U NewRsp = *pTmpRsp;
8257 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 2);
8258
8259 /* Write the word the lazy way. */
8260 uint16_t *pu16Dst;
8261 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop,
8262 IEM_ACCESS_STACK_W, sizeof(*pu16Dst) - 1);
8263 if (rc == VINF_SUCCESS)
8264 {
8265 *pu16Dst = u16Value;
8266 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
8267 }
8268
8269 /* Commit the new RSP value unless an access handler made trouble. */
8270 if (rc == VINF_SUCCESS)
8271 *pTmpRsp = NewRsp;
8272
8273 return rc;
8274}
8275
8276
8277/**
8278 * Pushes a dword onto the stack, using a temporary stack pointer.
8279 *
8280 * @returns Strict VBox status code.
8281 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8282 * @param u32Value The value to push.
8283 * @param pTmpRsp Pointer to the temporary stack pointer.
8284 */
8285VBOXSTRICTRC iemMemStackPushU32Ex(PVMCPUCC pVCpu, uint32_t u32Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8286{
8287 /* Decrement the stack pointer. */
8288 RTUINT64U NewRsp = *pTmpRsp;
8289 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 4);
8290
8291 /* Write the dword the lazy way. */
8292 uint32_t *pu32Dst;
8293 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop,
8294 IEM_ACCESS_STACK_W, sizeof(*pu32Dst) - 1);
8295 if (rc == VINF_SUCCESS)
8296 {
8297 *pu32Dst = u32Value;
8298 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
8299 }
8300
8301 /* Commit the new RSP value unless an access handler made trouble. */
8302 if (rc == VINF_SUCCESS)
8303 *pTmpRsp = NewRsp;
8304
8305 return rc;
8306}
8307
8308
8309/**
8310 * Pushes a qword onto the stack, using a temporary stack pointer.
8311 *
8312 * @returns Strict VBox status code.
8313 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8314 * @param u64Value The value to push.
8315 * @param pTmpRsp Pointer to the temporary stack pointer.
8316 */
8317VBOXSTRICTRC iemMemStackPushU64Ex(PVMCPUCC pVCpu, uint64_t u64Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8318{
8319 /* Decrement the stack pointer. */
8320 RTUINT64U NewRsp = *pTmpRsp;
8321 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 8);
8322
8323 /* Write the qword the lazy way. */
8324 uint64_t *pu64Dst;
8325 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop,
8326 IEM_ACCESS_STACK_W, sizeof(*pu64Dst) - 1);
8327 if (rc == VINF_SUCCESS)
8328 {
8329 *pu64Dst = u64Value;
8330 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
8331 }
8332
8333 /* Commit the new RSP value unless an access handler made trouble. */
8334 if (rc == VINF_SUCCESS)
8335 *pTmpRsp = NewRsp;
8336
8337 return rc;
8338}
8339
8340
8341/**
8342 * Pops a word from the stack, using a temporary stack pointer.
8343 *
8344 * @returns Strict VBox status code.
8345 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8346 * @param pu16Value Where to store the popped value.
8347 * @param pTmpRsp Pointer to the temporary stack pointer.
8348 */
8349VBOXSTRICTRC iemMemStackPopU16Ex(PVMCPUCC pVCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8350{
8351 /* Increment the stack pointer. */
8352 RTUINT64U NewRsp = *pTmpRsp;
8353 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 2);
8354
8355 /* Read the word the lazy way. */
8356 uint16_t const *pu16Src;
8357 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop,
8358 IEM_ACCESS_STACK_R, sizeof(*pu16Src) - 1);
8359 if (rc == VINF_SUCCESS)
8360 {
8361 *pu16Value = *pu16Src;
8362 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
8363
8364 /* Commit the new RSP value. */
8365 if (rc == VINF_SUCCESS)
8366 *pTmpRsp = NewRsp;
8367 }
8368
8369 return rc;
8370}
8371
8372
8373/**
8374 * Pops a dword from the stack, using a temporary stack pointer.
8375 *
8376 * @returns Strict VBox status code.
8377 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8378 * @param pu32Value Where to store the popped value.
8379 * @param pTmpRsp Pointer to the temporary stack pointer.
8380 */
8381VBOXSTRICTRC iemMemStackPopU32Ex(PVMCPUCC pVCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8382{
8383 /* Increment the stack pointer. */
8384 RTUINT64U NewRsp = *pTmpRsp;
8385 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 4);
8386
8387 /* Read the dword the lazy way. */
8388 uint32_t const *pu32Src;
8389 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop,
8390 IEM_ACCESS_STACK_R, sizeof(*pu32Src) - 1);
8391 if (rc == VINF_SUCCESS)
8392 {
8393 *pu32Value = *pu32Src;
8394 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
8395
8396 /* Commit the new RSP value. */
8397 if (rc == VINF_SUCCESS)
8398 *pTmpRsp = NewRsp;
8399 }
8400
8401 return rc;
8402}
8403
8404
8405/**
8406 * Pops a qword from the stack, using a temporary stack pointer.
8407 *
8408 * @returns Strict VBox status code.
8409 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8410 * @param pu64Value Where to store the popped value.
8411 * @param pTmpRsp Pointer to the temporary stack pointer.
8412 */
8413VBOXSTRICTRC iemMemStackPopU64Ex(PVMCPUCC pVCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8414{
8415 /* Increment the stack pointer. */
8416 RTUINT64U NewRsp = *pTmpRsp;
8417 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 8);
8418
8419 /* Read the qword the lazy way. */
8420 uint64_t const *pu64Src;
8421 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop,
8422 IEM_ACCESS_STACK_R, sizeof(*pu64Src) - 1);
8423 if (rcStrict == VINF_SUCCESS)
8424 {
8425 *pu64Value = *pu64Src;
8426 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
8427
8428 /* Commit the new RSP value. */
8429 if (rcStrict == VINF_SUCCESS)
8430 *pTmpRsp = NewRsp;
8431 }
8432
8433 return rcStrict;
8434}
8435
8436
8437/**
8438 * Begin a special stack push (used by interrupt, exceptions and such).
8439 *
8440 * This will raise \#SS or \#PF if appropriate.
8441 *
8442 * @returns Strict VBox status code.
8443 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8444 * @param cbMem The number of bytes to push onto the stack.
8445 * @param cbAlign The alignment mask (7, 3, 1).
8446 * @param ppvMem Where to return the pointer to the stack memory.
8447 * As with the other memory functions, this could be
8448 * direct access or bounce buffered access, so
8449 * don't commit any register state until the commit
8450 * call succeeds.
8451 * @param puNewRsp Where to return the new RSP value. This must be
8452 * passed unchanged to
8453 * iemMemStackPushCommitSpecial().
8454 */
8455VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
8456 void **ppvMem, uint64_t *puNewRsp) RT_NOEXCEPT
8457{
8458 Assert(cbMem < UINT8_MAX);
8459 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, (uint8_t)cbMem, puNewRsp);
8460 return iemMemMap(pVCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop,
8461 IEM_ACCESS_STACK_W, cbAlign);
8462}
8463
8464
8465/**
8466 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
8467 *
8468 * This will update the rSP.
8469 *
8470 * @returns Strict VBox status code.
8471 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8472 * @param pvMem The pointer returned by
8473 * iemMemStackPushBeginSpecial().
8474 * @param uNewRsp The new RSP value returned by
8475 * iemMemStackPushBeginSpecial().
8476 */
8477VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPUCC pVCpu, void *pvMem, uint64_t uNewRsp) RT_NOEXCEPT
8478{
8479 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem, IEM_ACCESS_STACK_W);
8480 if (rcStrict == VINF_SUCCESS)
8481 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8482 return rcStrict;
8483}
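/*
 * Illustrative sketch only (not compiled): the begin/commit pair is meant to be
 * driven like this by exception/interrupt style pushers.  The frame size, the
 * alignment mask and the uErr/uOldEip values written below are made up for the
 * example.
 *
 *     void        *pvStackFrame;
 *     uint64_t     uNewRsp;
 *     VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pVCpu, 8, 3, &pvStackFrame, &uNewRsp);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 *     ((uint32_t *)pvStackFrame)[0] = uOldEip;   // hypothetical payload
 *     ((uint32_t *)pvStackFrame)[1] = uErr;      // hypothetical payload
 *     rcStrict = iemMemStackPushCommitSpecial(pVCpu, pvStackFrame, uNewRsp); // commits RSP on success
 */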
8484
8485
8486/**
8487 * Begin a special stack pop (used by iret, retf and such).
8488 *
8489 * This will raise \#SS or \#PF if appropriate.
8490 *
8491 * @returns Strict VBox status code.
8492 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8493 * @param cbMem The number of bytes to pop from the stack.
8494 * @param cbAlign The alignment mask (7, 3, 1).
8495 * @param ppvMem Where to return the pointer to the stack memory.
8496 * @param puNewRsp Where to return the new RSP value. This must be
8497 * assigned to CPUMCTX::rsp manually some time
8498 * after iemMemStackPopDoneSpecial() has been
8499 * called.
8500 */
8501VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
8502 void const **ppvMem, uint64_t *puNewRsp) RT_NOEXCEPT
8503{
8504 Assert(cbMem < UINT8_MAX);
8505 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, (uint8_t)cbMem, puNewRsp);
8506 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R, cbAlign);
8507}
8508
8509
8510/**
8511 * Continue a special stack pop (used by iret and retf), for the purpose of
8512 * retrieving a new stack pointer.
8513 *
8514 * This will raise \#SS or \#PF if appropriate.
8515 *
8516 * @returns Strict VBox status code.
8517 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8518 * @param off Offset from the top of the stack. This is zero
8519 * except in the retf case.
8520 * @param cbMem The number of bytes to pop from the stack.
8521 * @param ppvMem Where to return the pointer to the stack memory.
8522 * @param uCurNewRsp The current uncommitted RSP value. (No need to
8523 * return this because all use of this function is
8524 * to retrieve a new value and anything we return
8525 * here would be discarded.)
8526 */
8527VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPUCC pVCpu, size_t off, size_t cbMem,
8528 void const **ppvMem, uint64_t uCurNewRsp) RT_NOEXCEPT
8529{
8530 Assert(cbMem < UINT8_MAX);
8531
8532 /* The essence of iemRegGetRspForPopEx and friends: */ /** @todo put this into an inlined function? */
8533 RTGCPTR GCPtrTop;
8534 if (IEM_IS_64BIT_CODE(pVCpu))
8535 GCPtrTop = uCurNewRsp;
8536 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
8537 GCPtrTop = (uint32_t)uCurNewRsp;
8538 else
8539 GCPtrTop = (uint16_t)uCurNewRsp;
8540
8541 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop + off, IEM_ACCESS_STACK_R,
8542 0 /* checked in iemMemStackPopBeginSpecial */);
8543}
8544
8545
8546/**
8547 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
8548 * iemMemStackPopContinueSpecial).
8549 *
8550 * The caller will manually commit the rSP.
8551 *
8552 * @returns Strict VBox status code.
8553 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8554 * @param pvMem The pointer returned by
8555 * iemMemStackPopBeginSpecial() or
8556 * iemMemStackPopContinueSpecial().
8557 */
8558VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPUCC pVCpu, void const *pvMem) RT_NOEXCEPT
8559{
8560 return iemMemCommitAndUnmap(pVCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
8561}
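/*
 * Illustrative sketch only (not compiled): IRET/RETF style code is expected to
 * drive the begin/done pair (optionally with a continue call in between) along
 * these lines.  The three dword frame layout is made up for the example.
 *
 *     uint32_t const *pau32Frame;
 *     uint64_t        uNewRsp;
 *     VBOXSTRICTRC rcStrict = iemMemStackPopBeginSpecial(pVCpu, 3 * sizeof(uint32_t), 3,
 *                                                        (void const **)&pau32Frame, &uNewRsp);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 *     uint32_t const uNewEip    = pau32Frame[0];   // hypothetical frame layout
 *     uint32_t const uNewCs     = pau32Frame[1];
 *     uint32_t const uNewEflags = pau32Frame[2];
 *     rcStrict = iemMemStackPopDoneSpecial(pVCpu, pau32Frame);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 *     pVCpu->cpum.GstCtx.rsp = uNewRsp;            // the caller commits RSP manually
 */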
8562
8563
8564/**
8565 * Fetches a system table byte.
8566 *
8567 * @returns Strict VBox status code.
8568 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8569 * @param pbDst Where to return the byte.
8570 * @param iSegReg The index of the segment register to use for
8571 * this access. The base and limits are checked.
8572 * @param GCPtrMem The address of the guest memory.
8573 */
8574VBOXSTRICTRC iemMemFetchSysU8(PVMCPUCC pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8575{
8576 /* The lazy approach for now... */
8577 uint8_t const *pbSrc;
8578 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8579 if (rc == VINF_SUCCESS)
8580 {
8581 *pbDst = *pbSrc;
8582 rc = iemMemCommitAndUnmap(pVCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
8583 }
8584 return rc;
8585}
8586
8587
8588/**
8589 * Fetches a system table word.
8590 *
8591 * @returns Strict VBox status code.
8592 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8593 * @param pu16Dst Where to return the word.
8594 * @param iSegReg The index of the segment register to use for
8595 * this access. The base and limits are checked.
8596 * @param GCPtrMem The address of the guest memory.
8597 */
8598VBOXSTRICTRC iemMemFetchSysU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8599{
8600 /* The lazy approach for now... */
8601 uint16_t const *pu16Src;
8602 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8603 if (rc == VINF_SUCCESS)
8604 {
8605 *pu16Dst = *pu16Src;
8606 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
8607 }
8608 return rc;
8609}
8610
8611
8612/**
8613 * Fetches a system table dword.
8614 *
8615 * @returns Strict VBox status code.
8616 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8617 * @param pu32Dst Where to return the dword.
8618 * @param iSegReg The index of the segment register to use for
8619 * this access. The base and limits are checked.
8620 * @param GCPtrMem The address of the guest memory.
8621 */
8622VBOXSTRICTRC iemMemFetchSysU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8623{
8624 /* The lazy approach for now... */
8625 uint32_t const *pu32Src;
8626 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8627 if (rc == VINF_SUCCESS)
8628 {
8629 *pu32Dst = *pu32Src;
8630 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
8631 }
8632 return rc;
8633}
8634
8635
8636/**
8637 * Fetches a system table qword.
8638 *
8639 * @returns Strict VBox status code.
8640 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8641 * @param pu64Dst Where to return the qword.
8642 * @param iSegReg The index of the segment register to use for
8643 * this access. The base and limits are checked.
8644 * @param GCPtrMem The address of the guest memory.
8645 */
8646VBOXSTRICTRC iemMemFetchSysU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8647{
8648 /* The lazy approach for now... */
8649 uint64_t const *pu64Src;
8650 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8651 if (rc == VINF_SUCCESS)
8652 {
8653 *pu64Dst = *pu64Src;
8654 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
8655 }
8656 return rc;
8657}
8658
8659
8660/**
8661 * Fetches a descriptor table entry with caller specified error code.
8662 *
8663 * @returns Strict VBox status code.
8664 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8665 * @param pDesc Where to return the descriptor table entry.
8666 * @param uSel The selector which table entry to fetch.
8667 * @param uXcpt The exception to raise on table lookup error.
8668 * @param uErrorCode The error code associated with the exception.
8669 */
8670static VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,
8671 uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT
8672{
8673 AssertPtr(pDesc);
8674 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
8675
8676 /** @todo did the 286 require all 8 bytes to be accessible? */
8677 /*
8678 * Get the selector table base and check bounds.
8679 */
8680 RTGCPTR GCPtrBase;
8681 if (uSel & X86_SEL_LDT)
8682 {
8683 if ( !pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present
8684 || (uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.ldtr.u32Limit )
8685 {
8686 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
8687 uSel, pVCpu->cpum.GstCtx.ldtr.u32Limit, pVCpu->cpum.GstCtx.ldtr.Sel));
8688 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8689 uErrorCode, 0);
8690 }
8691
8692 Assert(pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present);
8693 GCPtrBase = pVCpu->cpum.GstCtx.ldtr.u64Base;
8694 }
8695 else
8696 {
8697 if ((uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.gdtr.cbGdt)
8698 {
8699 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pVCpu->cpum.GstCtx.gdtr.cbGdt));
8700 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8701 uErrorCode, 0);
8702 }
8703 GCPtrBase = pVCpu->cpum.GstCtx.gdtr.pGdt;
8704 }
8705
8706 /*
8707 * Read the legacy descriptor and maybe the long mode extensions if
8708 * required.
8709 */
8710 VBOXSTRICTRC rcStrict;
8711 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)
8712 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
8713 else
8714 {
8715 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);
8716 if (rcStrict == VINF_SUCCESS)
8717 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);
8718 if (rcStrict == VINF_SUCCESS)
8719 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);
8720 if (rcStrict == VINF_SUCCESS)
8721 pDesc->Legacy.au16[3] = 0;
8722 else
8723 return rcStrict;
8724 }
8725
8726 if (rcStrict == VINF_SUCCESS)
8727 {
8728 if ( !IEM_IS_LONG_MODE(pVCpu)
8729 || pDesc->Legacy.Gen.u1DescType)
8730 pDesc->Long.au64[1] = 0;
8731 else if ( (uint32_t)(uSel | X86_SEL_RPL_LDT) + 8
8732 <= (uSel & X86_SEL_LDT ? pVCpu->cpum.GstCtx.ldtr.u32Limit : pVCpu->cpum.GstCtx.gdtr.cbGdt))
8733 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
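/* Note: in the fetch just above, (uSel | X86_SEL_RPL_LDT) + 1 rounds the selector
   offset up to the next 8 byte boundary, i.e. it addresses the upper half of the
   16 byte long mode descriptor covered by the bounds check. */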
8734 else
8735 {
8736 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
8737 /** @todo is this the right exception? */
8738 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
8739 }
8740 }
8741 return rcStrict;
8742}
8743
8744
8745/**
8746 * Fetches a descriptor table entry.
8747 *
8748 * @returns Strict VBox status code.
8749 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8750 * @param pDesc Where to return the descriptor table entry.
8751 * @param uSel The selector which table entry to fetch.
8752 * @param uXcpt The exception to raise on table lookup error.
8753 */
8754VBOXSTRICTRC iemMemFetchSelDesc(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt) RT_NOEXCEPT
8755{
8756 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
8757}
8758
8759
8760/**
8761 * Marks the selector descriptor as accessed (only non-system descriptors).
8762 *
8763 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
8764 * will therefore skip the limit checks.
8765 *
8766 * @returns Strict VBox status code.
8767 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8768 * @param uSel The selector.
8769 */
8770VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
8771{
8772 /*
8773 * Get the selector table base and calculate the entry address.
8774 */
8775 RTGCPTR GCPtr = uSel & X86_SEL_LDT
8776 ? pVCpu->cpum.GstCtx.ldtr.u64Base
8777 : pVCpu->cpum.GstCtx.gdtr.pGdt;
8778 GCPtr += uSel & X86_SEL_MASK;
8779
8780 /*
8781 * ASMAtomicBitSet will assert if the address is misaligned, so we do some
8782 * ugly stuff here to avoid that. This also makes sure the access is atomic
8783 * and more or less removes any question about 8-bit vs 32-bit accesses.
8784 */
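/* Worked example: X86_SEL_TYPE_ACCESSED sits in the type byte, which is byte 5 of
   the 8 byte descriptor, so the accessed bit is bit 40 of the whole entry.  In the
   aligned case below we map the dword covering bytes 4 thru 7, which places that
   bit at index 40 - 32 = 8, matching the ASMAtomicBitSet(pu32, 8) call. */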
8785 VBOXSTRICTRC rcStrict;
8786 uint32_t volatile *pu32;
8787 if ((GCPtr & 3) == 0)
8788 {
8789 /* The normal case, map the 32 bits surrounding the accessed bit (bit 40). */
8790 GCPtr += 2 + 2;
8791 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0);
8792 if (rcStrict != VINF_SUCCESS)
8793 return rcStrict;
8794 ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
8795 }
8796 else
8797 {
8798 /* The misaligned GDT/LDT case, map the whole thing. */
8799 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0);
8800 if (rcStrict != VINF_SUCCESS)
8801 return rcStrict;
8802 switch ((uintptr_t)pu32 & 3)
8803 {
8804 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
8805 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
8806 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
8807 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
8808 }
8809 }
8810
8811 return iemMemCommitAndUnmap(pVCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
8812}
8813
8814/** @} */
8815
8816/** @name Opcode Helpers.
8817 * @{
8818 */
8819
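/*
 * Illustrative note on the cbImmAndRspOffset packing used by the helper below
 * (the concrete values are made up): a decoder that still has a 4 byte immediate
 * to read after the ModR/M, SIB and displacement bytes would pass 4 in the low
 * byte (this only matters for RIP relative addressing), while a POP with an
 * [ESP] based memory operand could pass 4 << 8 so that the effective address is
 * computed as if RSP had already been incremented by the popped dword.
 */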
8820/**
8821 * Calculates the effective address of a ModR/M memory operand.
8822 *
8823 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8824 *
8825 * @return Strict VBox status code.
8826 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8827 * @param bRm The ModRM byte.
8828 * @param cbImmAndRspOffset - First byte: The size of any immediate
8829 * following the effective address opcode bytes
8830 * (only for RIP relative addressing).
8831 * - Second byte: RSP displacement (for POP [ESP]).
8832 * @param pGCPtrEff Where to return the effective address.
8833 */
8834VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset, PRTGCPTR pGCPtrEff) RT_NOEXCEPT
8835{
8836 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
8837# define SET_SS_DEF() \
8838 do \
8839 { \
8840 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
8841 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
8842 } while (0)
8843
8844 if (!IEM_IS_64BIT_CODE(pVCpu))
8845 {
8846/** @todo Check the effective address size crap! */
8847 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
8848 {
8849 uint16_t u16EffAddr;
8850
8851 /* Handle the disp16 form with no registers first. */
8852 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
8853 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
8854 else
8855 {
8856 /* Get the displacement. */
8857 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8858 {
8859 case 0: u16EffAddr = 0; break;
8860 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
8861 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
8862 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
8863 }
8864
8865 /* Add the base and index registers to the disp. */
8866 switch (bRm & X86_MODRM_RM_MASK)
8867 {
8868 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
8869 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
8870 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
8871 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
8872 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
8873 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
8874 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
8875 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
8876 }
8877 }
8878
8879 *pGCPtrEff = u16EffAddr;
8880 }
8881 else
8882 {
8883 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8884 uint32_t u32EffAddr;
8885
8886 /* Handle the disp32 form with no registers first. */
8887 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8888 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
8889 else
8890 {
8891 /* Get the register (or SIB) value. */
8892 switch ((bRm & X86_MODRM_RM_MASK))
8893 {
8894 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8895 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8896 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8897 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8898 case 4: /* SIB */
8899 {
8900 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8901
8902 /* Get the index and scale it. */
8903 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
8904 {
8905 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8906 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8907 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8908 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8909 case 4: u32EffAddr = 0; /*none */ break;
8910 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
8911 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8912 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8913 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8914 }
8915 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8916
8917 /* add base */
8918 switch (bSib & X86_SIB_BASE_MASK)
8919 {
8920 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
8921 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
8922 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
8923 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
8924 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8925 case 5:
8926 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8927 {
8928 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
8929 SET_SS_DEF();
8930 }
8931 else
8932 {
8933 uint32_t u32Disp;
8934 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8935 u32EffAddr += u32Disp;
8936 }
8937 break;
8938 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
8939 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
8940 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8941 }
8942 break;
8943 }
8944 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
8945 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8946 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8947 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8948 }
8949
8950 /* Get and add the displacement. */
8951 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8952 {
8953 case 0:
8954 break;
8955 case 1:
8956 {
8957 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8958 u32EffAddr += i8Disp;
8959 break;
8960 }
8961 case 2:
8962 {
8963 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8964 u32EffAddr += u32Disp;
8965 break;
8966 }
8967 default:
8968 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
8969 }
8970
8971 }
8972 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
8973 *pGCPtrEff = u32EffAddr;
8974 else
8975 {
8976 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
8977 *pGCPtrEff = u32EffAddr & UINT16_MAX;
8978 }
8979 }
8980 }
8981 else
8982 {
8983 uint64_t u64EffAddr;
8984
8985 /* Handle the rip+disp32 form with no registers first. */
8986 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8987 {
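/* Note: RIP-relative operands are relative to the *next* instruction, so in addition to the
   opcode bytes decoded so far we add the size of any trailing immediate, passed by the caller
   in the low byte of cbImmAndRspOffset (see the parameter docs); e.g. for a hypothetical
   'cmp dword [rip+disp32], imm32' that byte would be 4. */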
8988 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
8989 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
8990 }
8991 else
8992 {
8993 /* Get the register (or SIB) value. */
8994 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
8995 {
8996 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8997 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8998 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8999 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9000 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
9001 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9002 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9003 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9004 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9005 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9006 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9007 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9008 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9009 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9010 /* SIB */
9011 case 4:
9012 case 12:
9013 {
9014 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9015
9016 /* Get the index and scale it. */
9017 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
9018 {
9019 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9020 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9021 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9022 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9023 case 4: u64EffAddr = 0; /*none */ break;
9024 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
9025 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9026 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9027 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9028 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9029 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9030 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9031 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
9032 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9033 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9034 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9035 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9036 }
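/* The 2-bit SIB scale field shifts the index left by 0..3 bits, i.e. scales it by 1, 2, 4 or 8. */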
9037 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9038
9039 /* add base */
9040 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
9041 {
9042 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
9043 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
9044 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
9045 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
9046 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
9047 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
9048 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
9049 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
9050 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
9051 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
9052 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
9053 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
9054 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
9055 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
9056 /* complicated encodings */
9057 case 5:
9058 case 13:
9059 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9060 {
9061 if (!pVCpu->iem.s.uRexB)
9062 {
9063 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
9064 SET_SS_DEF();
9065 }
9066 else
9067 u64EffAddr += pVCpu->cpum.GstCtx.r13;
9068 }
9069 else
9070 {
9071 uint32_t u32Disp;
9072 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9073 u64EffAddr += (int32_t)u32Disp;
9074 }
9075 break;
9076 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9077 }
9078 break;
9079 }
9080 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9081 }
9082
9083 /* Get and add the displacement. */
9084 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9085 {
9086 case 0:
9087 break;
9088 case 1:
9089 {
9090 int8_t i8Disp;
9091 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9092 u64EffAddr += i8Disp;
9093 break;
9094 }
9095 case 2:
9096 {
9097 uint32_t u32Disp;
9098 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9099 u64EffAddr += (int32_t)u32Disp;
9100 break;
9101 }
9102 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
9103 }
9104
9105 }
9106
9107 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
9108 *pGCPtrEff = u64EffAddr;
9109 else
9110 {
9111 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9112 *pGCPtrEff = u64EffAddr & UINT32_MAX;
9113 }
9114 }
9115
9116 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
9117 return VINF_SUCCESS;
9118}
9119
9120
9121#ifdef IEM_WITH_SETJMP
9122/**
9123 * Calculates the effective address of a ModR/M memory operand.
9124 *
9125 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
9126 *
9127 * May longjmp on internal error.
9128 *
9129 * @return The effective address.
9130 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9131 * @param bRm The ModRM byte.
9132 * @param cbImmAndRspOffset - First byte: The size of any immediate
9133 * following the effective address opcode bytes
9134 * (only for RIP relative addressing).
9135 * - Second byte: RSP displacement (for POP [ESP]).
9136 */
9137RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset) IEM_NOEXCEPT_MAY_LONGJMP
9138{
9139 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
9140# define SET_SS_DEF() \
9141 do \
9142 { \
9143 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
9144 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
9145 } while (0)
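/* SET_SS_DEF() implements the architectural default segment: rBP/rSP based addressing forms
   below use SS rather than DS unless an explicit segment prefix was decoded. */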
9146
9147 if (!IEM_IS_64BIT_CODE(pVCpu))
9148 {
9149/** @todo Check the effective address size crap! */
9150 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
9151 {
9152 uint16_t u16EffAddr;
9153
9154 /* Handle the disp16 form with no registers first. */
9155 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
9156 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
9157 else
9158 {
9159 /* Get the displacement. */
9160 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9161 {
9162 case 0: u16EffAddr = 0; break;
9163 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
9164 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
9165 default: AssertFailedStmt(IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_1)); /* (caller checked for these) */
9166 }
9167
9168 /* Add the base and index registers to the disp. */
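/* Classic 16-bit ModR/M table: rm 0..3 select BX+SI, BX+DI, BP+SI and BP+DI; rm 4..7 select
   SI, DI, BP and BX. For instance, a hypothetical bRm=0x42 (mod=1, rm=2) decodes as
   [bp+si+disp8] with SS as the default segment. */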
9169 switch (bRm & X86_MODRM_RM_MASK)
9170 {
9171 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
9172 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
9173 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
9174 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
9175 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
9176 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
9177 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
9178 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
9179 }
9180 }
9181
9182 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
9183 return u16EffAddr;
9184 }
9185
9186 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9187 uint32_t u32EffAddr;
9188
9189 /* Handle the disp32 form with no registers first. */
9190 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9191 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
9192 else
9193 {
9194 /* Get the register (or SIB) value. */
9195 switch ((bRm & X86_MODRM_RM_MASK))
9196 {
9197 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
9198 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
9199 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
9200 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
9201 case 4: /* SIB */
9202 {
9203 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9204
9205 /* Get the index and scale it. */
9206 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
9207 {
9208 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
9209 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
9210 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
9211 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
9212 case 4: u32EffAddr = 0; /*none */ break;
9213 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
9214 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
9215 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
9216 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9217 }
9218 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9219
9220 /* add base */
9221 switch (bSib & X86_SIB_BASE_MASK)
9222 {
9223 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
9224 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
9225 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
9226 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
9227 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
9228 case 5:
9229 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9230 {
9231 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
9232 SET_SS_DEF();
9233 }
9234 else
9235 {
9236 uint32_t u32Disp;
9237 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9238 u32EffAddr += u32Disp;
9239 }
9240 break;
9241 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
9242 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
9243 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9244 }
9245 break;
9246 }
9247 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
9248 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
9249 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
9250 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9251 }
9252
9253 /* Get and add the displacement. */
9254 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9255 {
9256 case 0:
9257 break;
9258 case 1:
9259 {
9260 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9261 u32EffAddr += i8Disp;
9262 break;
9263 }
9264 case 2:
9265 {
9266 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9267 u32EffAddr += u32Disp;
9268 break;
9269 }
9270 default:
9271 AssertFailedStmt(IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_2)); /* (caller checked for these) */
9272 }
9273 }
9274
9275 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
9276 {
9277 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
9278 return u32EffAddr;
9279 }
9280 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
9281 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX32\n", u32EffAddr & UINT16_MAX));
9282 return u32EffAddr & UINT16_MAX;
9283 }
9284
9285 uint64_t u64EffAddr;
9286
9287 /* Handle the rip+disp32 form with no registers first. */
9288 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9289 {
9290 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
9291 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
9292 }
9293 else
9294 {
9295 /* Get the register (or SIB) value. */
9296 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
9297 {
9298 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9299 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9300 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9301 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9302 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
9303 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9304 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9305 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9306 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9307 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9308 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9309 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9310 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9311 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9312 /* SIB */
9313 case 4:
9314 case 12:
9315 {
9316 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9317
9318 /* Get the index and scale it. */
9319 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
9320 {
9321 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9322 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9323 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9324 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9325 case 4: u64EffAddr = 0; /*none */ break;
9326 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
9327 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9328 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9329 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9330 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9331 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9332 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9333 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
9334 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9335 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9336 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9337 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9338 }
9339 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9340
9341 /* add base */
9342 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
9343 {
9344 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
9345 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
9346 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
9347 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
9348 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
9349 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
9350 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
9351 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
9352 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
9353 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
9354 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
9355 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
9356 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
9357 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
9358 /* complicated encodings */
9359 case 5:
9360 case 13:
9361 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9362 {
9363 if (!pVCpu->iem.s.uRexB)
9364 {
9365 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
9366 SET_SS_DEF();
9367 }
9368 else
9369 u64EffAddr += pVCpu->cpum.GstCtx.r13;
9370 }
9371 else
9372 {
9373 uint32_t u32Disp;
9374 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9375 u64EffAddr += (int32_t)u32Disp;
9376 }
9377 break;
9378 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9379 }
9380 break;
9381 }
9382 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9383 }
9384
9385 /* Get and add the displacement. */
9386 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9387 {
9388 case 0:
9389 break;
9390 case 1:
9391 {
9392 int8_t i8Disp;
9393 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9394 u64EffAddr += i8Disp;
9395 break;
9396 }
9397 case 2:
9398 {
9399 uint32_t u32Disp;
9400 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9401 u64EffAddr += (int32_t)u32Disp;
9402 break;
9403 }
9404 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
9405 }
9406
9407 }
9408
9409 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
9410 {
9411 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
9412 return u64EffAddr;
9413 }
9414 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9415 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
9416 return u64EffAddr & UINT32_MAX;
9417}
9418#endif /* IEM_WITH_SETJMP */
9419
9420
9421/**
9422 * Calculates the effective address of a ModR/M memory operand, extended version
9423 * for use in the recompilers.
9424 *
9425 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
9426 *
9427 * @return Strict VBox status code.
9428 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9429 * @param bRm The ModRM byte.
9430 * @param cbImmAndRspOffset - First byte: The size of any immediate
9431 * following the effective address opcode bytes
9432 * (only for RIP relative addressing).
9433 * - Second byte: RSP displacement (for POP [ESP]).
9434 * @param pGCPtrEff Where to return the effective address.
9435 * @param puInfo Extra info: 32-bit displacement (bits 31:0) and
9436 * SIB byte (bits 39:32).
9437 */
9438VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset, PRTGCPTR pGCPtrEff, uint64_t *puInfo) RT_NOEXCEPT
9439{
9440 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
9441# define SET_SS_DEF() \
9442 do \
9443 { \
9444 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
9445 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
9446 } while (0)
9447
9448 uint64_t uInfo;
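/* uInfo layout (see @a puInfo): bits 31:0 hold the (sign-extended) displacement, if any, and
   bits 39:32 the raw SIB byte, if present; e.g. an illustrative SIB byte 0x8B together with a
   disp32 of 0x00000080 yields UINT64_C(0x0000008B00000080). */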
9449 if (!IEM_IS_64BIT_CODE(pVCpu))
9450 {
9451/** @todo Check the effective address size crap! */
9452 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
9453 {
9454 uint16_t u16EffAddr;
9455
9456 /* Handle the disp16 form with no registers first. */
9457 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
9458 {
9459 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
9460 uInfo = u16EffAddr;
9461 }
9462 else
9463 {
9464 /* Get the displacement. */
9465 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9466 {
9467 case 0: u16EffAddr = 0; break;
9468 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
9469 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
9470 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
9471 }
9472 uInfo = u16EffAddr;
9473
9474 /* Add the base and index registers to the disp. */
9475 switch (bRm & X86_MODRM_RM_MASK)
9476 {
9477 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
9478 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
9479 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
9480 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
9481 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
9482 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
9483 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
9484 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
9485 }
9486 }
9487
9488 *pGCPtrEff = u16EffAddr;
9489 }
9490 else
9491 {
9492 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9493 uint32_t u32EffAddr;
9494
9495 /* Handle the disp32 form with no registers first. */
9496 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9497 {
9498 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
9499 uInfo = u32EffAddr;
9500 }
9501 else
9502 {
9503 /* Get the register (or SIB) value. */
9504 uInfo = 0;
9505 switch ((bRm & X86_MODRM_RM_MASK))
9506 {
9507 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
9508 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
9509 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
9510 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
9511 case 4: /* SIB */
9512 {
9513 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9514 uInfo = (uint64_t)bSib << 32;
9515
9516 /* Get the index and scale it. */
9517 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
9518 {
9519 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
9520 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
9521 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
9522 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
9523 case 4: u32EffAddr = 0; /*none */ break;
9524 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
9525 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
9526 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
9527 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9528 }
9529 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9530
9531 /* add base */
9532 switch (bSib & X86_SIB_BASE_MASK)
9533 {
9534 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
9535 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
9536 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
9537 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
9538 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
9539 case 5:
9540 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9541 {
9542 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
9543 SET_SS_DEF();
9544 }
9545 else
9546 {
9547 uint32_t u32Disp;
9548 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9549 u32EffAddr += u32Disp;
9550 uInfo |= u32Disp;
9551 }
9552 break;
9553 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
9554 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
9555 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9556 }
9557 break;
9558 }
9559 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
9560 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
9561 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
9562 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9563 }
9564
9565 /* Get and add the displacement. */
9566 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9567 {
9568 case 0:
9569 break;
9570 case 1:
9571 {
9572 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9573 u32EffAddr += i8Disp;
9574 uInfo |= (uint32_t)(int32_t)i8Disp;
9575 break;
9576 }
9577 case 2:
9578 {
9579 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9580 u32EffAddr += u32Disp;
9581 uInfo |= (uint32_t)u32Disp;
9582 break;
9583 }
9584 default:
9585 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
9586 }
9587
9588 }
9589 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
9590 *pGCPtrEff = u32EffAddr;
9591 else
9592 {
9593 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
9594 *pGCPtrEff = u32EffAddr & UINT16_MAX;
9595 }
9596 }
9597 }
9598 else
9599 {
9600 uint64_t u64EffAddr;
9601
9602 /* Handle the rip+disp32 form with no registers first. */
9603 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9604 {
9605 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
9606 uInfo = (uint32_t)u64EffAddr;
9607 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
9608 }
9609 else
9610 {
9611 /* Get the register (or SIB) value. */
9612 uInfo = 0;
9613 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
9614 {
9615 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9616 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9617 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9618 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9619 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
9620 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9621 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9622 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9623 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9624 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9625 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9626 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9627 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9628 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9629 /* SIB */
9630 case 4:
9631 case 12:
9632 {
9633 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9634 uInfo = (uint64_t)bSib << 32;
9635
9636 /* Get the index and scale it. */
9637 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
9638 {
9639 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9640 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9641 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9642 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9643 case 4: u64EffAddr = 0; /*none */ break;
9644 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
9645 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9646 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9647 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9648 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9649 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9650 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9651 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
9652 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9653 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9654 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9655 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9656 }
9657 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9658
9659 /* add base */
9660 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
9661 {
9662 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
9663 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
9664 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
9665 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
9666 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
9667 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
9668 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
9669 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
9670 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
9671 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
9672 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
9673 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
9674 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
9675 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
9676 /* complicated encodings */
9677 case 5:
9678 case 13:
9679 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9680 {
9681 if (!pVCpu->iem.s.uRexB)
9682 {
9683 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
9684 SET_SS_DEF();
9685 }
9686 else
9687 u64EffAddr += pVCpu->cpum.GstCtx.r13;
9688 }
9689 else
9690 {
9691 uint32_t u32Disp;
9692 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9693 u64EffAddr += (int32_t)u32Disp;
9694 uInfo |= u32Disp;
9695 }
9696 break;
9697 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9698 }
9699 break;
9700 }
9701 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9702 }
9703
9704 /* Get and add the displacement. */
9705 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9706 {
9707 case 0:
9708 break;
9709 case 1:
9710 {
9711 int8_t i8Disp;
9712 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9713 u64EffAddr += i8Disp;
9714 uInfo |= (uint32_t)(int32_t)i8Disp;
9715 break;
9716 }
9717 case 2:
9718 {
9719 uint32_t u32Disp;
9720 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9721 u64EffAddr += (int32_t)u32Disp;
9722 uInfo |= u32Disp;
9723 break;
9724 }
9725 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
9726 }
9727
9728 }
9729
9730 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
9731 *pGCPtrEff = u64EffAddr;
9732 else
9733 {
9734 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9735 *pGCPtrEff = u64EffAddr & UINT32_MAX;
9736 }
9737 }
9738 *puInfo = uInfo;
9739
9740 Log5(("iemOpHlpCalcRmEffAddrEx: EffAddr=%#010RGv uInfo=%RX64\n", *pGCPtrEff, uInfo));
9741 return VINF_SUCCESS;
9742}
9743
9744
9745#ifdef IEM_WITH_SETJMP
9746/**
9747 * Calculates the effective address of a ModR/M memory operand, extended version
9748 * for use in the recompilers.
9749 *
9750 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
9751 *
9752 * May longjmp on internal error.
9753 *
9754 * @return The effective address.
9755 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9756 * @param bRm The ModRM byte.
9757 * @param cbImmAndRspOffset - First byte: The size of any immediate
9758 * following the effective address opcode bytes
9759 * (only for RIP relative addressing).
9760 * - Second byte: RSP displacement (for POP [ESP]).
9761 * @param puInfo Extra info: 32-bit displacement (bits 31:0) and
9762 * SIB byte (bits 39:32).
9763 */
9764RTGCPTR iemOpHlpCalcRmEffAddrJmpEx(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset, uint64_t *puInfo) IEM_NOEXCEPT_MAY_LONGJMP
9765{
9766 Log5(("iemOpHlpCalcRmEffAddrJmpEx: bRm=%#x\n", bRm));
9767# define SET_SS_DEF() \
9768 do \
9769 { \
9770 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
9771 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
9772 } while (0)
9773
9774 if (!IEM_IS_64BIT_CODE(pVCpu))
9775 {
9776/** @todo Check the effective address size crap! */
9777 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
9778 {
9779 uint16_t u16EffAddr;
9780
9781 /* Handle the disp16 form with no registers first. */
9782 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
9783 {
9784 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
9785 *puInfo = u16EffAddr;
9786 }
9787 else
9788 {
9789 /* Get the displacement. */
9790 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9791 {
9792 case 0: u16EffAddr = 0; break;
9793 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
9794 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
9795 default: AssertFailedStmt(IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_1)); /* (caller checked for these) */
9796 }
9797 *puInfo = u16EffAddr;
9798
9799 /* Add the base and index registers to the disp. */
9800 switch (bRm & X86_MODRM_RM_MASK)
9801 {
9802 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
9803 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
9804 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
9805 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
9806 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
9807 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
9808 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
9809 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
9810 }
9811 }
9812
9813 Log5(("iemOpHlpCalcRmEffAddrJmpEx: EffAddr=%#06RX16 uInfo=%#RX64\n", u16EffAddr, *puInfo));
9814 return u16EffAddr;
9815 }
9816
9817 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9818 uint32_t u32EffAddr;
9819 uint64_t uInfo;
9820
9821 /* Handle the disp32 form with no registers first. */
9822 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9823 {
9824 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
9825 uInfo = u32EffAddr;
9826 }
9827 else
9828 {
9829 /* Get the register (or SIB) value. */
9830 uInfo = 0;
9831 switch ((bRm & X86_MODRM_RM_MASK))
9832 {
9833 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
9834 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
9835 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
9836 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
9837 case 4: /* SIB */
9838 {
9839 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9840 uInfo = (uint64_t)bSib << 32;
9841
9842 /* Get the index and scale it. */
9843 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
9844 {
9845 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
9846 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
9847 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
9848 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
9849 case 4: u32EffAddr = 0; /*none */ break;
9850 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
9851 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
9852 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
9853 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9854 }
9855 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9856
9857 /* add base */
9858 switch (bSib & X86_SIB_BASE_MASK)
9859 {
9860 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
9861 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
9862 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
9863 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
9864 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
9865 case 5:
9866 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9867 {
9868 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
9869 SET_SS_DEF();
9870 }
9871 else
9872 {
9873 uint32_t u32Disp;
9874 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9875 u32EffAddr += u32Disp;
9876 uInfo |= u32Disp;
9877 }
9878 break;
9879 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
9880 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
9881 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9882 }
9883 break;
9884 }
9885 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
9886 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
9887 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
9888 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9889 }
9890
9891 /* Get and add the displacement. */
9892 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9893 {
9894 case 0:
9895 break;
9896 case 1:
9897 {
9898 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9899 u32EffAddr += i8Disp;
9900 uInfo |= (uint32_t)(int32_t)i8Disp;
9901 break;
9902 }
9903 case 2:
9904 {
9905 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9906 u32EffAddr += u32Disp;
9907 uInfo |= u32Disp;
9908 break;
9909 }
9910 default:
9911 AssertFailedStmt(IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_2)); /* (caller checked for these) */
9912 }
9913 }
9914
9915 *puInfo = uInfo;
9916 Log5(("iemOpHlpCalcRmEffAddrJmpEx: EffAddr=%#010RX32 uInfo=%#RX64\n", u32EffAddr, uInfo));
9917 return u32EffAddr;
9918 }
9919
9920 uint64_t u64EffAddr;
9921 uint64_t uInfo;
9922
9923 /* Handle the rip+disp32 form with no registers first. */
9924 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9925 {
9926 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
9927 uInfo = (uint32_t)u64EffAddr;
9928 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
9929 }
9930 else
9931 {
9932 /* Get the register (or SIB) value. */
9933 uInfo = 0;
9934 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
9935 {
9936 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9937 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9938 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9939 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9940 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
9941 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9942 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9943 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9944 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9945 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9946 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9947 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9948 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9949 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9950 /* SIB */
9951 case 4:
9952 case 12:
9953 {
9954 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9955 uInfo = (uint64_t)bSib << 32;
9956
9957 /* Get the index and scale it. */
9958 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
9959 {
9960 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9961 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9962 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9963 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9964 case 4: u64EffAddr = 0; /*none */ break;
9965 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
9966 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9967 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9968 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9969 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9970 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9971 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9972 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
9973 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9974 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9975 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9976 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9977 }
9978 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9979
9980 /* add base */
9981 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
9982 {
9983 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
9984 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
9985 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
9986 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
9987 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
9988 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
9989 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
9990 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
9991 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
9992 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
9993 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
9994 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
9995 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
9996 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
9997 /* complicated encodings */
9998 case 5:
9999 case 13:
10000 if ((bRm & X86_MODRM_MOD_MASK) != 0)
10001 {
10002 if (!pVCpu->iem.s.uRexB)
10003 {
10004 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
10005 SET_SS_DEF();
10006 }
10007 else
10008 u64EffAddr += pVCpu->cpum.GstCtx.r13;
10009 }
10010 else
10011 {
10012 uint32_t u32Disp;
10013 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
10014 u64EffAddr += (int32_t)u32Disp;
10015 uInfo |= u32Disp;
10016 }
10017 break;
10018 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
10019 }
10020 break;
10021 }
10022 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
10023 }
10024
10025 /* Get and add the displacement. */
10026 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
10027 {
10028 case 0:
10029 break;
10030 case 1:
10031 {
10032 int8_t i8Disp;
10033 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
10034 u64EffAddr += i8Disp;
10035 uInfo |= (uint32_t)(int32_t)i8Disp;
10036 break;
10037 }
10038 case 2:
10039 {
10040 uint32_t u32Disp;
10041 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
10042 u64EffAddr += (int32_t)u32Disp;
10043 uInfo |= u32Disp;
10044 break;
10045 }
10046 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
10047 }
10048
10049 }
10050
10051 *puInfo = uInfo;
10052 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
10053 {
10054 Log5(("iemOpHlpCalcRmEffAddrJmpEx: EffAddr=%#010RGv uInfo=%#RX64\n", u64EffAddr, uInfo));
10055 return u64EffAddr;
10056 }
10057 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
10058 Log5(("iemOpHlpCalcRmEffAddrJmpEx: EffAddr=%#010RGv uInfo=%#RX64\n", u64EffAddr & UINT32_MAX, uInfo));
10059 return u64EffAddr & UINT32_MAX;
10060}
10061#endif /* IEM_WITH_SETJMP */
10062
10063/** @} */
10064
10065
10066#ifdef LOG_ENABLED
10067/**
10068 * Logs the current instruction.
10069 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10070 * @param fSameCtx Set if we have the same context information as the VMM,
10071 * clear if we may have already executed an instruction in
10072 * our debug context. When clear, we assume IEMCPU holds
10073 * valid CPU mode info.
10074 *
10075 * The @a fSameCtx parameter is now misleading and obsolete.
10076 * @param pszFunction The IEM function doing the execution.
10077 */
10078static void iemLogCurInstr(PVMCPUCC pVCpu, bool fSameCtx, const char *pszFunction) RT_NOEXCEPT
10079{
10080# ifdef IN_RING3
10081 if (LogIs2Enabled())
10082 {
10083 char szInstr[256];
10084 uint32_t cbInstr = 0;
10085 if (fSameCtx)
10086 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
10087 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
10088 szInstr, sizeof(szInstr), &cbInstr);
10089 else
10090 {
10091 uint32_t fFlags = 0;
10092 switch (IEM_GET_CPU_MODE(pVCpu))
10093 {
10094 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
10095 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
10096 case IEMMODE_16BIT:
10097 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) || pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
10098 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
10099 else
10100 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
10101 break;
10102 }
10103 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fFlags,
10104 szInstr, sizeof(szInstr), &cbInstr);
10105 }
10106
10107 PCX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
10108 Log2(("**** %s fExec=%x\n"
10109 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
10110 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
10111 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
10112 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
10113 " %s\n"
10114 , pszFunction, pVCpu->iem.s.fExec,
10115 pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ebx, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.esi, pVCpu->cpum.GstCtx.edi,
10116 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL, pVCpu->cpum.GstCtx.tr.Sel,
10117 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ds.Sel, pVCpu->cpum.GstCtx.es.Sel,
10118 pVCpu->cpum.GstCtx.fs.Sel, pVCpu->cpum.GstCtx.gs.Sel, pVCpu->cpum.GstCtx.eflags.u,
10119 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
10120 szInstr));
10121
10122 if (LogIs3Enabled())
10123 DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
10124 }
10125 else
10126# endif
10127 LogFlow(("%s: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n", pszFunction, pVCpu->cpum.GstCtx.cs.Sel,
10128 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u));
10129 RT_NOREF_PV(pVCpu); RT_NOREF_PV(fSameCtx);
10130}
10131#endif /* LOG_ENABLED */
10132
10133
10134#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10135/**
10136 * Deals with VMCPU_FF_VMX_APIC_WRITE, VMCPU_FF_VMX_MTF, VMCPU_FF_VMX_NMI_WINDOW,
10137 * VMCPU_FF_VMX_PREEMPT_TIMER and VMCPU_FF_VMX_INT_WINDOW.
10138 *
10139 * @returns Modified rcStrict.
10140 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10141 * @param rcStrict The instruction execution status.
10142 */
10143static VBOXSTRICTRC iemHandleNestedInstructionBoundaryFFs(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict) RT_NOEXCEPT
10144{
10145 Assert(CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)));
10146 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF))
10147 {
10148 /* VMX preemption timer takes priority over NMI-window exits. */
10149 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
10150 {
10151 rcStrict = iemVmxVmexitPreemptTimer(pVCpu);
10152 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER));
10153 }
10154 /*
10155 * Check remaining intercepts.
10156 *
10157 * NMI-window and Interrupt-window VM-exits.
10158 * Interrupt shadow (block-by-STI and Mov SS) inhibits interrupts and may also block NMIs.
10159 * Event injection during VM-entry takes priority over NMI-window and interrupt-window VM-exits.
10160 *
10161 * See Intel spec. 26.7.6 "NMI-Window Exiting".
10162 * See Intel spec. 26.7.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
10163 */
10164 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW | VMCPU_FF_VMX_INT_WINDOW)
10165 && !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
10166 && !TRPMHasTrap(pVCpu))
10167 {
10168 Assert(CPUMIsGuestVmxInterceptEvents(&pVCpu->cpum.GstCtx));
10169 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW)
10170 && CPUMIsGuestVmxVirtNmiBlocking(&pVCpu->cpum.GstCtx))
10171 {
10172 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_NMI_WINDOW, 0 /* u64ExitQual */);
10173 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW));
10174 }
10175 else if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW)
10176 && CPUMIsGuestVmxVirtIntrEnabled(&pVCpu->cpum.GstCtx))
10177 {
10178 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_INT_WINDOW, 0 /* u64ExitQual */);
10179 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW));
10180 }
10181 }
10182 }
10183 /* TPR-below threshold/APIC write has the highest priority. */
10184 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
10185 {
10186 rcStrict = iemVmxApicWriteEmulation(pVCpu);
10187 Assert(!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx));
10188 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE));
10189 }
10190 /* MTF takes priority over VMX-preemption timer. */
10191 else
10192 {
10193 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* u64ExitQual */);
10194 Assert(!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx));
10195 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF));
10196 }
10197 return rcStrict;
10198}
10199#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
10200
10201
10202/**
10203 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
10204 * IEMExecOneWithPrefetchedByPC.
10205 *
10206 * Similar code is found in IEMExecLots.
10207 *
10208 * @return Strict VBox status code.
10209 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10210 * @param fExecuteInhibit If set, execute the instruction following CLI,
10211 * POP SS and MOV SS,GR.
10212 * @param pszFunction The calling function name.
10213 */
10214DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPUCC pVCpu, bool fExecuteInhibit, const char *pszFunction)
10215{
10216 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
10217 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
10218 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
10219 RT_NOREF_PV(pszFunction);
10220
10221#ifdef IEM_WITH_SETJMP
10222 VBOXSTRICTRC rcStrict;
10223 IEM_TRY_SETJMP(pVCpu, rcStrict)
10224 {
10225 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
10226 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
10227 }
10228 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
10229 {
10230 pVCpu->iem.s.cLongJumps++;
10231 }
10232 IEM_CATCH_LONGJMP_END(pVCpu);
10233#else
10234 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
10235 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
10236#endif
10237 if (rcStrict == VINF_SUCCESS)
10238 pVCpu->iem.s.cInstructions++;
10239 if (pVCpu->iem.s.cActiveMappings > 0)
10240 {
10241 Assert(rcStrict != VINF_SUCCESS);
10242 iemMemRollback(pVCpu);
10243 }
10244 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
10245 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
10246 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
10247
10248//#ifdef DEBUG
10249// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
10250//#endif
10251
10252#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10253 /*
10254 * Perform any VMX nested-guest instruction boundary actions.
10255 *
10256 * If any of these causes a VM-exit, we must skip executing the next
10257 * instruction (would run into stale page tables). A VM-exit makes sure
10258 * there is no interrupt-inhibition, so that should ensure we don't go
10259 * to try execute the next instruction. Clearing fExecuteInhibit is
10260 * problematic because of the setjmp/longjmp clobbering above.
10261 */
10262 if ( !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
10263 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)
10264 || rcStrict != VINF_SUCCESS)
10265 { /* likely */ }
10266 else
10267 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
10268#endif
10269
10270 /* Execute the next instruction as well if a cli, pop ss or
10271 mov ss, Gr has just completed successfully. */
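/* (The canonical example is a 16-bit 'mov ss, ax' + 'mov sp, xxxx' pair: the SS load inhibits
   interrupts for exactly one instruction, so we execute that follow-up instruction here while
   the interrupt shadow reported by CPUMIsInInterruptShadow() is still in effect.) */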
10272 if ( fExecuteInhibit
10273 && rcStrict == VINF_SUCCESS
10274 && CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx))
10275 {
10276 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fExec & (IEM_F_BYPASS_HANDLERS | IEM_F_X86_DISREGARD_LOCK));
10277 if (rcStrict == VINF_SUCCESS)
10278 {
10279#ifdef LOG_ENABLED
10280 iemLogCurInstr(pVCpu, false, pszFunction);
10281#endif
10282#ifdef IEM_WITH_SETJMP
10283 IEM_TRY_SETJMP_AGAIN(pVCpu, rcStrict)
10284 {
10285 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
10286 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
10287 }
10288 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
10289 {
10290 pVCpu->iem.s.cLongJumps++;
10291 }
10292 IEM_CATCH_LONGJMP_END(pVCpu);
10293#else
10294 IEM_OPCODE_GET_FIRST_U8(&b);
10295 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
10296#endif
10297 if (rcStrict == VINF_SUCCESS)
10298 {
10299 pVCpu->iem.s.cInstructions++;
10300#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10301 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
10302 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW))
10303 { /* likely */ }
10304 else
10305 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
10306#endif
10307 }
10308 if (pVCpu->iem.s.cActiveMappings > 0)
10309 {
10310 Assert(rcStrict != VINF_SUCCESS);
10311 iemMemRollback(pVCpu);
10312 }
10313 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
10314 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
10315 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
10316 }
10317 else if (pVCpu->iem.s.cActiveMappings > 0)
10318 iemMemRollback(pVCpu);
10319 /** @todo drop this after we bake this change into RIP advancing. */
10320 CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx); /* hope this is correct for all exceptional cases... */
10321 }
10322
10323 /*
10324 * Return value fiddling, statistics and sanity assertions.
10325 */
10326 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10327
10328 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
10329 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
10330 return rcStrict;
10331}
10332
10333
10334/**
10335 * Execute one instruction.
10336 *
10337 * @return Strict VBox status code.
10338 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10339 */
10340VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPUCC pVCpu)
10341{
10342 AssertCompile(sizeof(pVCpu->iem.s) <= sizeof(pVCpu->iem.padding)); /* (tstVMStruct can't do its job w/o instruction stats) */
10343#ifdef LOG_ENABLED
10344 iemLogCurInstr(pVCpu, true, "IEMExecOne");
10345#endif
10346
10347 /*
10348 * Do the decoding and emulation.
10349 */
10350 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
10351 if (rcStrict == VINF_SUCCESS)
10352 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOne");
10353 else if (pVCpu->iem.s.cActiveMappings > 0)
10354 iemMemRollback(pVCpu);
10355
10356 if (rcStrict != VINF_SUCCESS)
10357 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
10358 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
10359 return rcStrict;
10360}
10361
10362
10363VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPUCC pVCpu, uint32_t *pcbWritten)
10364{
10365 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
10366 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
10367 if (rcStrict == VINF_SUCCESS)
10368 {
10369 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneEx");
10370 if (pcbWritten)
10371 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
10372 }
10373 else if (pVCpu->iem.s.cActiveMappings > 0)
10374 iemMemRollback(pVCpu);
10375
10376 return rcStrict;
10377}
10378
10379
10380VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPUCC pVCpu, uint64_t OpcodeBytesPC,
10381 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
10382{
10383 VBOXSTRICTRC rcStrict;
10384 if ( cbOpcodeBytes
10385 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
10386 {
10387 iemInitDecoder(pVCpu, 0 /*fExecOpts*/);
10388#ifdef IEM_WITH_CODE_TLB
10389 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
10390 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
10391 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
10392 pVCpu->iem.s.offCurInstrStart = 0;
10393 pVCpu->iem.s.offInstrNextByte = 0;
10394 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
10395#else
10396 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
10397 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
10398#endif
10399 rcStrict = VINF_SUCCESS;
10400 }
10401 else
10402 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
10403 if (rcStrict == VINF_SUCCESS)
10404 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneWithPrefetchedByPC");
10405 else if (pVCpu->iem.s.cActiveMappings > 0)
10406 iemMemRollback(pVCpu);
10407
10408 return rcStrict;
10409}
10410
10411
10412VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPUCC pVCpu, uint32_t *pcbWritten)
10413{
10414 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
10415 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_BYPASS_HANDLERS);
10416 if (rcStrict == VINF_SUCCESS)
10417 {
10418 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassEx");
10419 if (pcbWritten)
10420 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
10421 }
10422 else if (pVCpu->iem.s.cActiveMappings > 0)
10423 iemMemRollback(pVCpu);
10424
10425 return rcStrict;
10426}
10427
10428
10429VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPUCC pVCpu, uint64_t OpcodeBytesPC,
10430 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
10431{
10432 VBOXSTRICTRC rcStrict;
10433 if ( cbOpcodeBytes
10434 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
10435 {
10436 iemInitDecoder(pVCpu, IEM_F_BYPASS_HANDLERS);
10437#ifdef IEM_WITH_CODE_TLB
10438 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
10439 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
10440 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
10441 pVCpu->iem.s.offCurInstrStart = 0;
10442 pVCpu->iem.s.offInstrNextByte = 0;
10443 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
10444#else
10445 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
10446 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
10447#endif
10448 rcStrict = VINF_SUCCESS;
10449 }
10450 else
10451 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_BYPASS_HANDLERS);
10452 if (rcStrict == VINF_SUCCESS)
10453 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPC");
10454 else if (pVCpu->iem.s.cActiveMappings > 0)
10455 iemMemRollback(pVCpu);
10456
10457 return rcStrict;
10458}
10459
10460
10461/**
10462 * For handling split cacheline lock operations when the host has split-lock
10463 * detection enabled.
10464 *
10465 * This will cause the interpreter to disregard the lock prefix and implicit
10466 * locking (xchg).
10467 *
10468 * @returns Strict VBox status code.
10469 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10470 */
10471VMMDECL(VBOXSTRICTRC) IEMExecOneIgnoreLock(PVMCPUCC pVCpu)
10472{
10473 /*
10474 * Do the decoding and emulation.
10475 */
10476 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_X86_DISREGARD_LOCK);
10477 if (rcStrict == VINF_SUCCESS)
10478 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneIgnoreLock");
10479 else if (pVCpu->iem.s.cActiveMappings > 0)
10480 iemMemRollback(pVCpu);
10481
10482 if (rcStrict != VINF_SUCCESS)
10483 LogFlow(("IEMExecOneIgnoreLock: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
10484 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
10485 return rcStrict;
10486}
10487
10488
10489VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPUCC pVCpu, uint32_t cMaxInstructions, uint32_t cPollRate, uint32_t *pcInstructions)
10490{
10491 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
10492 AssertMsg(RT_IS_POWER_OF_TWO(cPollRate + 1), ("%#x\n", cPollRate));
10493 Assert(cMaxInstructions > 0);
10494
10495 /*
10496 * See if there is an interrupt pending in TRPM, inject it if we can.
10497 */
10498 /** @todo What if we are injecting an exception and not an interrupt? Is that
10499 * possible here? For now we assert it is indeed only an interrupt. */
10500 if (!TRPMHasTrap(pVCpu))
10501 { /* likely */ }
10502 else
10503 {
10504 if ( !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
10505 && !CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx))
10506 {
10507 /** @todo Can we centralize this under CPUMCanInjectInterrupt()? */
10508#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
10509 bool fIntrEnabled = CPUMGetGuestGif(&pVCpu->cpum.GstCtx);
10510 if (fIntrEnabled)
10511 {
10512 if (!CPUMIsGuestInNestedHwvirtMode(IEM_GET_CTX(pVCpu)))
10513 fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
10514 else if (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
10515 fIntrEnabled = CPUMIsGuestVmxPhysIntrEnabled(IEM_GET_CTX(pVCpu));
10516 else
10517 {
10518 Assert(CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)));
10519 fIntrEnabled = CPUMIsGuestSvmPhysIntrEnabled(pVCpu, IEM_GET_CTX(pVCpu));
10520 }
10521 }
10522#else
10523 bool fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
10524#endif
10525 if (fIntrEnabled)
10526 {
10527 uint8_t u8TrapNo;
10528 TRPMEVENT enmType;
10529 uint32_t uErrCode;
10530 RTGCPTR uCr2;
10531 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /*pu8InstLen*/, NULL /*fIcebp*/);
10532 AssertRC(rc2);
10533 Assert(enmType == TRPM_HARDWARE_INT);
10534 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /*cbInstr*/);
10535
10536 TRPMResetTrap(pVCpu);
10537
10538#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
10539 /* Injecting an event may cause a VM-exit. */
10540 if ( rcStrict != VINF_SUCCESS
10541 && rcStrict != VINF_IEM_RAISED_XCPT)
10542 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
10543#else
10544 NOREF(rcStrict);
10545#endif
10546 }
10547 }
10548 }
10549
10550 /*
10551 * Initial decoder init w/ prefetch, then setup setjmp.
10552 */
10553 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
10554 if (rcStrict == VINF_SUCCESS)
10555 {
10556#ifdef IEM_WITH_SETJMP
10557 pVCpu->iem.s.cActiveMappings = 0; /** @todo wtf? */
10558 IEM_TRY_SETJMP(pVCpu, rcStrict)
10559#endif
10560 {
10561 /*
10562              * The run loop.  We limit ourselves to the caller specified instruction count (cMaxInstructions).
10563 */
10564 uint32_t cMaxInstructionsGccStupidity = cMaxInstructions;
10565 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
10566 for (;;)
10567 {
10568 /*
10569 * Log the state.
10570 */
10571#ifdef LOG_ENABLED
10572 iemLogCurInstr(pVCpu, true, "IEMExecLots");
10573#endif
10574
10575 /*
10576 * Do the decoding and emulation.
10577 */
10578 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
10579 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
10580#ifdef VBOX_STRICT
10581 CPUMAssertGuestRFlagsCookie(pVM, pVCpu);
10582#endif
10583 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10584 {
10585 Assert(pVCpu->iem.s.cActiveMappings == 0);
10586 pVCpu->iem.s.cInstructions++;
10587
10588#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10589 /* Perform any VMX nested-guest instruction boundary actions. */
10590 uint64_t fCpu = pVCpu->fLocalForcedActions;
10591 if (!(fCpu & ( VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
10592 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)))
10593 { /* likely */ }
10594 else
10595 {
10596 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
10597 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10598 fCpu = pVCpu->fLocalForcedActions;
10599 else
10600 {
10601 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10602 break;
10603 }
10604 }
10605#endif
10606 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
10607 {
10608#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
10609 uint64_t fCpu = pVCpu->fLocalForcedActions;
10610#endif
10611 fCpu &= VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
10612 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
10613 | VMCPU_FF_TLB_FLUSH
10614 | VMCPU_FF_UNHALT );
10615
10616 if (RT_LIKELY( ( !fCpu
10617 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
10618 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF) )
10619 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) ))
10620 {
10621 if (--cMaxInstructionsGccStupidity > 0)
10622 {
10623                                  /* Poll timers every now and then according to the caller's specs. */
10624 if ( (cMaxInstructionsGccStupidity & cPollRate) != 0
10625 || !TMTimerPollBool(pVM, pVCpu))
10626 {
10627 Assert(pVCpu->iem.s.cActiveMappings == 0);
10628 iemReInitDecoder(pVCpu);
10629 continue;
10630 }
10631 }
10632 }
10633 }
10634 Assert(pVCpu->iem.s.cActiveMappings == 0);
10635 }
10636 else if (pVCpu->iem.s.cActiveMappings > 0)
10637 iemMemRollback(pVCpu);
10638 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10639 break;
10640 }
10641 }
10642#ifdef IEM_WITH_SETJMP
10643 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
10644 {
10645 if (pVCpu->iem.s.cActiveMappings > 0)
10646 iemMemRollback(pVCpu);
10647# if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
10648 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10649# endif
10650 pVCpu->iem.s.cLongJumps++;
10651 }
10652 IEM_CATCH_LONGJMP_END(pVCpu);
10653#endif
10654
10655 /*
10656 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
10657 */
10658 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
10659 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
10660 }
10661 else
10662 {
10663 if (pVCpu->iem.s.cActiveMappings > 0)
10664 iemMemRollback(pVCpu);
10665
10666#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
10667 /*
10668 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
10669 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
10670 */
10671 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10672#endif
10673 }
10674
10675 /*
10676 * Maybe re-enter raw-mode and log.
10677 */
10678 if (rcStrict != VINF_SUCCESS)
10679 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
10680 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
10681 if (pcInstructions)
10682 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
10683 return rcStrict;
10684}
10685
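/*
 * Illustrative sketch only, excluded from the build: a minimal caller of
 * IEMExecLots().  Note that the assertion above requires cPollRate + 1 to be
 * a power of two, i.e. the poll rate acts as a mask; the 4096 instruction
 * budget and the 511 poll mask are assumed example values.
 */
#if 0
static VBOXSTRICTRC hypotheticalExecLotsCaller(PVMCPUCC pVCpu)
{
    uint32_t     cInstructions = 0;
    VBOXSTRICTRC rcStrict = IEMExecLots(pVCpu, 4096 /*cMaxInstructions*/, 511 /*cPollRate*/, &cInstructions);
    LogFlow(("Interpreted %u instructions, rcStrict=%Rrc\n", cInstructions, VBOXSTRICTRC_VAL(rcStrict)));
    return rcStrict;
}
#endif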
10686
10687/**
10688  * Interface used by EMExecuteExec; does exit statistics and limits.
10689 *
10690 * @returns Strict VBox status code.
10691 * @param pVCpu The cross context virtual CPU structure.
10692 * @param fWillExit To be defined.
10693 * @param cMinInstructions Minimum number of instructions to execute before checking for FFs.
10694 * @param cMaxInstructions Maximum number of instructions to execute.
10695 * @param cMaxInstructionsWithoutExits
10696 * The max number of instructions without exits.
10697 * @param pStats Where to return statistics.
10698 */
10699VMMDECL(VBOXSTRICTRC) IEMExecForExits(PVMCPUCC pVCpu, uint32_t fWillExit, uint32_t cMinInstructions, uint32_t cMaxInstructions,
10700 uint32_t cMaxInstructionsWithoutExits, PIEMEXECFOREXITSTATS pStats)
10701{
10702 NOREF(fWillExit); /** @todo define flexible exit crits */
10703
10704 /*
10705 * Initialize return stats.
10706 */
10707 pStats->cInstructions = 0;
10708 pStats->cExits = 0;
10709 pStats->cMaxExitDistance = 0;
10710 pStats->cReserved = 0;
10711
10712 /*
10713 * Initial decoder init w/ prefetch, then setup setjmp.
10714 */
10715 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
10716 if (rcStrict == VINF_SUCCESS)
10717 {
10718#ifdef IEM_WITH_SETJMP
10719 pVCpu->iem.s.cActiveMappings = 0; /** @todo wtf?!? */
10720 IEM_TRY_SETJMP(pVCpu, rcStrict)
10721#endif
10722 {
10723#ifdef IN_RING0
10724 bool const fCheckPreemptionPending = !RTThreadPreemptIsPossible() || !RTThreadPreemptIsEnabled(NIL_RTTHREAD);
10725#endif
10726 uint32_t cInstructionSinceLastExit = 0;
10727
10728 /*
10729              * The run loop.  We limit ourselves to the caller specified instruction count (cMaxInstructions).
10730 */
10731 PVM pVM = pVCpu->CTX_SUFF(pVM);
10732 for (;;)
10733 {
10734 /*
10735 * Log the state.
10736 */
10737#ifdef LOG_ENABLED
10738 iemLogCurInstr(pVCpu, true, "IEMExecForExits");
10739#endif
10740
10741 /*
10742 * Do the decoding and emulation.
10743 */
10744 uint32_t const cPotentialExits = pVCpu->iem.s.cPotentialExits;
10745
10746 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
10747 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
10748
10749 if ( cPotentialExits != pVCpu->iem.s.cPotentialExits
10750 && cInstructionSinceLastExit > 0 /* don't count the first */ )
10751 {
10752 pStats->cExits += 1;
10753 if (cInstructionSinceLastExit > pStats->cMaxExitDistance)
10754 pStats->cMaxExitDistance = cInstructionSinceLastExit;
10755 cInstructionSinceLastExit = 0;
10756 }
10757
10758 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10759 {
10760 Assert(pVCpu->iem.s.cActiveMappings == 0);
10761 pVCpu->iem.s.cInstructions++;
10762 pStats->cInstructions++;
10763 cInstructionSinceLastExit++;
10764
10765#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10766 /* Perform any VMX nested-guest instruction boundary actions. */
10767 uint64_t fCpu = pVCpu->fLocalForcedActions;
10768 if (!(fCpu & ( VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
10769 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)))
10770 { /* likely */ }
10771 else
10772 {
10773 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
10774 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10775 fCpu = pVCpu->fLocalForcedActions;
10776 else
10777 {
10778 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10779 break;
10780 }
10781 }
10782#endif
10783 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
10784 {
10785#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
10786 uint64_t fCpu = pVCpu->fLocalForcedActions;
10787#endif
10788 fCpu &= VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
10789 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
10790 | VMCPU_FF_TLB_FLUSH
10791 | VMCPU_FF_UNHALT );
10792 if (RT_LIKELY( ( ( !fCpu
10793 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
10794 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF))
10795 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) )
10796 || pStats->cInstructions < cMinInstructions))
10797 {
10798 if (pStats->cInstructions < cMaxInstructions)
10799 {
10800 if (cInstructionSinceLastExit <= cMaxInstructionsWithoutExits)
10801 {
10802#ifdef IN_RING0
10803 if ( !fCheckPreemptionPending
10804 || !RTThreadPreemptIsPending(NIL_RTTHREAD))
10805#endif
10806 {
10807 Assert(pVCpu->iem.s.cActiveMappings == 0);
10808 iemReInitDecoder(pVCpu);
10809 continue;
10810 }
10811#ifdef IN_RING0
10812 rcStrict = VINF_EM_RAW_INTERRUPT;
10813 break;
10814#endif
10815 }
10816 }
10817 }
10818 Assert(!(fCpu & VMCPU_FF_IEM));
10819 }
10820 Assert(pVCpu->iem.s.cActiveMappings == 0);
10821 }
10822 else if (pVCpu->iem.s.cActiveMappings > 0)
10823 iemMemRollback(pVCpu);
10824 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10825 break;
10826 }
10827 }
10828#ifdef IEM_WITH_SETJMP
10829 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
10830 {
10831 if (pVCpu->iem.s.cActiveMappings > 0)
10832 iemMemRollback(pVCpu);
10833 pVCpu->iem.s.cLongJumps++;
10834 }
10835 IEM_CATCH_LONGJMP_END(pVCpu);
10836#endif
10837
10838 /*
10839 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
10840 */
10841 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
10842 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
10843 }
10844 else
10845 {
10846 if (pVCpu->iem.s.cActiveMappings > 0)
10847 iemMemRollback(pVCpu);
10848
10849#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
10850 /*
10851 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
10852 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
10853 */
10854 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10855#endif
10856 }
10857
10858 /*
10859 * Maybe re-enter raw-mode and log.
10860 */
10861 if (rcStrict != VINF_SUCCESS)
10862 LogFlow(("IEMExecForExits: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc; ins=%u exits=%u maxdist=%u\n",
10863 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp,
10864 pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict), pStats->cInstructions, pStats->cExits, pStats->cMaxExitDistance));
10865 return rcStrict;
10866}
10867
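/*
 * Illustrative sketch only, excluded from the build: how a caller might use
 * IEMExecForExits() and the returned statistics to decide whether staying in
 * the interpreter pays off.  All threshold values are assumed examples.
 */
#if 0
static VBOXSTRICTRC hypotheticalExecForExitsCaller(PVMCPUCC pVCpu)
{
    IEMEXECFOREXITSTATS Stats;
    VBOXSTRICTRC rcStrict = IEMExecForExits(pVCpu, 0 /*fWillExit*/, 32 /*cMinInstructions*/, 4096 /*cMaxInstructions*/,
                                            512 /*cMaxInstructionsWithoutExits*/, &Stats);
    /* A long stretch without exits suggests switching back to hardware-assisted execution. */
    if (Stats.cMaxExitDistance >= 512)
        LogFlow(("Few exits (ins=%u exits=%u maxdist=%u) - candidate for leaving IEM\n",
                 Stats.cInstructions, Stats.cExits, Stats.cMaxExitDistance));
    return rcStrict;
}
#endif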
10868
10869/**
10870 * Injects a trap, fault, abort, software interrupt or external interrupt.
10871 *
10872 * The parameter list matches TRPMQueryTrapAll pretty closely.
10873 *
10874 * @returns Strict VBox status code.
10875 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10876 * @param u8TrapNo The trap number.
10877 * @param enmType What type is it (trap/fault/abort), software
10878 * interrupt or hardware interrupt.
10879 * @param uErrCode The error code if applicable.
10880 * @param uCr2 The CR2 value if applicable.
10881 * @param cbInstr The instruction length (only relevant for
10882 * software interrupts).
10883 */
10884VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPUCC pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
10885 uint8_t cbInstr)
10886{
10887 iemInitDecoder(pVCpu, 0 /*fExecOpts*/); /** @todo wrong init function! */
10888#ifdef DBGFTRACE_ENABLED
10889 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
10890 u8TrapNo, enmType, uErrCode, uCr2);
10891#endif
10892
10893 uint32_t fFlags;
10894 switch (enmType)
10895 {
10896 case TRPM_HARDWARE_INT:
10897 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
10898 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
10899 uErrCode = uCr2 = 0;
10900 break;
10901
10902 case TRPM_SOFTWARE_INT:
10903 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
10904 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
10905 uErrCode = uCr2 = 0;
10906 break;
10907
10908 case TRPM_TRAP:
10909 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
10910 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
10911 if (u8TrapNo == X86_XCPT_PF)
10912 fFlags |= IEM_XCPT_FLAGS_CR2;
10913 switch (u8TrapNo)
10914 {
10915 case X86_XCPT_DF:
10916 case X86_XCPT_TS:
10917 case X86_XCPT_NP:
10918 case X86_XCPT_SS:
10919 case X86_XCPT_PF:
10920 case X86_XCPT_AC:
10921 case X86_XCPT_GP:
10922 fFlags |= IEM_XCPT_FLAGS_ERR;
10923 break;
10924 }
10925 break;
10926
10927 IEM_NOT_REACHED_DEFAULT_CASE_RET();
10928 }
10929
10930 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
10931
10932 if (pVCpu->iem.s.cActiveMappings > 0)
10933 iemMemRollback(pVCpu);
10934
10935 return rcStrict;
10936}
10937
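/*
 * Illustrative sketch only, excluded from the build: two typical ways of
 * calling IEMInjectTrap() - an external hardware interrupt (no error code,
 * cbInstr unused) and a page fault with error code and CR2.  The vector 0x20
 * and the fault details are assumed example values.
 */
#if 0
static VBOXSTRICTRC hypotheticalInjectionExamples(PVMCPUCC pVCpu)
{
    /* External (hardware) interrupt, e.g. vector 0x20 from the PIC/APIC. */
    VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, 0x20, TRPM_HARDWARE_INT, 0 /*uErrCode*/, 0 /*uCr2*/, 0 /*cbInstr*/);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;

    /* A page fault carrying an error code and the faulting address (CR2). */
    return IEMInjectTrap(pVCpu, X86_XCPT_PF, TRPM_TRAP, X86_TRAP_PF_P | X86_TRAP_PF_RW,
                         UINT64_C(0x00000000deadf000), 0 /*cbInstr*/);
}
#endif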
10938
10939/**
10940 * Injects the active TRPM event.
10941 *
10942 * @returns Strict VBox status code.
10943 * @param pVCpu The cross context virtual CPU structure.
10944 */
10945VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPUCC pVCpu)
10946{
10947#ifndef IEM_IMPLEMENTS_TASKSWITCH
10948 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
10949#else
10950 uint8_t u8TrapNo;
10951 TRPMEVENT enmType;
10952 uint32_t uErrCode;
10953 RTGCUINTPTR uCr2;
10954 uint8_t cbInstr;
10955 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr, NULL /* fIcebp */);
10956 if (RT_FAILURE(rc))
10957 return rc;
10958
10959 /** @todo r=ramshankar: Pass ICEBP info. to IEMInjectTrap() below and handle
10960 * ICEBP \#DB injection as a special case. */
10961 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
10962#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
10963 if (rcStrict == VINF_SVM_VMEXIT)
10964 rcStrict = VINF_SUCCESS;
10965#endif
10966#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10967 if (rcStrict == VINF_VMX_VMEXIT)
10968 rcStrict = VINF_SUCCESS;
10969#endif
10970 /** @todo Are there any other codes that imply the event was successfully
10971 * delivered to the guest? See @bugref{6607}. */
10972 if ( rcStrict == VINF_SUCCESS
10973 || rcStrict == VINF_IEM_RAISED_XCPT)
10974 TRPMResetTrap(pVCpu);
10975
10976 return rcStrict;
10977#endif
10978}
10979
10980
10981VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
10982{
10983 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
10984 return VERR_NOT_IMPLEMENTED;
10985}
10986
10987
10988VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
10989{
10990 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
10991 return VERR_NOT_IMPLEMENTED;
10992}
10993
10994
10995/**
10996 * Interface for HM and EM for executing string I/O OUT (write) instructions.
10997 *
10998 * This API ASSUMES that the caller has already verified that the guest code is
10999 * allowed to access the I/O port. (The I/O port is in the DX register in the
11000 * guest state.)
11001 *
11002 * @returns Strict VBox status code.
11003 * @param pVCpu The cross context virtual CPU structure.
11004 * @param cbValue The size of the I/O port access (1, 2, or 4).
11005 * @param enmAddrMode The addressing mode.
11006 * @param fRepPrefix Indicates whether a repeat prefix is used
11007 * (doesn't matter which for this instruction).
11008 * @param cbInstr The instruction length in bytes.
11009  * @param   iEffSeg             The effective segment register.
11010 * @param fIoChecked Whether the access to the I/O port has been
11011 * checked or not. It's typically checked in the
11012 * HM scenario.
11013 */
11014VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
11015 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
11016{
11017 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
11018 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
11019
11020 /*
11021 * State init.
11022 */
11023 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11024
11025 /*
11026 * Switch orgy for getting to the right handler.
11027 */
11028 VBOXSTRICTRC rcStrict;
11029 if (fRepPrefix)
11030 {
11031 switch (enmAddrMode)
11032 {
11033 case IEMMODE_16BIT:
11034 switch (cbValue)
11035 {
11036 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
11037 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
11038 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
11039 default:
11040 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11041 }
11042 break;
11043
11044 case IEMMODE_32BIT:
11045 switch (cbValue)
11046 {
11047 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
11048 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
11049 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
11050 default:
11051 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11052 }
11053 break;
11054
11055 case IEMMODE_64BIT:
11056 switch (cbValue)
11057 {
11058 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
11059 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
11060 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
11061 default:
11062 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11063 }
11064 break;
11065
11066 default:
11067 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
11068 }
11069 }
11070 else
11071 {
11072 switch (enmAddrMode)
11073 {
11074 case IEMMODE_16BIT:
11075 switch (cbValue)
11076 {
11077 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
11078 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
11079 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
11080 default:
11081 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11082 }
11083 break;
11084
11085 case IEMMODE_32BIT:
11086 switch (cbValue)
11087 {
11088 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
11089 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
11090 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
11091 default:
11092 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11093 }
11094 break;
11095
11096 case IEMMODE_64BIT:
11097 switch (cbValue)
11098 {
11099 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
11100 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
11101 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
11102 default:
11103 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11104 }
11105 break;
11106
11107 default:
11108 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
11109 }
11110 }
11111
11112 if (pVCpu->iem.s.cActiveMappings)
11113 iemMemRollback(pVCpu);
11114
11115 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11116}
11117
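/*
 * Illustrative sketch only, excluded from the build: handing a "REP OUTSB"
 * intercept to IEMExecStringIoWrite().  The instruction length and the claim
 * that the I/O port was already permission-checked are assumed example
 * inputs from a hypothetical exit record.
 */
#if 0
static VBOXSTRICTRC hypotheticalRepOutsbExit(PVMCPUCC pVCpu)
{
    return IEMExecStringIoWrite(pVCpu,
                                1 /*cbValue: byte-sized OUTSB*/,
                                IEMMODE_32BIT /*enmAddrMode*/,
                                true /*fRepPrefix*/,
                                2 /*cbInstr: REP prefix + OUTSB*/,
                                X86_SREG_DS /*iEffSeg*/,
                                true /*fIoChecked: the intercept already validated the port*/);
}
#endif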
11118
11119/**
11120 * Interface for HM and EM for executing string I/O IN (read) instructions.
11121 *
11122 * This API ASSUMES that the caller has already verified that the guest code is
11123 * allowed to access the I/O port. (The I/O port is in the DX register in the
11124 * guest state.)
11125 *
11126 * @returns Strict VBox status code.
11127 * @param pVCpu The cross context virtual CPU structure.
11128 * @param cbValue The size of the I/O port access (1, 2, or 4).
11129 * @param enmAddrMode The addressing mode.
11130 * @param fRepPrefix Indicates whether a repeat prefix is used
11131 * (doesn't matter which for this instruction).
11132 * @param cbInstr The instruction length in bytes.
11133 * @param fIoChecked Whether the access to the I/O port has been
11134 * checked or not. It's typically checked in the
11135 * HM scenario.
11136 */
11137VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
11138 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
11139{
11140 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
11141
11142 /*
11143 * State init.
11144 */
11145 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11146
11147 /*
11148 * Switch orgy for getting to the right handler.
11149 */
11150 VBOXSTRICTRC rcStrict;
11151 if (fRepPrefix)
11152 {
11153 switch (enmAddrMode)
11154 {
11155 case IEMMODE_16BIT:
11156 switch (cbValue)
11157 {
11158 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
11159 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
11160 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
11161 default:
11162 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11163 }
11164 break;
11165
11166 case IEMMODE_32BIT:
11167 switch (cbValue)
11168 {
11169 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
11170 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
11171 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
11172 default:
11173 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11174 }
11175 break;
11176
11177 case IEMMODE_64BIT:
11178 switch (cbValue)
11179 {
11180 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
11181 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
11182 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
11183 default:
11184 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11185 }
11186 break;
11187
11188 default:
11189 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
11190 }
11191 }
11192 else
11193 {
11194 switch (enmAddrMode)
11195 {
11196 case IEMMODE_16BIT:
11197 switch (cbValue)
11198 {
11199 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
11200 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
11201 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
11202 default:
11203 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11204 }
11205 break;
11206
11207 case IEMMODE_32BIT:
11208 switch (cbValue)
11209 {
11210 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
11211 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
11212 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
11213 default:
11214 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11215 }
11216 break;
11217
11218 case IEMMODE_64BIT:
11219 switch (cbValue)
11220 {
11221 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
11222 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
11223 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
11224 default:
11225 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
11226 }
11227 break;
11228
11229 default:
11230 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
11231 }
11232 }
11233
11234 if ( pVCpu->iem.s.cActiveMappings == 0
11235 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
11236 { /* likely */ }
11237 else
11238 {
11239 AssertMsg(!IOM_SUCCESS(rcStrict), ("%#x\n", VBOXSTRICTRC_VAL(rcStrict)));
11240 iemMemRollback(pVCpu);
11241 }
11242 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11243}
11244
11245
11246/**
11247  * Interface for rawmode to execute an OUT (write) instruction.
11248 *
11249 * @returns Strict VBox status code.
11250 * @param pVCpu The cross context virtual CPU structure.
11251 * @param cbInstr The instruction length in bytes.
11252  * @param   u16Port             The port to write to.
11253 * @param fImm Whether the port is specified using an immediate operand or
11254 * using the implicit DX register.
11255 * @param cbReg The register size.
11256 *
11257 * @remarks In ring-0 not all of the state needs to be synced in.
11258 */
11259VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
11260{
11261 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
11262 Assert(cbReg <= 4 && cbReg != 3);
11263
11264 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11265 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_out, u16Port, cbReg,
11266 ((uint8_t)fImm << 7) | 0xf /** @todo never worked with intercepts */);
11267 Assert(!pVCpu->iem.s.cActiveMappings);
11268 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11269}
11270
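/*
 * Illustrative sketch only, excluded from the build: forwarding an
 * "OUT DX, AL" intercept to IEMExecDecodedOut().  The port comes from the
 * guest DX register; the single byte instruction length matches the 0xEE
 * encoding.  The surrounding exit handler is assumed context.
 */
#if 0
static VBOXSTRICTRC hypotheticalOutDxAlExit(PVMCPUCC pVCpu)
{
    return IEMExecDecodedOut(pVCpu, 1 /*cbInstr*/, (uint16_t)pVCpu->cpum.GstCtx.rdx, false /*fImm*/, 1 /*cbReg*/);
}
#endif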
11271
11272/**
11273  * Interface for rawmode to execute an IN (read) instruction.
11274 *
11275 * @returns Strict VBox status code.
11276 * @param pVCpu The cross context virtual CPU structure.
11277 * @param cbInstr The instruction length in bytes.
11278 * @param u16Port The port to read.
11279 * @param fImm Whether the port is specified using an immediate operand or
11280 * using the implicit DX.
11281 * @param cbReg The register size.
11282 */
11283VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
11284{
11285 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
11286 Assert(cbReg <= 4 && cbReg != 3);
11287
11288 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11289 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_in, u16Port, cbReg,
11290 ((uint8_t)fImm << 7) | 0xf /** @todo never worked with intercepts */);
11291 Assert(!pVCpu->iem.s.cActiveMappings);
11292 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11293}
11294
11295
11296/**
11297 * Interface for HM and EM to write to a CRx register.
11298 *
11299 * @returns Strict VBox status code.
11300 * @param pVCpu The cross context virtual CPU structure.
11301 * @param cbInstr The instruction length in bytes.
11302 * @param iCrReg The control register number (destination).
11303 * @param iGReg The general purpose register number (source).
11304 *
11305 * @remarks In ring-0 not all of the state needs to be synced in.
11306 */
11307VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
11308{
11309 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11310 Assert(iCrReg < 16);
11311 Assert(iGReg < 16);
11312
11313 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11314 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
11315 Assert(!pVCpu->iem.s.cActiveMappings);
11316 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11317}
11318
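/*
 * Illustrative sketch only, excluded from the build: handling a
 * "MOV CR3, RAX" intercept via IEMExecDecodedMovCRxWrite().  The 3 byte
 * length matches the plain 0F 22 /r encoding; the register numbers are
 * assumed example values (iCrReg=3 for CR3, iGReg=0 for RAX).
 */
#if 0
static VBOXSTRICTRC hypotheticalMovToCr3Exit(PVMCPUCC pVCpu)
{
    return IEMExecDecodedMovCRxWrite(pVCpu, 3 /*cbInstr*/, 3 /*iCrReg*/, 0 /*iGReg*/);
}
#endif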
11319
11320/**
11321 * Interface for HM and EM to read from a CRx register.
11322 *
11323 * @returns Strict VBox status code.
11324 * @param pVCpu The cross context virtual CPU structure.
11325 * @param cbInstr The instruction length in bytes.
11326 * @param iGReg The general purpose register number (destination).
11327 * @param iCrReg The control register number (source).
11328 *
11329 * @remarks In ring-0 not all of the state needs to be synced in.
11330 */
11331VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
11332{
11333 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11334 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4
11335 | CPUMCTX_EXTRN_APIC_TPR);
11336 Assert(iCrReg < 16);
11337 Assert(iGReg < 16);
11338
11339 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11340 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
11341 Assert(!pVCpu->iem.s.cActiveMappings);
11342 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11343}
11344
11345
11346/**
11347 * Interface for HM and EM to write to a DRx register.
11348 *
11349 * @returns Strict VBox status code.
11350 * @param pVCpu The cross context virtual CPU structure.
11351 * @param cbInstr The instruction length in bytes.
11352 * @param iDrReg The debug register number (destination).
11353 * @param iGReg The general purpose register number (source).
11354 *
11355 * @remarks In ring-0 not all of the state needs to be synced in.
11356 */
11357VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovDRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iDrReg, uint8_t iGReg)
11358{
11359 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11360 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_DR7);
11361 Assert(iDrReg < 8);
11362 Assert(iGReg < 16);
11363
11364 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11365 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Dd_Rd, iDrReg, iGReg);
11366 Assert(!pVCpu->iem.s.cActiveMappings);
11367 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11368}
11369
11370
11371/**
11372 * Interface for HM and EM to read from a DRx register.
11373 *
11374 * @returns Strict VBox status code.
11375 * @param pVCpu The cross context virtual CPU structure.
11376 * @param cbInstr The instruction length in bytes.
11377 * @param iGReg The general purpose register number (destination).
11378 * @param iDrReg The debug register number (source).
11379 *
11380 * @remarks In ring-0 not all of the state needs to be synced in.
11381 */
11382VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovDRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iDrReg)
11383{
11384 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11385 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_DR7);
11386 Assert(iDrReg < 8);
11387 Assert(iGReg < 16);
11388
11389 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11390 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Dd, iGReg, iDrReg);
11391 Assert(!pVCpu->iem.s.cActiveMappings);
11392 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11393}
11394
11395
11396/**
11397 * Interface for HM and EM to clear the CR0[TS] bit.
11398 *
11399 * @returns Strict VBox status code.
11400 * @param pVCpu The cross context virtual CPU structure.
11401 * @param cbInstr The instruction length in bytes.
11402 *
11403 * @remarks In ring-0 not all of the state needs to be synced in.
11404 */
11405VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPUCC pVCpu, uint8_t cbInstr)
11406{
11407 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11408
11409 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11410 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
11411 Assert(!pVCpu->iem.s.cActiveMappings);
11412 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11413}
11414
11415
11416/**
11417 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
11418 *
11419 * @returns Strict VBox status code.
11420 * @param pVCpu The cross context virtual CPU structure.
11421 * @param cbInstr The instruction length in bytes.
11422 * @param uValue The value to load into CR0.
11423 * @param GCPtrEffDst The guest-linear address if the LMSW instruction has a
11424 * memory operand. Otherwise pass NIL_RTGCPTR.
11425 *
11426 * @remarks In ring-0 not all of the state needs to be synced in.
11427 */
11428VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t uValue, RTGCPTR GCPtrEffDst)
11429{
11430 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11431
11432 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11433 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_lmsw, uValue, GCPtrEffDst);
11434 Assert(!pVCpu->iem.s.cActiveMappings);
11435 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11436}
11437
11438
11439/**
11440 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
11441 *
11442 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
11443 *
11444 * @returns Strict VBox status code.
11445 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11446 * @param cbInstr The instruction length in bytes.
11447 * @remarks In ring-0 not all of the state needs to be synced in.
11448 * @thread EMT(pVCpu)
11449 */
11450VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPUCC pVCpu, uint8_t cbInstr)
11451{
11452 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11453
11454 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11455 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
11456 Assert(!pVCpu->iem.s.cActiveMappings);
11457 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11458}
11459
11460
11461/**
11462 * Interface for HM and EM to emulate the WBINVD instruction.
11463 *
11464 * @returns Strict VBox status code.
11465 * @param pVCpu The cross context virtual CPU structure.
11466 * @param cbInstr The instruction length in bytes.
11467 *
11468 * @remarks In ring-0 not all of the state needs to be synced in.
11469 */
11470VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWbinvd(PVMCPUCC pVCpu, uint8_t cbInstr)
11471{
11472 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11473
11474 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11475 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wbinvd);
11476 Assert(!pVCpu->iem.s.cActiveMappings);
11477 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11478}
11479
11480
11481/**
11482 * Interface for HM and EM to emulate the INVD instruction.
11483 *
11484 * @returns Strict VBox status code.
11485 * @param pVCpu The cross context virtual CPU structure.
11486 * @param cbInstr The instruction length in bytes.
11487 *
11488 * @remarks In ring-0 not all of the state needs to be synced in.
11489 */
11490VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvd(PVMCPUCC pVCpu, uint8_t cbInstr)
11491{
11492 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11493
11494 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11495 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invd);
11496 Assert(!pVCpu->iem.s.cActiveMappings);
11497 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11498}
11499
11500
11501/**
11502 * Interface for HM and EM to emulate the INVLPG instruction.
11503 *
11504 * @returns Strict VBox status code.
11505 * @retval VINF_PGM_SYNC_CR3
11506 *
11507 * @param pVCpu The cross context virtual CPU structure.
11508 * @param cbInstr The instruction length in bytes.
11509 * @param GCPtrPage The effective address of the page to invalidate.
11510 *
11511 * @remarks In ring-0 not all of the state needs to be synced in.
11512 */
11513VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpg(PVMCPUCC pVCpu, uint8_t cbInstr, RTGCPTR GCPtrPage)
11514{
11515 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11516
11517 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11518 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_invlpg, GCPtrPage);
11519 Assert(!pVCpu->iem.s.cActiveMappings);
11520 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11521}
11522
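/*
 * Illustrative sketch only, excluded from the build: an INVLPG intercept
 * handler that honours the VINF_PGM_SYNC_CR3 return documented above.  The
 * instruction length is the minimal 0F 01 /7 form; the policy of folding the
 * sync request into success is an assumption of this sketch.
 */
#if 0
static VBOXSTRICTRC hypotheticalInvlpgExit(PVMCPUCC pVCpu, RTGCPTR GCPtrPage)
{
    VBOXSTRICTRC rcStrict = IEMExecDecodedInvlpg(pVCpu, 3 /*cbInstr*/, GCPtrPage);
    if (rcStrict == VINF_PGM_SYNC_CR3)
    {
        /* The caller is expected to let PGM resync before resuming the guest. */
        rcStrict = VINF_SUCCESS;
    }
    return rcStrict;
}
#endif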
11523
11524/**
11525 * Interface for HM and EM to emulate the INVPCID instruction.
11526 *
11527 * @returns Strict VBox status code.
11528 * @retval VINF_PGM_SYNC_CR3
11529 *
11530 * @param pVCpu The cross context virtual CPU structure.
11531 * @param cbInstr The instruction length in bytes.
11532 * @param iEffSeg The effective segment register.
11533 * @param GCPtrDesc The effective address of the INVPCID descriptor.
11534 * @param uType The invalidation type.
11535 *
11536 * @remarks In ring-0 not all of the state needs to be synced in.
11537 */
11538VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvpcid(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPTR GCPtrDesc,
11539 uint64_t uType)
11540{
11541 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 4);
11542
11543 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11544 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_invpcid, iEffSeg, GCPtrDesc, uType);
11545 Assert(!pVCpu->iem.s.cActiveMappings);
11546 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11547}
11548
11549
11550/**
11551 * Interface for HM and EM to emulate the CPUID instruction.
11552 *
11553 * @returns Strict VBox status code.
11554 *
11555 * @param pVCpu The cross context virtual CPU structure.
11556 * @param cbInstr The instruction length in bytes.
11557 *
11558  * @remarks Not all of the state needs to be synced in, just the usual set plus RAX and RCX.
11559 */
11560VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedCpuid(PVMCPUCC pVCpu, uint8_t cbInstr)
11561{
11562 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11563 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX);
11564
11565 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11566 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_cpuid);
11567 Assert(!pVCpu->iem.s.cActiveMappings);
11568 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11569}
11570
11571
11572/**
11573 * Interface for HM and EM to emulate the RDPMC instruction.
11574 *
11575 * @returns Strict VBox status code.
11576 *
11577 * @param pVCpu The cross context virtual CPU structure.
11578 * @param cbInstr The instruction length in bytes.
11579 *
11580 * @remarks Not all of the state needs to be synced in.
11581 */
11582VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdpmc(PVMCPUCC pVCpu, uint8_t cbInstr)
11583{
11584 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11585 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
11586
11587 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11588 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdpmc);
11589 Assert(!pVCpu->iem.s.cActiveMappings);
11590 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11591}
11592
11593
11594/**
11595 * Interface for HM and EM to emulate the RDTSC instruction.
11596 *
11597 * @returns Strict VBox status code.
11598  * @retval  VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
11599 *
11600 * @param pVCpu The cross context virtual CPU structure.
11601 * @param cbInstr The instruction length in bytes.
11602 *
11603 * @remarks Not all of the state needs to be synced in.
11604 */
11605VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtsc(PVMCPUCC pVCpu, uint8_t cbInstr)
11606{
11607 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11608 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
11609
11610 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11611 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtsc);
11612 Assert(!pVCpu->iem.s.cActiveMappings);
11613 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11614}
11615
11616
11617/**
11618 * Interface for HM and EM to emulate the RDTSCP instruction.
11619 *
11620 * @returns Strict VBox status code.
11621  * @retval  VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
11622 *
11623 * @param pVCpu The cross context virtual CPU structure.
11624 * @param cbInstr The instruction length in bytes.
11625 *
11626  * @remarks Not all of the state needs to be synced in.  It is recommended
11627  *          to include CPUMCTX_EXTRN_TSC_AUX to avoid an extra fetch call.
11628 */
11629VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtscp(PVMCPUCC pVCpu, uint8_t cbInstr)
11630{
11631 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11632 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_TSC_AUX);
11633
11634 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11635 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtscp);
11636 Assert(!pVCpu->iem.s.cActiveMappings);
11637 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11638}
11639
11640
11641/**
11642 * Interface for HM and EM to emulate the RDMSR instruction.
11643 *
11644 * @returns Strict VBox status code.
11645  * @retval  VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
11646 *
11647 * @param pVCpu The cross context virtual CPU structure.
11648 * @param cbInstr The instruction length in bytes.
11649 *
11650 * @remarks Not all of the state needs to be synced in. Requires RCX and
11651 * (currently) all MSRs.
11652 */
11653VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
11654{
11655 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11656 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_ALL_MSRS);
11657
11658 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11659 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdmsr);
11660 Assert(!pVCpu->iem.s.cActiveMappings);
11661 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11662}
11663
11664
11665/**
11666 * Interface for HM and EM to emulate the WRMSR instruction.
11667 *
11668 * @returns Strict VBox status code.
11669  * @retval  VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
11670 *
11671 * @param pVCpu The cross context virtual CPU structure.
11672 * @param cbInstr The instruction length in bytes.
11673 *
11674 * @remarks Not all of the state needs to be synced in. Requires RCX, RAX, RDX,
11675 * and (currently) all MSRs.
11676 */
11677VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWrmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
11678{
11679 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11680 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK
11681 | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_ALL_MSRS);
11682
11683 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11684 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wrmsr);
11685 Assert(!pVCpu->iem.s.cActiveMappings);
11686 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11687}
11688
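/*
 * Illustrative sketch only, excluded from the build: a WRMSR intercept
 * handler.  It only shows the call itself; importing RCX/RAX/RDX and the MSR
 * state beforehand (see the @remarks above) is assumed to have been done by
 * the caller.
 */
#if 0
static VBOXSTRICTRC hypotheticalWrmsrExit(PVMCPUCC pVCpu)
{
    /* WRMSR is the two byte opcode 0F 30. */
    return IEMExecDecodedWrmsr(pVCpu, 2 /*cbInstr*/);
}
#endif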
11689
11690/**
11691 * Interface for HM and EM to emulate the MONITOR instruction.
11692 *
11693 * @returns Strict VBox status code.
11694  * @retval  VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
11695 *
11696 * @param pVCpu The cross context virtual CPU structure.
11697 * @param cbInstr The instruction length in bytes.
11698 *
11699 * @remarks Not all of the state needs to be synced in.
11700 * @remarks ASSUMES the default segment of DS and no segment override prefixes
11701 * are used.
11702 */
11703VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMonitor(PVMCPUCC pVCpu, uint8_t cbInstr)
11704{
11705 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11706 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);
11707
11708 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11709 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_monitor, X86_SREG_DS);
11710 Assert(!pVCpu->iem.s.cActiveMappings);
11711 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11712}
11713
11714
11715/**
11716 * Interface for HM and EM to emulate the MWAIT instruction.
11717 *
11718 * @returns Strict VBox status code.
11719  * @retval  VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
11720 *
11721 * @param pVCpu The cross context virtual CPU structure.
11722 * @param cbInstr The instruction length in bytes.
11723 *
11724 * @remarks Not all of the state needs to be synced in.
11725 */
11726VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMwait(PVMCPUCC pVCpu, uint8_t cbInstr)
11727{
11728 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11729 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX);
11730
11731 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11732 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_mwait);
11733 Assert(!pVCpu->iem.s.cActiveMappings);
11734 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11735}
11736
11737
11738/**
11739 * Interface for HM and EM to emulate the HLT instruction.
11740 *
11741 * @returns Strict VBox status code.
11742  * @retval  VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
11743 *
11744 * @param pVCpu The cross context virtual CPU structure.
11745 * @param cbInstr The instruction length in bytes.
11746 *
11747 * @remarks Not all of the state needs to be synced in.
11748 */
11749VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedHlt(PVMCPUCC pVCpu, uint8_t cbInstr)
11750{
11751 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
11752
11753 iemInitExec(pVCpu, 0 /*fExecOpts*/);
11754 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_hlt);
11755 Assert(!pVCpu->iem.s.cActiveMappings);
11756 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11757}
11758
11759
11760/**
11761 * Checks if IEM is in the process of delivering an event (interrupt or
11762 * exception).
11763 *
11764 * @returns true if we're in the process of raising an interrupt or exception,
11765 * false otherwise.
11766 * @param pVCpu The cross context virtual CPU structure.
11767 * @param puVector Where to store the vector associated with the
11768 * currently delivered event, optional.
11769  * @param   pfFlags         Where to store the event delivery flags (see
11770 * IEM_XCPT_FLAGS_XXX), optional.
11771 * @param puErr Where to store the error code associated with the
11772 * event, optional.
11773 * @param puCr2 Where to store the CR2 associated with the event,
11774 * optional.
11775 * @remarks The caller should check the flags to determine if the error code and
11776 * CR2 are valid for the event.
11777 */
11778VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPUCC pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr, uint64_t *puCr2)
11779{
11780 bool const fRaisingXcpt = pVCpu->iem.s.cXcptRecursions > 0;
11781 if (fRaisingXcpt)
11782 {
11783 if (puVector)
11784 *puVector = pVCpu->iem.s.uCurXcpt;
11785 if (pfFlags)
11786 *pfFlags = pVCpu->iem.s.fCurXcpt;
11787 if (puErr)
11788 *puErr = pVCpu->iem.s.uCurXcptErr;
11789 if (puCr2)
11790 *puCr2 = pVCpu->iem.s.uCurXcptCr2;
11791 }
11792 return fRaisingXcpt;
11793}
11794
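/*
 * Illustrative sketch only, excluded from the build: querying whether IEM is
 * in the middle of delivering an event, e.g. for building exit interruption
 * information.  How the result is consumed here is an assumption.
 */
#if 0
static void hypotheticalLogPendingXcpt(PVMCPUCC pVCpu)
{
    uint8_t  uVector = 0;
    uint32_t fFlags  = 0;
    uint32_t uErr    = 0;
    uint64_t uCr2    = 0;
    if (IEMGetCurrentXcpt(pVCpu, &uVector, &fFlags, &uErr, &uCr2))
        Log(("Currently delivering vector %#x fFlags=%#x uErr=%#x uCr2=%#RX64\n", uVector, fFlags, uErr, uCr2));
}
#endif
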
11795#ifdef IN_RING3
11796
11797/**
11798 * Handles the unlikely and probably fatal merge cases.
11799 *
11800 * @returns Merged status code.
11801 * @param rcStrict Current EM status code.
11802 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
11803 * with @a rcStrict.
11804 * @param iMemMap The memory mapping index. For error reporting only.
11805 * @param pVCpu The cross context virtual CPU structure of the calling
11806 * thread, for error reporting only.
11807 */
11808DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
11809 unsigned iMemMap, PVMCPUCC pVCpu)
11810{
11811 if (RT_FAILURE_NP(rcStrict))
11812 return rcStrict;
11813
11814 if (RT_FAILURE_NP(rcStrictCommit))
11815 return rcStrictCommit;
11816
11817 if (rcStrict == rcStrictCommit)
11818 return rcStrictCommit;
11819
11820 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
11821 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
11822 pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
11823 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
11824 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
11825 return VERR_IOM_FF_STATUS_IPE;
11826}
11827
11828
11829/**
11830 * Helper for IOMR3ProcessForceFlag.
11831 *
11832 * @returns Merged status code.
11833 * @param rcStrict Current EM status code.
11834 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
11835 * with @a rcStrict.
11836 * @param iMemMap The memory mapping index. For error reporting only.
11837 * @param pVCpu The cross context virtual CPU structure of the calling
11838 * thread, for error reporting only.
11839 */
11840DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPUCC pVCpu)
11841{
11842 /* Simple. */
11843 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
11844 return rcStrictCommit;
11845
11846 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
11847 return rcStrict;
11848
11849 /* EM scheduling status codes. */
11850 if (RT_LIKELY( rcStrict >= VINF_EM_FIRST
11851 && rcStrict <= VINF_EM_LAST))
11852 {
11853 if (RT_LIKELY( rcStrictCommit >= VINF_EM_FIRST
11854 && rcStrictCommit <= VINF_EM_LAST))
11855 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
11856 }
11857
11858 /* Unlikely */
11859 return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
11860}
11861
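/*
 * Illustrative sketch only, excluded from the build: what the merge rule
 * above means in practice - when EM has nothing pending (VINF_SUCCESS), the
 * commit status wins.  The wrapper function is purely for illustration.
 */
#if 0
static void hypotheticalMergeExample(PVMCPUCC pVCpu)
{
    VBOXSTRICTRC const rcMerged = iemR3MergeStatus(VINF_SUCCESS, VINF_EM_RESCHEDULE, 0 /*iMemMap*/, pVCpu);
    Assert(rcMerged == VINF_EM_RESCHEDULE);
    NOREF(rcMerged);
}
#endif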
11862
11863/**
11864 * Called by force-flag handling code when VMCPU_FF_IEM is set.
11865 *
11866 * @returns Merge between @a rcStrict and what the commit operation returned.
11867 * @param pVM The cross context VM structure.
11868 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11869 * @param rcStrict The status code returned by ring-0 or raw-mode.
11870 */
11871VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
11872{
11873 /*
11874 * Reset the pending commit.
11875 */
11876 AssertMsg( (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
11877 & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
11878 ("%#x %#x %#x\n",
11879 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
11880 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
11881
11882 /*
11883 * Commit the pending bounce buffers (usually just one).
11884 */
11885 unsigned cBufs = 0;
11886 unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
11887 while (iMemMap-- > 0)
11888 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
11889 {
11890 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
11891 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
11892 Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
11893
11894 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
11895 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
11896 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
11897
11898 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
11899 {
11900 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
11901 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
11902 pbBuf,
11903 cbFirst,
11904 PGMACCESSORIGIN_IEM);
11905 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
11906 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
11907 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
11908 VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
11909 }
11910
11911 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
11912 {
11913 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
11914 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
11915 pbBuf + cbFirst,
11916 cbSecond,
11917 PGMACCESSORIGIN_IEM);
11918 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
11919 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
11920 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
11921 VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
11922 }
11923 cBufs++;
11924 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
11925 }
11926
11927 AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
11928 ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
11929 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
11930 pVCpu->iem.s.cActiveMappings = 0;
11931 return rcStrict;
11932}
11933
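/*
 * Illustrative sketch only, excluded from the build: the shape of the ring-3
 * force-flag handling that ends up calling IEMR3ProcessForceFlag(), i.e. EM
 * noticing VMCPU_FF_IEM after a ring-0/raw-mode return and letting IEM commit
 * the pending I/O or MMIO write.  The surrounding loop is assumed context.
 */
#if 0
static VBOXSTRICTRC hypotheticalForceFlagCheck(PVM pVM, PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
{
    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
        rcStrict = IEMR3ProcessForceFlag(pVM, pVCpu, rcStrict);
    return rcStrict;
}
#endif
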
11934#endif /* IN_RING3 */
11935