VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp @ 104932

Last change on this file since 104932 was 104932, checked in by vboxsync, 5 months ago

VMM/PGM,IEM: Refactored+copied PGMGstGetPage into PGMGstQueryPage that takes care of table walking, setting A & D bits and validating the access. Use new function in IEM. bugref:10687

1/* $Id: IEMAll.cpp 104932 2024-06-15 00:29:39Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/** @page pg_iem IEM - Interpreted Execution Manager
30 *
31 * The interpreted execution manager (IEM) is for executing short guest code
32 * sequences that are causing too many exits / virtualization traps. It will
33 * also be used to interpret single instructions, thus replacing the selective
34 * interpreters in EM and IOM.
35 *
36 * Design goals:
37 * - Relatively small footprint, although we favour speed and correctness
38 * over size.
39 * - Reasonably fast.
40 * - Correctly handle lock prefixed instructions.
41 * - Complete instruction set - eventually.
42 * - Refactorable into a recompiler, maybe.
43 * - Replace EMInterpret*.
44 *
45 * Using the existing disassembler has been considered; however, it is thought
46 * to conflict with speed as the disassembler chews things a bit too much while
47 * leaving us with a somewhat complicated state to interpret afterwards.
48 *
49 *
50 * The current code is very much work in progress. You've been warned!
51 *
52 *
53 * @section sec_iem_fpu_instr FPU Instructions
54 *
55 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
56 * same or equivalent instructions on the host FPU. To make life easy, we also
57 * let the FPU prioritize the unmasked exceptions for us. This, however, only
58 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
59 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
60 * can trigger spurious FPU exceptions.
61 *
62 * The guest FPU state is not loaded into the host CPU and kept there till we
63 * leave IEM because the calling conventions have declared an all year open
64 * season on much of the FPU state. For instance an innocent looking call to
65 * memcpy might end up using a whole bunch of XMM or MM registers if the
66 * particular implementation finds it worthwhile.
67 *
68 *
69 * @section sec_iem_logging Logging
70 *
71 * The IEM code uses the "IEM" log group for the main logging. The different
72 * logging levels/flags are generally used for the following purposes:
73 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
74 * - Flow (LogFlow) : Basic enter/exit IEM state info.
75 * - Level 2 (Log2) : ?
76 * - Level 3 (Log3) : More detailed enter/exit IEM state info.
77 * - Level 4 (Log4) : Decoding mnemonics w/ EIP.
78 * - Level 5 (Log5) : Decoding details.
79 * - Level 6 (Log6) : Enables/disables the lockstep comparison with REM.
80 * - Level 7 (Log7) : iret++ execution logging.
81 * - Level 8 (Log8) :
82 * - Level 9 (Log9) :
83 * - Level 10 (Log10): TLBs.
84 * - Level 11 (Log11): Unmasked FPU exceptions.
85 *
86 * The \"IEM_MEM\" log group covers most of memory related details logging,
87 * except for errors and exceptions:
88 * - Level 1 (Log) : Reads.
89 * - Level 2 (Log2) : Read fallbacks.
90 * - Level 3 (Log3) : MemMap read.
91 * - Level 4 (Log4) : MemMap read fallbacks.
92 * - Level 5 (Log5) : Writes
93 * - Level 6 (Log6) : Write fallbacks.
94 * - Level 7 (Log7) : MemMap writes and read-writes.
95 * - Level 8 (Log8) : MemMap write and read-write fallbacks.
96 * - Level 9 (Log9) : Stack reads.
97 * - Level 10 (Log10): Stack read fallbacks.
98 * - Level 11 (Log11): Stack writes.
99 * - Level 12 (Log12): Stack write fallbacks.
100 * - Flow (LogFlow) :
101 *
102 * The SVM (AMD-V) and VMX (VT-x) code has the following assignments:
103 * - Level 1 (Log) : Errors and other major events.
104 * - Flow (LogFlow) : Misc flow stuff (cleanup?)
105 * - Level 2 (Log2) : VM exits.
106 *
107 * The syscall logging level assignments:
108 * - Level 1: DOS and BIOS.
109 * - Level 2: Windows 3.x
110 * - Level 3: Linux.
111 */
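/*
 * Illustrative sketch of the level assignments above at a call site compiled
 * with LOG_GROUP set to LOG_GROUP_IEM (the messages and variables here are
 * made up for illustration, not taken from the surrounding code):
 *
 *      Log(("raising #GP(0) for bogus selector %#x\n", uSel));     // level 1: errors/exceptions
 *      Log4(("decode: %04x:%RX64  mov eax, ebx\n", uCs, uRip));    // level 4: mnemonics w/ EIP
 *      Log10(("code TLB miss for %RGv\n", GCPtrPC));               // level 10: TLBs
 */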
112
113/* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
114#ifdef _MSC_VER
115# pragma warning(disable:4505)
116#endif
117
118
119/*********************************************************************************************************************************
120* Header Files *
121*********************************************************************************************************************************/
122#define LOG_GROUP LOG_GROUP_IEM
123#define VMCPU_INCL_CPUM_GST_CTX
124#include <VBox/vmm/iem.h>
125#include <VBox/vmm/cpum.h>
126#include <VBox/vmm/apic.h>
127#include <VBox/vmm/pdm.h>
128#include <VBox/vmm/pgm.h>
129#include <VBox/vmm/iom.h>
130#include <VBox/vmm/em.h>
131#include <VBox/vmm/hm.h>
132#include <VBox/vmm/nem.h>
133#include <VBox/vmm/gcm.h>
134#include <VBox/vmm/gim.h>
135#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
136# include <VBox/vmm/em.h>
137# include <VBox/vmm/hm_svm.h>
138#endif
139#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
140# include <VBox/vmm/hmvmxinline.h>
141#endif
142#include <VBox/vmm/tm.h>
143#include <VBox/vmm/dbgf.h>
144#include <VBox/vmm/dbgftrace.h>
145#include "IEMInternal.h"
146#include <VBox/vmm/vmcc.h>
147#include <VBox/log.h>
148#include <VBox/err.h>
149#include <VBox/param.h>
150#include <VBox/dis.h>
151#include <iprt/asm-math.h>
152#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
153# include <iprt/asm-amd64-x86.h>
154#elif defined(RT_ARCH_ARM64) || defined(RT_ARCH_ARM32)
155# include <iprt/asm-arm.h>
156#endif
157#include <iprt/assert.h>
158#include <iprt/string.h>
159#include <iprt/x86.h>
160
161#include "IEMInline.h"
162
163
164/*********************************************************************************************************************************
165* Structures and Typedefs *
166*********************************************************************************************************************************/
167/**
168 * CPU exception classes.
169 */
170typedef enum IEMXCPTCLASS
171{
172 IEMXCPTCLASS_BENIGN,
173 IEMXCPTCLASS_CONTRIBUTORY,
174 IEMXCPTCLASS_PAGE_FAULT,
175 IEMXCPTCLASS_DOUBLE_FAULT
176} IEMXCPTCLASS;
177
178
179/*********************************************************************************************************************************
180* Global Variables *
181*********************************************************************************************************************************/
182#if defined(IEM_LOG_MEMORY_WRITES)
183/** What IEM just wrote. */
184uint8_t g_abIemWrote[256];
185/** How much IEM just wrote. */
186size_t g_cbIemWrote;
187#endif
188
189
190/*********************************************************************************************************************************
191* Internal Functions *
192*********************************************************************************************************************************/
193static VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,
194 uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT;
195
196
197/**
198 * Calculates IEM_F_BRK_PENDING_XXX (IEM_F_PENDING_BRK_MASK) flags, slow code
199 * path.
200 *
201 * @returns IEM_F_BRK_PENDING_XXX or zero.
202 * @param pVCpu The cross context virtual CPU structure of the
203 * calling thread.
204 *
205 * @note Don't call directly, use iemCalcExecDbgFlags instead.
206 */
207uint32_t iemCalcExecDbgFlagsSlow(PVMCPUCC pVCpu)
208{
209 uint32_t fExec = 0;
210
211 /*
212 * Process guest breakpoints.
213 */
214#define PROCESS_ONE_BP(a_fDr7, a_iBp) do { \
215 if (a_fDr7 & X86_DR7_L_G(a_iBp)) \
216 { \
217 switch (X86_DR7_GET_RW(a_fDr7, a_iBp)) \
218 { \
219 case X86_DR7_RW_EO: \
220 fExec |= IEM_F_PENDING_BRK_INSTR; \
221 break; \
222 case X86_DR7_RW_WO: \
223 case X86_DR7_RW_RW: \
224 fExec |= IEM_F_PENDING_BRK_DATA; \
225 break; \
226 case X86_DR7_RW_IO: \
227 fExec |= IEM_F_PENDING_BRK_X86_IO; \
228 break; \
229 } \
230 } \
231 } while (0)
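 /* Worked example (illustrative): with guest DR7 = 0x00000403 the L0 and G0
    bits are set, so PROCESS_ONE_BP(0x403, 0) looks at the R/W0 field (bits
    16-17), finds 00b = X86_DR7_RW_EO (execute only) and ORs in
    IEM_F_PENDING_BRK_INSTR; a write-only (01b) or read/write (11b) breakpoint
    would set IEM_F_PENDING_BRK_DATA instead. */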
232
233 uint32_t const fGstDr7 = (uint32_t)pVCpu->cpum.GstCtx.dr[7];
234 if (fGstDr7 & X86_DR7_ENABLED_MASK)
235 {
236 PROCESS_ONE_BP(fGstDr7, 0);
237 PROCESS_ONE_BP(fGstDr7, 1);
238 PROCESS_ONE_BP(fGstDr7, 2);
239 PROCESS_ONE_BP(fGstDr7, 3);
240 }
241
242 /*
243 * Process hypervisor breakpoints.
244 */
245 uint32_t const fHyperDr7 = DBGFBpGetDR7(pVCpu->CTX_SUFF(pVM));
246 if (fHyperDr7 & X86_DR7_ENABLED_MASK)
247 {
248 PROCESS_ONE_BP(fHyperDr7, 0);
249 PROCESS_ONE_BP(fHyperDr7, 1);
250 PROCESS_ONE_BP(fHyperDr7, 2);
251 PROCESS_ONE_BP(fHyperDr7, 3);
252 }
253
254 return fExec;
255}
256
257
258/**
259 * Initializes the decoder state.
260 *
261 * iemReInitDecoder is mostly a copy of this function.
262 *
263 * @param pVCpu The cross context virtual CPU structure of the
264 * calling thread.
265 * @param fExecOpts Optional execution flags:
266 * - IEM_F_BYPASS_HANDLERS
267 * - IEM_F_X86_DISREGARD_LOCK
268 */
269DECLINLINE(void) iemInitDecoder(PVMCPUCC pVCpu, uint32_t fExecOpts)
270{
271 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
272 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
273 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
274 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
275 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
276 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
277 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
278 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
279 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
280 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
281
282 /* Execution state: */
283 uint32_t fExec;
284 pVCpu->iem.s.fExec = fExec = iemCalcExecFlags(pVCpu) | fExecOpts;
285
286 /* Decoder state: */
287 pVCpu->iem.s.enmDefAddrMode = fExec & IEM_F_MODE_CPUMODE_MASK; /** @todo check if this is correct... */
288 pVCpu->iem.s.enmEffAddrMode = fExec & IEM_F_MODE_CPUMODE_MASK;
289 if ((fExec & IEM_F_MODE_CPUMODE_MASK) != IEMMODE_64BIT)
290 {
291 pVCpu->iem.s.enmDefOpSize = fExec & IEM_F_MODE_CPUMODE_MASK; /** @todo check if this is correct... */
292 pVCpu->iem.s.enmEffOpSize = fExec & IEM_F_MODE_CPUMODE_MASK;
293 }
294 else
295 {
296 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
297 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
298 }
299 pVCpu->iem.s.fPrefixes = 0;
300 pVCpu->iem.s.uRexReg = 0;
301 pVCpu->iem.s.uRexB = 0;
302 pVCpu->iem.s.uRexIndex = 0;
303 pVCpu->iem.s.idxPrefix = 0;
304 pVCpu->iem.s.uVex3rdReg = 0;
305 pVCpu->iem.s.uVexLength = 0;
306 pVCpu->iem.s.fEvexStuff = 0;
307 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
308#ifdef IEM_WITH_CODE_TLB
309 pVCpu->iem.s.pbInstrBuf = NULL;
310 pVCpu->iem.s.offInstrNextByte = 0;
311 pVCpu->iem.s.offCurInstrStart = 0;
312# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
313 pVCpu->iem.s.offOpcode = 0;
314# endif
315# ifdef VBOX_STRICT
316 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
317 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
318 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
319 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
320# endif
321#else
322 pVCpu->iem.s.offOpcode = 0;
323 pVCpu->iem.s.cbOpcode = 0;
324#endif
325 pVCpu->iem.s.offModRm = 0;
326 pVCpu->iem.s.cActiveMappings = 0;
327 pVCpu->iem.s.iNextMapping = 0;
328 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
329
330#ifdef DBGFTRACE_ENABLED
331 switch (IEM_GET_CPU_MODE(pVCpu))
332 {
333 case IEMMODE_64BIT:
334 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.rip);
335 break;
336 case IEMMODE_32BIT:
337 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
338 break;
339 case IEMMODE_16BIT:
340 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
341 break;
342 }
343#endif
344}
345
346
347/**
348 * Reinitializes the decoder state 2nd+ loop of IEMExecLots.
349 *
350 * This is mostly a copy of iemInitDecoder.
351 *
352 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
353 */
354DECLINLINE(void) iemReInitDecoder(PVMCPUCC pVCpu)
355{
356 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
357 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
358 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
359 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
360 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
361 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
362 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
363 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
364 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
365
366 /* ASSUMES: Anyone changing CPU state affecting the fExec bits will update them! */
367 AssertMsg((pVCpu->iem.s.fExec & ~IEM_F_USER_OPTS) == iemCalcExecFlags(pVCpu),
368 ("fExec=%#x iemCalcExecModeFlags=%#x\n", pVCpu->iem.s.fExec, iemCalcExecFlags(pVCpu)));
369
370 IEMMODE const enmMode = IEM_GET_CPU_MODE(pVCpu);
371 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
372 pVCpu->iem.s.enmEffAddrMode = enmMode;
373 if (enmMode != IEMMODE_64BIT)
374 {
375 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
376 pVCpu->iem.s.enmEffOpSize = enmMode;
377 }
378 else
379 {
380 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
381 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
382 }
383 pVCpu->iem.s.fPrefixes = 0;
384 pVCpu->iem.s.uRexReg = 0;
385 pVCpu->iem.s.uRexB = 0;
386 pVCpu->iem.s.uRexIndex = 0;
387 pVCpu->iem.s.idxPrefix = 0;
388 pVCpu->iem.s.uVex3rdReg = 0;
389 pVCpu->iem.s.uVexLength = 0;
390 pVCpu->iem.s.fEvexStuff = 0;
391 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
392#ifdef IEM_WITH_CODE_TLB
393 if (pVCpu->iem.s.pbInstrBuf)
394 {
395 uint64_t off = (enmMode == IEMMODE_64BIT
396 ? pVCpu->cpum.GstCtx.rip
397 : pVCpu->cpum.GstCtx.eip + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base)
398 - pVCpu->iem.s.uInstrBufPc;
399 if (off < pVCpu->iem.s.cbInstrBufTotal)
400 {
401 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
402 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
403 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
404 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
405 else
406 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
407 }
408 else
409 {
410 pVCpu->iem.s.pbInstrBuf = NULL;
411 pVCpu->iem.s.offInstrNextByte = 0;
412 pVCpu->iem.s.offCurInstrStart = 0;
413 pVCpu->iem.s.cbInstrBuf = 0;
414 pVCpu->iem.s.cbInstrBufTotal = 0;
415 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
416 }
417 }
418 else
419 {
420 pVCpu->iem.s.offInstrNextByte = 0;
421 pVCpu->iem.s.offCurInstrStart = 0;
422 pVCpu->iem.s.cbInstrBuf = 0;
423 pVCpu->iem.s.cbInstrBufTotal = 0;
424# ifdef VBOX_STRICT
425 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
426# endif
427 }
428# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
429 pVCpu->iem.s.offOpcode = 0;
430# endif
431#else /* !IEM_WITH_CODE_TLB */
432 pVCpu->iem.s.cbOpcode = 0;
433 pVCpu->iem.s.offOpcode = 0;
434#endif /* !IEM_WITH_CODE_TLB */
435 pVCpu->iem.s.offModRm = 0;
436 Assert(pVCpu->iem.s.cActiveMappings == 0);
437 pVCpu->iem.s.iNextMapping = 0;
438 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
439 Assert(!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS));
440
441#ifdef DBGFTRACE_ENABLED
442 switch (enmMode)
443 {
444 case IEMMODE_64BIT:
445 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.rip);
446 break;
447 case IEMMODE_32BIT:
448 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
449 break;
450 case IEMMODE_16BIT:
451 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
452 break;
453 }
454#endif
455}
456
457
458
459/**
460 * Prefetch opcodes the first time when starting executing.
461 *
462 * @returns Strict VBox status code.
463 * @param pVCpu The cross context virtual CPU structure of the
464 * calling thread.
465 * @param fExecOpts Optional execution flags:
466 * - IEM_F_BYPASS_HANDLERS
467 * - IEM_F_X86_DISREGARD_LOCK
468 */
469static VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPUCC pVCpu, uint32_t fExecOpts) RT_NOEXCEPT
470{
471 iemInitDecoder(pVCpu, fExecOpts);
472
473#ifndef IEM_WITH_CODE_TLB
474 /*
475 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
476 *
477 * First translate CS:rIP to a physical address.
478 *
479 * Note! The iemOpcodeFetchMoreBytes code depends on this here code to fetch
480 * all relevant bytes from the first page, as it ASSUMES it's only ever
481 * called for dealing with CS.LIM, page crossing and instructions that
482 * are too long.
483 */
484 uint32_t cbToTryRead;
485 RTGCPTR GCPtrPC;
486 if (IEM_IS_64BIT_CODE(pVCpu))
487 {
488 cbToTryRead = GUEST_PAGE_SIZE;
489 GCPtrPC = pVCpu->cpum.GstCtx.rip;
490 if (IEM_IS_CANONICAL(GCPtrPC))
491 cbToTryRead = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
492 else
493 return iemRaiseGeneralProtectionFault0(pVCpu);
494 }
495 else
496 {
497 uint32_t GCPtrPC32 = pVCpu->cpum.GstCtx.eip;
498 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu), ("%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
499 if (GCPtrPC32 <= pVCpu->cpum.GstCtx.cs.u32Limit)
500 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrPC32 + 1;
501 else
502 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
503 if (cbToTryRead) { /* likely */ }
504 else /* overflowed */
505 {
506 Assert(GCPtrPC32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
507 cbToTryRead = UINT32_MAX;
508 }
509 GCPtrPC = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrPC32;
510 Assert(GCPtrPC <= UINT32_MAX);
511 }
512
513 PGMPTWALKFAST WalkFast;
514 int rc = PGMGstQueryPageFast(pVCpu, GCPtrPC,
515 IEM_GET_CPL(pVCpu) == 3 ? PGMQPAGE_F_EXECUTE | PGMQPAGE_F_USER_MODE : PGMQPAGE_F_EXECUTE,
516 &WalkFast);
517 if (RT_SUCCESS(rc))
518 Assert(WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED);
519 else
520 {
521 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
522# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
523/** @todo This isn't quite right yet, as PGM_GST_SLAT_NAME_EPT(Walk) doesn't
524 * know about what kind of access we're making! See PGM_GST_NAME(WalkFast). */
525 if (WalkFast.fFailed & PGM_WALKFAIL_EPT)
526 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
527# endif
528 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, rc);
529 }
530#if 0
531 if ((WalkFast.fEffective & X86_PTE_US) || IEM_GET_CPL(pVCpu) != 3) { /* likely */ }
532 else
533 {
534 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
535# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
536/** @todo this is completely wrong for EPT. WalkFast.fFailed is always zero here!*/
537# error completely wrong
538 if (WalkFast.fFailed & PGM_WALKFAIL_EPT)
539 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
540# endif
541 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
542 }
543 if (!(WalkFast.fEffective & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
544 else
545 {
546 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
547# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
548/** @todo this is completely wrong for EPT. WalkFast.fFailed is always zero here!*/
549# error completely wrong.
550 if (WalkFast.fFailed & PGM_WALKFAIL_EPT)
551 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
552# endif
553 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
554 }
555#else
556 Assert((WalkFast.fEffective & X86_PTE_US) || IEM_GET_CPL(pVCpu) != 3);
557 Assert(!(WalkFast.fEffective & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE));
558#endif
559 RTGCPHYS const GCPhys = WalkFast.GCPhys;
560 /** @todo Check reserved bits and such stuff. PGM is better at doing
561 * that, so do it when implementing the guest virtual address
562 * TLB... */
563
564 /*
565 * Read the bytes at this address.
566 */
567 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
568 if (cbToTryRead > cbLeftOnPage)
569 cbToTryRead = cbLeftOnPage;
570 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
571 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
572
573 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
574 {
575 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
576 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
577 { /* likely */ }
578 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
579 {
580 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
581 GCPtrPC, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
582 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
583 }
584 else
585 {
586 Log((RT_SUCCESS(rcStrict)
587 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
588 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
589 GCPtrPC, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
590 return rcStrict;
591 }
592 }
593 else
594 {
595 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
596 if (RT_SUCCESS(rc))
597 { /* likely */ }
598 else
599 {
600 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
601 GCPtrPC, GCPhys, rc, cbToTryRead));
602 return rc;
603 }
604 }
605 pVCpu->iem.s.cbOpcode = cbToTryRead;
606#endif /* !IEM_WITH_CODE_TLB */
607 return VINF_SUCCESS;
608}
609
610
611/**
612 * Invalidates the IEM TLBs.
613 *
614 * This is called internally as well as by PGM when moving GC mappings.
615 *
616 * @param pVCpu The cross context virtual CPU structure of the calling
617 * thread.
618 */
619VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPUCC pVCpu)
620{
621#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
622 Log10(("IEMTlbInvalidateAll\n"));
623# ifdef IEM_WITH_CODE_TLB
624 pVCpu->iem.s.cbInstrBufTotal = 0;
625 pVCpu->iem.s.CodeTlb.uTlbRevision += IEMTLB_REVISION_INCR;
626 if (pVCpu->iem.s.CodeTlb.uTlbRevision != 0)
627 { /* very likely */ }
628 else
629 {
630 pVCpu->iem.s.CodeTlb.uTlbRevision = IEMTLB_REVISION_INCR;
631 unsigned i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
632 while (i-- > 0)
633 pVCpu->iem.s.CodeTlb.aEntries[i].uTag = 0;
634 }
635# endif
636
637# ifdef IEM_WITH_DATA_TLB
638 pVCpu->iem.s.DataTlb.uTlbRevision += IEMTLB_REVISION_INCR;
639 if (pVCpu->iem.s.DataTlb.uTlbRevision != 0)
640 { /* very likely */ }
641 else
642 {
643 pVCpu->iem.s.DataTlb.uTlbRevision = IEMTLB_REVISION_INCR;
644 unsigned i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
645 while (i-- > 0)
646 pVCpu->iem.s.DataTlb.aEntries[i].uTag = 0;
647 }
648# endif
649#else
650 RT_NOREF(pVCpu);
651#endif
652}
653
654
655/**
656 * Invalidates a page in the TLBs.
657 *
658 * @param pVCpu The cross context virtual CPU structure of the calling
659 * thread.
660 * @param GCPtr The address of the page to invalidate
661 * @thread EMT(pVCpu)
662 */
663VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPUCC pVCpu, RTGCPTR GCPtr)
664{
665#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
666 Log10(("IEMTlbInvalidatePage: GCPtr=%RGv\n", GCPtr));
667 GCPtr = IEMTLB_CALC_TAG_NO_REV(GCPtr);
668 Assert(!(GCPtr >> (48 - X86_PAGE_SHIFT)));
669 uintptr_t const idx = IEMTLB_TAG_TO_INDEX(GCPtr);
670
671# ifdef IEM_WITH_CODE_TLB
672 if (pVCpu->iem.s.CodeTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.CodeTlb.uTlbRevision))
673 {
674 pVCpu->iem.s.CodeTlb.aEntries[idx].uTag = 0;
675 if (GCPtr == IEMTLB_CALC_TAG_NO_REV(pVCpu->iem.s.uInstrBufPc))
676 pVCpu->iem.s.cbInstrBufTotal = 0;
677 }
678# endif
679
680# ifdef IEM_WITH_DATA_TLB
681 if (pVCpu->iem.s.DataTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.DataTlb.uTlbRevision))
682 pVCpu->iem.s.DataTlb.aEntries[idx].uTag = 0;
683# endif
684#else
685 NOREF(pVCpu); NOREF(GCPtr);
686#endif
687}
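/* Usage sketch (assumed, not taken from a specific caller): code dropping a
 * single guest mapping (e.g. when emulating INVLPG) would flush just that page
 * on the owning EMT, while a CR3 reload or paging-mode change would drop the
 * whole virtual TLB:
 *
 *      IEMTlbInvalidatePage(pVCpu, GCPtrPage);    // single page
 *      IEMTlbInvalidateAll(pVCpu);                // everything
 */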
688
689
690#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
691/**
692 * Invalidates both TLBs the slow way following a rollover.
693 *
694 * Worker for IEMTlbInvalidateAllPhysical,
695 * IEMTlbInvalidateAllPhysicalAllCpus, iemOpcodeFetchBytesJmp, iemMemMap,
696 * iemMemMapJmp and others.
697 *
698 * @thread EMT(pVCpu)
699 */
700static void IEMTlbInvalidateAllPhysicalSlow(PVMCPUCC pVCpu)
701{
702 Log10(("IEMTlbInvalidateAllPhysicalSlow\n"));
703 ASMAtomicWriteU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
704 ASMAtomicWriteU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
705
706 unsigned i;
707# ifdef IEM_WITH_CODE_TLB
708 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
709 while (i-- > 0)
710 {
711 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
712 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~( IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
713 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
714 }
715# endif
716# ifdef IEM_WITH_DATA_TLB
717 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
718 while (i-- > 0)
719 {
720 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
721 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~( IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
722 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
723 }
724# endif
725
726}
727#endif
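/* How the physical revision works (summary of IEMTlbInvalidateAllPhysicalSlow
 * above and the lookup path further down): every TLB entry keeps the physical
 * revision in the IEMTLBE_F_PHYS_REV bits of fFlagsAndPhysRev. A physical
 * flush normally just bumps uTlbPhysRev, so the equality check done at lookup
 * time no longer matches and the entry's host mapping is lazily re-resolved
 * via PGMPhysIemGCPhys2PtrNoLock. Only when the counter would wrap does the
 * slow path walk all entries and scrub the stored revision bits. */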
728
729
730/**
731 * Invalidates the host physical aspects of the IEM TLBs.
732 *
733 * This is called internally as well as by PGM when moving GC mappings.
734 *
735 * @param pVCpu The cross context virtual CPU structure of the calling
736 * thread.
737 * @note Currently not used.
738 */
739VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPUCC pVCpu)
740{
741#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
742 /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
743 Log10(("IEMTlbInvalidateAllPhysical\n"));
744
745# ifdef IEM_WITH_CODE_TLB
746 pVCpu->iem.s.cbInstrBufTotal = 0;
747# endif
748 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
749 if (RT_LIKELY(uTlbPhysRev > IEMTLB_PHYS_REV_INCR * 2))
750 {
751 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
752 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
753 }
754 else
755 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
756#else
757 NOREF(pVCpu);
758#endif
759}
760
761
762/**
763 * Invalidates the host physical aspects of the IEM TLBs.
764 *
765 * This is called internally as well as by PGM when moving GC mappings.
766 *
767 * @param pVM The cross context VM structure.
768 * @param idCpuCaller The ID of the calling EMT if available to the caller,
769 * otherwise NIL_VMCPUID.
770 * @param enmReason The reason we're called.
771 *
772 * @remarks Caller holds the PGM lock.
773 */
774VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVMCC pVM, VMCPUID idCpuCaller, IEMTLBPHYSFLUSHREASON enmReason)
775{
776#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
777 PVMCPUCC const pVCpuCaller = idCpuCaller >= pVM->cCpus ? VMMGetCpu(pVM) : VMMGetCpuById(pVM, idCpuCaller);
778 if (pVCpuCaller)
779 VMCPU_ASSERT_EMT(pVCpuCaller);
780 Log10(("IEMTlbInvalidateAllPhysicalAllCpus: %d\n", enmReason)); RT_NOREF(enmReason);
781
782 VMCC_FOR_EACH_VMCPU(pVM)
783 {
784# ifdef IEM_WITH_CODE_TLB
785 if (pVCpuCaller == pVCpu)
786 pVCpu->iem.s.cbInstrBufTotal = 0;
787# endif
788
789 uint64_t const uTlbPhysRevPrev = ASMAtomicUoReadU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev);
790 uint64_t uTlbPhysRevNew = uTlbPhysRevPrev + IEMTLB_PHYS_REV_INCR;
791 if (RT_LIKELY(uTlbPhysRevNew > IEMTLB_PHYS_REV_INCR * 2))
792 { /* likely */}
793 else if (pVCpuCaller != pVCpu)
794 uTlbPhysRevNew = IEMTLB_PHYS_REV_INCR;
795 else
796 {
797 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
798 continue;
799 }
800 ASMAtomicCmpXchgU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev);
801 ASMAtomicCmpXchgU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev);
802 }
803 VMCC_FOR_EACH_VMCPU_END(pVM);
804
805#else
806 RT_NOREF(pVM, idCpuCaller, enmReason);
807#endif
808}
809
810
811/**
812 * Flushes the prefetch buffer, light version.
813 */
814void iemOpcodeFlushLight(PVMCPUCC pVCpu, uint8_t cbInstr)
815{
816#ifndef IEM_WITH_CODE_TLB
817 pVCpu->iem.s.cbOpcode = cbInstr;
818#else
819 RT_NOREF(pVCpu, cbInstr);
820#endif
821}
822
823
824/**
825 * Flushes the prefetch buffer, heavy version.
826 */
827void iemOpcodeFlushHeavy(PVMCPUCC pVCpu, uint8_t cbInstr)
828{
829#ifndef IEM_WITH_CODE_TLB
830 pVCpu->iem.s.cbOpcode = cbInstr; /* Note! SVM and VT-x may set this to zero on exit, rather than the instruction length. */
831#elif 1
832 pVCpu->iem.s.cbInstrBufTotal = 0;
833 RT_NOREF(cbInstr);
834#else
835 RT_NOREF(pVCpu, cbInstr);
836#endif
837}
838
839
840
841#ifdef IEM_WITH_CODE_TLB
842
843/**
844 * Tries to fetch @a cbDst opcode bytes, raises the appropriate exception on
845 * failure and jumps.
846 *
847 * We end up here for a number of reasons:
848 * - pbInstrBuf isn't yet initialized.
849 * - Advancing beyond the buffer boundary (e.g. cross page).
850 * - Advancing beyond the CS segment limit.
851 * - Fetching from non-mappable page (e.g. MMIO).
852 * - TLB loading in the recompiler (@a pvDst = NULL, @a cbDst = 0).
853 *
854 * @param pVCpu The cross context virtual CPU structure of the
855 * calling thread.
856 * @param pvDst Where to return the bytes.
857 * @param cbDst Number of bytes to read. A value of zero is
858 * allowed for initializing pbInstrBuf (the
859 * recompiler does this). In this case it is best
860 * to set pbInstrBuf to NULL prior to the call.
861 */
862void iemOpcodeFetchBytesJmp(PVMCPUCC pVCpu, size_t cbDst, void *pvDst) IEM_NOEXCEPT_MAY_LONGJMP
863{
864# ifdef IN_RING3
865 for (;;)
866 {
867 Assert(cbDst <= 8);
868 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
869
870 /*
871 * We might have a partial buffer match, deal with that first to make the
872 * rest simpler. This is the first part of the cross page/buffer case.
873 */
874 uint8_t const * const pbInstrBuf = pVCpu->iem.s.pbInstrBuf;
875 if (pbInstrBuf != NULL)
876 {
877 Assert(cbDst != 0); /* pbInstrBuf shall be NULL in case of a TLB load */
878 uint32_t const cbInstrBuf = pVCpu->iem.s.cbInstrBuf;
879 if (offBuf < cbInstrBuf)
880 {
881 Assert(offBuf + cbDst > cbInstrBuf);
882 uint32_t const cbCopy = cbInstrBuf - offBuf;
883 memcpy(pvDst, &pbInstrBuf[offBuf], cbCopy);
884
885 cbDst -= cbCopy;
886 pvDst = (uint8_t *)pvDst + cbCopy;
887 offBuf += cbCopy;
888 }
889 }
890
891 /*
892 * Check segment limit, figuring how much we're allowed to access at this point.
893 *
894 * We will fault immediately if RIP is past the segment limit / in non-canonical
895 * territory. If we do continue, there are one or more bytes to read before we
896 * end up in trouble and we need to do that first before faulting.
897 */
898 RTGCPTR GCPtrFirst;
899 uint32_t cbMaxRead;
900 if (IEM_IS_64BIT_CODE(pVCpu))
901 {
902 GCPtrFirst = pVCpu->cpum.GstCtx.rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
903 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
904 { /* likely */ }
905 else
906 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
907 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
908 }
909 else
910 {
911 GCPtrFirst = pVCpu->cpum.GstCtx.eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
912 /* Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu)); - this is allowed */
913 if (RT_LIKELY((uint32_t)GCPtrFirst <= pVCpu->cpum.GstCtx.cs.u32Limit))
914 { /* likely */ }
915 else /** @todo For CPUs older than the 386, we should not necessarily generate \#GP here but wrap around! */
916 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
917 cbMaxRead = pVCpu->cpum.GstCtx.cs.u32Limit - (uint32_t)GCPtrFirst + 1;
918 if (cbMaxRead != 0)
919 { /* likely */ }
920 else
921 {
922 /* Overflowed because address is 0 and limit is max. */
923 Assert(GCPtrFirst == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
924 cbMaxRead = X86_PAGE_SIZE;
925 }
926 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base;
927 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
928 if (cbMaxRead2 < cbMaxRead)
929 cbMaxRead = cbMaxRead2;
930 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
931 }
932
933 /*
934 * Get the TLB entry for this piece of code.
935 */
936 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.CodeTlb, GCPtrFirst);
937 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.CodeTlb, uTag);
938 if (pTlbe->uTag == uTag)
939 {
940 /* likely when executing lots of code, otherwise unlikely */
941# ifdef VBOX_WITH_STATISTICS
942 pVCpu->iem.s.CodeTlb.cTlbHits++;
943# endif
944 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_ACCESSED));
945
946 /* Check TLB page table level access flags. */
947 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
948 {
949 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && IEM_GET_CPL(pVCpu) == 3)
950 {
951 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
952 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
953 }
954 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
955 {
956 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrFirst));
957 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
958 }
959 }
960
961 /* Look up the physical page info if necessary. */
962 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
963 { /* not necessary */ }
964 else
965 {
966 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
967 { /* likely */ }
968 else
969 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
970 pTlbe->fFlagsAndPhysRev &= ~IEMTLBE_GCPHYS2PTR_MASK;
971 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
972 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
973 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
974 }
975 }
976 else
977 {
978 pVCpu->iem.s.CodeTlb.cTlbMisses++;
979
980 /* This page table walking will set A bits as required by the access while performing the walk.
981 ASSUMES these are set when the address is translated rather than on commit... */
982 /** @todo testcase: check when A bits are actually set by the CPU for code. */
983 PGMPTWALKFAST WalkFast;
984 int rc = PGMGstQueryPageFast(pVCpu, GCPtrFirst,
985 IEM_GET_CPL(pVCpu) == 3 ? PGMQPAGE_F_EXECUTE | PGMQPAGE_F_USER_MODE : PGMQPAGE_F_EXECUTE,
986 &WalkFast);
987 if (RT_SUCCESS(rc))
988 Assert((WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED) && WalkFast.fFailed == PGM_WALKFAIL_SUCCESS);
989 else
990 {
991#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
992 /** @todo Nested VMX: Need to handle EPT violation/misconfig here? OF COURSE! */
993 Assert(!(WalkFast.fFailed & PGM_WALKFAIL_EPT));
994#endif
995 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
996 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, rc);
997 }
998
999 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
1000 pTlbe->uTag = uTag;
1001 pTlbe->fFlagsAndPhysRev = (~WalkFast.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A))
1002 | (WalkFast.fEffective >> X86_PTE_PAE_BIT_NX) /*IEMTLBE_F_PT_NO_EXEC*/;
1003 RTGCPHYS const GCPhysPg = WalkFast.GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
1004 pTlbe->GCPhys = GCPhysPg;
1005 pTlbe->pbMappingR3 = NULL;
1006 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE));
1007 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) || IEM_GET_CPL(pVCpu) != 3);
1008 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_ACCESSED));
1009
1010 /* Resolve the physical address. */
1011 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
1012 { /* likely */ }
1013 else
1014 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
1015 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_GCPHYS2PTR_MASK));
1016 rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, GCPhysPg, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
1017 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
1018 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
1019 }
1020
1021# if defined(IN_RING3) || defined(IN_RING0) /** @todo fixme */
1022 /*
1023 * Try do a direct read using the pbMappingR3 pointer.
1024 * Note! Do not recheck the physical TLB revision number here as we have the
1025 * wrong response to changes in the else case. If someone is updating
1026 * pVCpu->iem.s.CodeTlb.uTlbPhysRev in parallel to us, we should be fine
1027 * pretending we always won the race.
1028 */
1029 if ( (pTlbe->fFlagsAndPhysRev & (/*IEMTLBE_F_PHYS_REV |*/ IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
1030 == /*pVCpu->iem.s.CodeTlb.uTlbPhysRev*/ 0U)
1031 {
1032 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1033 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
1034 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
1035 {
1036 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
1037 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
1038 }
1039 else
1040 {
1041 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1042 if (cbInstr + (uint32_t)cbDst <= 15)
1043 {
1044 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
1045 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1046 }
1047 else
1048 {
1049 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0)\n",
1050 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, cbDst));
1051 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1052 }
1053 }
1054 if (cbDst <= cbMaxRead)
1055 {
1056 pVCpu->iem.s.fTbCrossedPage |= offPg == 0 || pVCpu->iem.s.fTbBranched != 0; /** @todo Spurious load effect on branch handling? */
1057 pVCpu->iem.s.GCPhysInstrBufPrev = pVCpu->iem.s.GCPhysInstrBuf;
1058
1059 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
1060 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1061 pVCpu->iem.s.GCPhysInstrBuf = pTlbe->GCPhys;
1062 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
1063 if (cbDst > 0) /* To make ASAN happy in the TLB load case. */
1064 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
1065 else
1066 Assert(!pvDst);
1067 return;
1068 }
1069 pVCpu->iem.s.pbInstrBuf = NULL;
1070
1071 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
1072 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
1073 }
1074# else
1075# error "refactor as needed"
1076 /*
1077 * If there is no special read handling, so we can read a bit more and
1078 * put it in the prefetch buffer.
1079 */
1080 if ( cbDst < cbMaxRead
1081 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1082 {
1083 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
1084 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
1085 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1086 { /* likely */ }
1087 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1088 {
1089 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1090 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1091 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1092 AssertStmt(rcStrict == VINF_SUCCESS, IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict)));
1093 }
1094 else
1095 {
1096 Log((RT_SUCCESS(rcStrict)
1097 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1098 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1099 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1100 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1101 }
1102 }
1103# endif
1104 /*
1105 * Special read handling, so only read exactly what's needed.
1106 * This is a highly unlikely scenario.
1107 */
1108 else
1109 {
1110 pVCpu->iem.s.CodeTlb.cTlbSlowReadPath++;
1111
1112 /* Check instruction length. */
1113 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1114 if (RT_LIKELY(cbInstr + cbDst <= 15))
1115 { /* likely */ }
1116 else
1117 {
1118 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0) [slow]\n",
1119 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, cbDst));
1120 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1121 }
1122
1123 /* Do the reading. */
1124 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
1125 if (cbToRead > 0)
1126 {
1127 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
1128 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
1129 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1130 { /* likely */ }
1131 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1132 {
1133 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1134 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), VBOXSTRICTRC_VAL(rcStrict), cbToRead));
1135 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1136 AssertStmt(rcStrict == VINF_SUCCESS, IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict)));
1137 }
1138 else
1139 {
1140 Log((RT_SUCCESS(rcStrict)
1141 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1142 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1143 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), VBOXSTRICTRC_VAL(rcStrict), cbToRead));
1144 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1145 }
1146 }
1147
1148 /* Update the state and probably return. */
1149 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1150 pVCpu->iem.s.fTbCrossedPage |= offPg == 0 || pVCpu->iem.s.fTbBranched != 0;
1151 pVCpu->iem.s.GCPhysInstrBufPrev = pVCpu->iem.s.GCPhysInstrBuf;
1152
1153 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1154 pVCpu->iem.s.offInstrNextByte = offPg + cbInstr + cbToRead;
1155 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead + cbInstr) - cbToRead - cbInstr;
1156 pVCpu->iem.s.cbInstrBufTotal = X86_PAGE_SIZE; /** @todo ??? */
1157 pVCpu->iem.s.GCPhysInstrBuf = pTlbe->GCPhys;
1158 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1159 pVCpu->iem.s.pbInstrBuf = NULL;
1160 if (cbToRead == cbDst)
1161 return;
1162 Assert(cbToRead == cbMaxRead);
1163 }
1164
1165 /*
1166 * More to read, loop.
1167 */
1168 cbDst -= cbMaxRead;
1169 pvDst = (uint8_t *)pvDst + cbMaxRead;
1170 }
1171# else /* !IN_RING3 */
1172 RT_NOREF(pvDst, cbDst);
1173 if (pvDst || cbDst)
1174 IEM_DO_LONGJMP(pVCpu, VERR_INTERNAL_ERROR);
1175# endif /* !IN_RING3 */
1176}
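/* Worked example (illustrative) of the cross-page path above: an instruction
 * starting 3 bytes before the end of the current instruction buffer and
 * needing 5 opcode bytes first hits the partial-buffer case, copying the
 * 3 bytes still covered by pbInstrBuf. The same pass then re-checks
 * canonicality / the CS limit for the following byte, looks up (or loads) the
 * code TLB entry for the next page, and copies the remaining 2 bytes from
 * that page's mapping before updating pbInstrBuf/offInstrNextByte to point
 * into the new page. */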
1177
1178#else /* !IEM_WITH_CODE_TLB */
1179
1180/**
1181 * Try fetch at least @a cbMin bytes more opcodes, raise the appropriate
1182 * exception if it fails.
1183 *
1184 * @returns Strict VBox status code.
1185 * @param pVCpu The cross context virtual CPU structure of the
1186 * calling thread.
1187 * @param cbMin The minimum number of bytes relative to offOpcode
1188 * that must be read.
1189 */
1190VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPUCC pVCpu, size_t cbMin) RT_NOEXCEPT
1191{
1192 /*
1193 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1194 *
1195 * First translate CS:rIP to a physical address.
1196 */
1197 uint8_t const cbOpcode = pVCpu->iem.s.cbOpcode;
1198 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
1199 uint8_t const cbLeft = cbOpcode - offOpcode;
1200 Assert(cbLeft < cbMin);
1201 Assert(cbOpcode <= sizeof(pVCpu->iem.s.abOpcode));
1202
1203 uint32_t cbToTryRead;
1204 RTGCPTR GCPtrNext;
1205 if (IEM_IS_64BIT_CODE(pVCpu))
1206 {
1207 GCPtrNext = pVCpu->cpum.GstCtx.rip + cbOpcode;
1208 if (!IEM_IS_CANONICAL(GCPtrNext))
1209 return iemRaiseGeneralProtectionFault0(pVCpu);
1210 cbToTryRead = GUEST_PAGE_SIZE - (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1211 }
1212 else
1213 {
1214 uint32_t GCPtrNext32 = pVCpu->cpum.GstCtx.eip;
1215 /* Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu)); - this is allowed */
1216 GCPtrNext32 += cbOpcode;
1217 if (GCPtrNext32 > pVCpu->cpum.GstCtx.cs.u32Limit)
1218 /** @todo For CPUs older than the 386, we should not generate \#GP here but wrap around! */
1219 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1220 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrNext32 + 1;
1221 if (!cbToTryRead) /* overflowed */
1222 {
1223 Assert(GCPtrNext32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1224 cbToTryRead = UINT32_MAX;
1225 /** @todo check out wrapping around the code segment. */
1226 }
1227 if (cbToTryRead < cbMin - cbLeft)
1228 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1229 GCPtrNext = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrNext32;
1230
1231 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1232 if (cbToTryRead > cbLeftOnPage)
1233 cbToTryRead = cbLeftOnPage;
1234 }
1235
1236 /* Restrict to opcode buffer space.
1237
1238 We're making ASSUMPTIONS here based on work done previously in
1239 iemInitDecoderAndPrefetchOpcodes, where bytes from the first page will
1240 be fetched in case of an instruction crossing two pages. */
1241 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - cbOpcode)
1242 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - cbOpcode;
1243 if (RT_LIKELY(cbToTryRead + cbLeft >= cbMin))
1244 { /* likely */ }
1245 else
1246 {
1247 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0)\n",
1248 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, offOpcode, cbMin));
1249 return iemRaiseGeneralProtectionFault0(pVCpu);
1250 }
1251
1252 PGMPTWALK Walk;
1253 int rc = PGMGstGetPage(pVCpu, GCPtrNext, &Walk);
1254 if (RT_FAILURE(rc))
1255 {
1256 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
1257#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1258 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1259 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
1260#endif
1261 return iemRaisePageFault(pVCpu, GCPtrNext, 1, IEM_ACCESS_INSTRUCTION, rc);
1262 }
1263 if (!(Walk.fEffective & X86_PTE_US) && IEM_GET_CPL(pVCpu) == 3)
1264 {
1265 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
1266#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1267 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1268 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
1269#endif
1270 return iemRaisePageFault(pVCpu, GCPtrNext, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1271 }
1272 if ((Walk.fEffective & X86_PTE_PAE_NX) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
1273 {
1274 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
1275#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1276 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1277 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
1278#endif
1279 return iemRaisePageFault(pVCpu, GCPtrNext, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1280 }
1281 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1282 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, cbOpcode));
1283 /** @todo Check reserved bits and such stuff. PGM is better at doing
1284 * that, so do it when implementing the guest virtual address
1285 * TLB... */
1286
1287 /*
1288 * Read the bytes at this address.
1289 *
1290 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
1291 * and since PATM should only patch the start of an instruction there
1292 * should be no need to check again here.
1293 */
1294 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
1295 {
1296 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[cbOpcode],
1297 cbToTryRead, PGMACCESSORIGIN_IEM);
1298 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1299 { /* likely */ }
1300 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1301 {
1302 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1303 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1304 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1305 }
1306 else
1307 {
1308 Log((RT_SUCCESS(rcStrict)
1309 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1310 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1311 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1312 return rcStrict;
1313 }
1314 }
1315 else
1316 {
1317 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[cbOpcode], GCPhys, cbToTryRead);
1318 if (RT_SUCCESS(rc))
1319 { /* likely */ }
1320 else
1321 {
1322 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
1323 return rc;
1324 }
1325 }
1326 pVCpu->iem.s.cbOpcode = cbOpcode + cbToTryRead;
1327 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
1328
1329 return VINF_SUCCESS;
1330}
1331
1332#endif /* !IEM_WITH_CODE_TLB */
1333#ifndef IEM_WITH_SETJMP
1334
1335/**
1336 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
1337 *
1338 * @returns Strict VBox status code.
1339 * @param pVCpu The cross context virtual CPU structure of the
1340 * calling thread.
1341 * @param pb Where to return the opcode byte.
1342 */
1343VBOXSTRICTRC iemOpcodeGetNextU8Slow(PVMCPUCC pVCpu, uint8_t *pb) RT_NOEXCEPT
1344{
1345 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1346 if (rcStrict == VINF_SUCCESS)
1347 {
1348 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1349 *pb = pVCpu->iem.s.abOpcode[offOpcode];
1350 pVCpu->iem.s.offOpcode = offOpcode + 1;
1351 }
1352 else
1353 *pb = 0;
1354 return rcStrict;
1355}
1356
1357#else /* IEM_WITH_SETJMP */
1358
1359/**
1360 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
1361 *
1362 * @returns The opcode byte.
1363 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1364 */
1365uint8_t iemOpcodeGetNextU8SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1366{
1367# ifdef IEM_WITH_CODE_TLB
1368 uint8_t u8;
1369 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
1370 return u8;
1371# else
1372 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1373 if (rcStrict == VINF_SUCCESS)
1374 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
1375 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1376# endif
1377}
1378
1379#endif /* IEM_WITH_SETJMP */
1380
1381#ifndef IEM_WITH_SETJMP
1382
1383/**
1384 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
1385 *
1386 * @returns Strict VBox status code.
1387 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1388 * @param pu16 Where to return the opcode word.
1389 */
1390VBOXSTRICTRC iemOpcodeGetNextS8SxU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
1391{
1392 uint8_t u8;
1393 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1394 if (rcStrict == VINF_SUCCESS)
1395 *pu16 = (int8_t)u8;
1396 return rcStrict;
1397}
1398
1399
1400/**
1401 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
1402 *
1403 * @returns Strict VBox status code.
1404 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1405 * @param pu32 Where to return the opcode dword.
1406 */
1407VBOXSTRICTRC iemOpcodeGetNextS8SxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1408{
1409 uint8_t u8;
1410 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1411 if (rcStrict == VINF_SUCCESS)
1412 *pu32 = (int8_t)u8;
1413 return rcStrict;
1414}
1415
1416
1417/**
1418 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
1419 *
1420 * @returns Strict VBox status code.
1421 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1422 * @param pu64 Where to return the opcode qword.
1423 */
1424VBOXSTRICTRC iemOpcodeGetNextS8SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1425{
1426 uint8_t u8;
1427 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1428 if (rcStrict == VINF_SUCCESS)
1429 *pu64 = (int8_t)u8;
1430 return rcStrict;
1431}
1432
1433#endif /* !IEM_WITH_SETJMP */
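/* Worked example (illustrative): for an opcode byte of 0xFE the three
 * sign-extending helpers above store 0xFFFE, 0xFFFFFFFE and
 * 0xFFFFFFFFFFFFFFFE respectively, since (int8_t)0xFE == -2. */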
1434
1435
1436#ifndef IEM_WITH_SETJMP
1437
1438/**
1439 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
1440 *
1441 * @returns Strict VBox status code.
1442 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1443 * @param pu16 Where to return the opcode word.
1444 */
1445VBOXSTRICTRC iemOpcodeGetNextU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
1446{
1447 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1448 if (rcStrict == VINF_SUCCESS)
1449 {
1450 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1451# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1452 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1453# else
1454 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1455# endif
1456 pVCpu->iem.s.offOpcode = offOpcode + 2;
1457 }
1458 else
1459 *pu16 = 0;
1460 return rcStrict;
1461}
1462
1463#else /* IEM_WITH_SETJMP */
1464
1465/**
1466 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error
1467 *
1468 * @returns The opcode word.
1469 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1470 */
1471uint16_t iemOpcodeGetNextU16SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1472{
1473# ifdef IEM_WITH_CODE_TLB
1474 uint16_t u16;
1475 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
1476 return u16;
1477# else
1478 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1479 if (rcStrict == VINF_SUCCESS)
1480 {
1481 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1482 pVCpu->iem.s.offOpcode += 2;
1483# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1484 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1485# else
1486 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1487# endif
1488 }
1489 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1490# endif
1491}
1492
1493#endif /* IEM_WITH_SETJMP */
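/* Worked example (illustrative): with abOpcode[offOpcode] == 0x34 and
 * abOpcode[offOpcode + 1] == 0x12, RT_MAKE_U16(0x34, 0x12) yields 0x1234,
 * i.e. the word is assembled least-significant byte first, matching the
 * little-endian x86 instruction stream. */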
1494
1495#ifndef IEM_WITH_SETJMP
1496
1497/**
1498 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
1499 *
1500 * @returns Strict VBox status code.
1501 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1502 * @param pu32 Where to return the opcode double word.
1503 */
1504VBOXSTRICTRC iemOpcodeGetNextU16ZxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1505{
1506 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1507 if (rcStrict == VINF_SUCCESS)
1508 {
1509 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1510 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1511 pVCpu->iem.s.offOpcode = offOpcode + 2;
1512 }
1513 else
1514 *pu32 = 0;
1515 return rcStrict;
1516}
1517
1518
1519/**
1520 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
1521 *
1522 * @returns Strict VBox status code.
1523 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1524 * @param pu64 Where to return the opcode quad word.
1525 */
1526VBOXSTRICTRC iemOpcodeGetNextU16ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1527{
1528 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1529 if (rcStrict == VINF_SUCCESS)
1530 {
1531 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1532 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1533 pVCpu->iem.s.offOpcode = offOpcode + 2;
1534 }
1535 else
1536 *pu64 = 0;
1537 return rcStrict;
1538}
1539
1540#endif /* !IEM_WITH_SETJMP */
1541
1542#ifndef IEM_WITH_SETJMP
1543
1544/**
1545 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
1546 *
1547 * @returns Strict VBox status code.
1548 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1549 * @param pu32 Where to return the opcode dword.
1550 */
1551VBOXSTRICTRC iemOpcodeGetNextU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1552{
1553 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1554 if (rcStrict == VINF_SUCCESS)
1555 {
1556 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1557# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1558 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1559# else
1560 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1561 pVCpu->iem.s.abOpcode[offOpcode + 1],
1562 pVCpu->iem.s.abOpcode[offOpcode + 2],
1563 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1564# endif
1565 pVCpu->iem.s.offOpcode = offOpcode + 4;
1566 }
1567 else
1568 *pu32 = 0;
1569 return rcStrict;
1570}
1571
1572#else /* IEM_WITH_SETJMP */
1573
1574/**
1575 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
1576 *
1577 * @returns The opcode dword.
1578 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1579 */
1580uint32_t iemOpcodeGetNextU32SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1581{
1582# ifdef IEM_WITH_CODE_TLB
1583 uint32_t u32;
1584 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
1585 return u32;
1586# else
1587 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1588 if (rcStrict == VINF_SUCCESS)
1589 {
1590 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1591 pVCpu->iem.s.offOpcode = offOpcode + 4;
1592# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1593 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1594# else
1595 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1596 pVCpu->iem.s.abOpcode[offOpcode + 1],
1597 pVCpu->iem.s.abOpcode[offOpcode + 2],
1598 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1599# endif
1600 }
1601 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1602# endif
1603}
1604
1605#endif /* IEM_WITH_SETJMP */
1606
1607#ifndef IEM_WITH_SETJMP
1608
1609/**
1610 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
1611 *
1612 * @returns Strict VBox status code.
1613 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1614 * @param pu64 Where to return the opcode dword, zero extended to a qword.
1615 */
1616VBOXSTRICTRC iemOpcodeGetNextU32ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1617{
1618 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1619 if (rcStrict == VINF_SUCCESS)
1620 {
1621 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1622 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1623 pVCpu->iem.s.abOpcode[offOpcode + 1],
1624 pVCpu->iem.s.abOpcode[offOpcode + 2],
1625 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1626 pVCpu->iem.s.offOpcode = offOpcode + 4;
1627 }
1628 else
1629 *pu64 = 0;
1630 return rcStrict;
1631}
1632
1633
1634/**
1635 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
1636 *
1637 * @returns Strict VBox status code.
1638 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1639 * @param pu64 Where to return the opcode qword.
1640 */
1641VBOXSTRICTRC iemOpcodeGetNextS32SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1642{
1643 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1644 if (rcStrict == VINF_SUCCESS)
1645 {
1646 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1647 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1648 pVCpu->iem.s.abOpcode[offOpcode + 1],
1649 pVCpu->iem.s.abOpcode[offOpcode + 2],
1650 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1651 pVCpu->iem.s.offOpcode = offOpcode + 4;
1652 }
1653 else
1654 *pu64 = 0;
1655 return rcStrict;
1656}
1657
1658#endif /* !IEM_WITH_SETJMP */
1659
1660#ifndef IEM_WITH_SETJMP
1661
1662/**
1663 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
1664 *
1665 * @returns Strict VBox status code.
1666 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1667 * @param pu64 Where to return the opcode qword.
1668 */
1669VBOXSTRICTRC iemOpcodeGetNextU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1670{
1671 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
1672 if (rcStrict == VINF_SUCCESS)
1673 {
1674 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1675# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1676 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1677# else
1678 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1679 pVCpu->iem.s.abOpcode[offOpcode + 1],
1680 pVCpu->iem.s.abOpcode[offOpcode + 2],
1681 pVCpu->iem.s.abOpcode[offOpcode + 3],
1682 pVCpu->iem.s.abOpcode[offOpcode + 4],
1683 pVCpu->iem.s.abOpcode[offOpcode + 5],
1684 pVCpu->iem.s.abOpcode[offOpcode + 6],
1685 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1686# endif
1687 pVCpu->iem.s.offOpcode = offOpcode + 8;
1688 }
1689 else
1690 *pu64 = 0;
1691 return rcStrict;
1692}
1693
1694#else /* IEM_WITH_SETJMP */
1695
1696/**
1697 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
1698 *
1699 * @returns The opcode qword.
1700 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1701 */
1702uint64_t iemOpcodeGetNextU64SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1703{
1704# ifdef IEM_WITH_CODE_TLB
1705 uint64_t u64;
1706 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
1707 return u64;
1708# else
1709 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
1710 if (rcStrict == VINF_SUCCESS)
1711 {
1712 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1713 pVCpu->iem.s.offOpcode = offOpcode + 8;
1714# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1715 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1716# else
1717 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1718 pVCpu->iem.s.abOpcode[offOpcode + 1],
1719 pVCpu->iem.s.abOpcode[offOpcode + 2],
1720 pVCpu->iem.s.abOpcode[offOpcode + 3],
1721 pVCpu->iem.s.abOpcode[offOpcode + 4],
1722 pVCpu->iem.s.abOpcode[offOpcode + 5],
1723 pVCpu->iem.s.abOpcode[offOpcode + 6],
1724 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1725# endif
1726 }
1727 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1728# endif
1729}
1730
1731#endif /* IEM_WITH_SETJMP */
1732
1733
1734
1735/** @name Misc Worker Functions.
1736 * @{
1737 */
1738
1739/**
1740 * Gets the exception class for the specified exception vector.
1741 *
1742 * @returns The class of the specified exception.
1743 * @param uVector The exception vector.
1744 */
1745static IEMXCPTCLASS iemGetXcptClass(uint8_t uVector) RT_NOEXCEPT
1746{
1747 Assert(uVector <= X86_XCPT_LAST);
1748 switch (uVector)
1749 {
1750 case X86_XCPT_DE:
1751 case X86_XCPT_TS:
1752 case X86_XCPT_NP:
1753 case X86_XCPT_SS:
1754 case X86_XCPT_GP:
1755 case X86_XCPT_SX: /* AMD only */
1756 return IEMXCPTCLASS_CONTRIBUTORY;
1757
1758 case X86_XCPT_PF:
1759 case X86_XCPT_VE: /* Intel only */
1760 return IEMXCPTCLASS_PAGE_FAULT;
1761
1762 case X86_XCPT_DF:
1763 return IEMXCPTCLASS_DOUBLE_FAULT;
1764 }
1765 return IEMXCPTCLASS_BENIGN;
1766}
1767
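/*
 * Illustration (summary of the Intel SDM double fault conditions; the vectors
 * named are examples, not an exhaustive list):
 *      - benign event (e.g. #DB, #NM) during any delivery          -> handle serially.
 *      - contributory (#DE/#TS/#NP/#SS/#GP) during contributory    -> #DF.
 *      - contributory or #PF while delivering a #PF                -> #DF.
 *      - contributory or #PF while delivering #DF                  -> triple fault (shutdown).
 * The classification above is what IEMEvaluateRecursiveXcpt below applies.
 */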
1768
1769/**
1770 * Evaluates how to handle an exception caused during delivery of another event
1771 * (exception / interrupt).
1772 *
1773 * @returns How to handle the recursive exception.
1774 * @param pVCpu The cross context virtual CPU structure of the
1775 * calling thread.
1776 * @param fPrevFlags The flags of the previous event.
1777 * @param uPrevVector The vector of the previous event.
1778 * @param fCurFlags The flags of the current exception.
1779 * @param uCurVector The vector of the current exception.
1780 * @param pfXcptRaiseInfo Where to store additional information about the
1781 * exception condition. Optional.
1782 */
1783VMM_INT_DECL(IEMXCPTRAISE) IEMEvaluateRecursiveXcpt(PVMCPUCC pVCpu, uint32_t fPrevFlags, uint8_t uPrevVector, uint32_t fCurFlags,
1784 uint8_t uCurVector, PIEMXCPTRAISEINFO pfXcptRaiseInfo)
1785{
1786 /*
1787 * Only CPU exceptions can be raised while delivering other events; software interrupt
1788 * (INTn/INT3/INTO/ICEBP) generated exceptions cannot occur as the current (second) exception.
1789 */
1790 AssertReturn(fCurFlags & IEM_XCPT_FLAGS_T_CPU_XCPT, IEMXCPTRAISE_INVALID);
1791 Assert(pVCpu); RT_NOREF(pVCpu);
1792 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x\n", uPrevVector, uCurVector));
1793
1794 IEMXCPTRAISE enmRaise = IEMXCPTRAISE_CURRENT_XCPT;
1795 IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
1796 if (fPrevFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
1797 {
1798 IEMXCPTCLASS enmPrevXcptClass = iemGetXcptClass(uPrevVector);
1799 if (enmPrevXcptClass != IEMXCPTCLASS_BENIGN)
1800 {
1801 IEMXCPTCLASS enmCurXcptClass = iemGetXcptClass(uCurVector);
1802 if ( enmPrevXcptClass == IEMXCPTCLASS_PAGE_FAULT
1803 && ( enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT
1804 || enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY))
1805 {
1806 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
1807 fRaiseInfo = enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT ? IEMXCPTRAISEINFO_PF_PF
1808 : IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT;
1809 Log2(("IEMEvaluateRecursiveXcpt: Vectoring page fault. uPrevVector=%#x uCurVector=%#x uCr2=%#RX64\n", uPrevVector,
1810 uCurVector, pVCpu->cpum.GstCtx.cr2));
1811 }
1812 else if ( enmPrevXcptClass == IEMXCPTCLASS_CONTRIBUTORY
1813 && enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY)
1814 {
1815 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
1816 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x -> #DF\n", uPrevVector, uCurVector));
1817 }
1818 else if ( enmPrevXcptClass == IEMXCPTCLASS_DOUBLE_FAULT
1819 && ( enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY
1820 || enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT))
1821 {
1822 enmRaise = IEMXCPTRAISE_TRIPLE_FAULT;
1823 Log2(("IEMEvaluateRecursiveXcpt: #DF handler raised a %#x exception -> triple fault\n", uCurVector));
1824 }
1825 }
1826 else
1827 {
1828 if (uPrevVector == X86_XCPT_NMI)
1829 {
1830 fRaiseInfo = IEMXCPTRAISEINFO_NMI_XCPT;
1831 if (uCurVector == X86_XCPT_PF)
1832 {
1833 fRaiseInfo |= IEMXCPTRAISEINFO_NMI_PF;
1834 Log2(("IEMEvaluateRecursiveXcpt: NMI delivery caused a page fault\n"));
1835 }
1836 }
1837 else if ( uPrevVector == X86_XCPT_AC
1838 && uCurVector == X86_XCPT_AC)
1839 {
1840 enmRaise = IEMXCPTRAISE_CPU_HANG;
1841 fRaiseInfo = IEMXCPTRAISEINFO_AC_AC;
1842 Log2(("IEMEvaluateRecursiveXcpt: Recursive #AC - Bad guest\n"));
1843 }
1844 }
1845 }
1846 else if (fPrevFlags & IEM_XCPT_FLAGS_T_EXT_INT)
1847 {
1848 fRaiseInfo = IEMXCPTRAISEINFO_EXT_INT_XCPT;
1849 if (uCurVector == X86_XCPT_PF)
1850 fRaiseInfo |= IEMXCPTRAISEINFO_EXT_INT_PF;
1851 }
1852 else
1853 {
1854 Assert(fPrevFlags & IEM_XCPT_FLAGS_T_SOFT_INT);
1855 fRaiseInfo = IEMXCPTRAISEINFO_SOFT_INT_XCPT;
1856 }
1857
1858 if (pfXcptRaiseInfo)
1859 *pfXcptRaiseInfo = fRaiseInfo;
1860 return enmRaise;
1861}
1862
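/*
 * Usage sketch (hypothetical caller; only the double fault path is shown): a
 * #GP raised while delivering a #PF is classified as a double fault:
 *
 *      IEMXCPTRAISEINFO fRaiseInfo;
 *      IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu,
 *                                                       IEM_XCPT_FLAGS_T_CPU_XCPT, X86_XCPT_PF,
 *                                                       IEM_XCPT_FLAGS_T_CPU_XCPT, X86_XCPT_GP,
 *                                                       &fRaiseInfo);
 *      Assert(enmRaise == IEMXCPTRAISE_DOUBLE_FAULT);
 */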
1863
1864/**
1865 * Enters the CPU shutdown state initiated by a triple fault or other
1866 * unrecoverable conditions.
1867 *
1868 * @returns Strict VBox status code.
1869 * @param pVCpu The cross context virtual CPU structure of the
1870 * calling thread.
1871 */
1872static VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPUCC pVCpu) RT_NOEXCEPT
1873{
1874 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
1875 IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(pVCpu, VMX_EXIT_TRIPLE_FAULT, 0 /* u64ExitQual */);
1876
1877 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
1878 {
1879 Log2(("shutdown: Guest intercept -> #VMEXIT\n"));
1880 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
1881 }
1882
1883 RT_NOREF(pVCpu);
1884 return VINF_EM_TRIPLE_FAULT;
1885}
1886
1887
1888/**
1889 * Validates a new SS segment.
1890 *
1891 * @returns VBox strict status code.
1892 * @param pVCpu The cross context virtual CPU structure of the
1893 * calling thread.
1894 * @param NewSS The new SS selector.
1895 * @param uCpl The CPL to load the stack for.
1896 * @param pDesc Where to return the descriptor.
1897 */
1898static VBOXSTRICTRC iemMiscValidateNewSS(PVMCPUCC pVCpu, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc) RT_NOEXCEPT
1899{
1900 /* Null selectors are not allowed (we're not called for dispatching
1901 interrupts with SS=0 in long mode). */
1902 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
1903 {
1904 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
1905 return iemRaiseTaskSwitchFault0(pVCpu);
1906 }
1907
1908 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
1909 if ((NewSS & X86_SEL_RPL) != uCpl)
1910 {
1911 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl));
1912 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1913 }
1914
1915 /*
1916 * Read the descriptor.
1917 */
1918 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
1919 if (rcStrict != VINF_SUCCESS)
1920 return rcStrict;
1921
1922 /*
1923 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
1924 */
1925 if (!pDesc->Legacy.Gen.u1DescType)
1926 {
1927 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
1928 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1929 }
1930
1931 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
1932 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
1933 {
1934 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
1935 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1936 }
1937 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
1938 {
1939 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
1940 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1941 }
1942
1943 /* Is it there? */
1944 /** @todo testcase: Is this checked before the canonical / limit check below? */
1945 if (!pDesc->Legacy.Gen.u1Present)
1946 {
1947 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
1948 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
1949 }
1950
1951 return VINF_SUCCESS;
1952}
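
/*
 * Usage sketch (hypothetical caller): validate a stack selector taken from a
 * TSS or call gate before loading it, keeping the descriptor for the commit:
 *
 *      IEMSELDESC DescSS;
 *      VBOXSTRICTRC rcStrict = iemMiscValidateNewSS(pVCpu, NewSS, uCpl, &DescSS);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict; // the helper has already selected #TS / #NP.
 */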
1953
1954/** @} */
1955
1956
1957/** @name Raising Exceptions.
1958 *
1959 * @{
1960 */
1961
1962
1963/**
1964 * Loads the specified stack far pointer from the TSS.
1965 *
1966 * @returns VBox strict status code.
1967 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1968 * @param uCpl The CPL to load the stack for.
1969 * @param pSelSS Where to return the new stack segment.
1970 * @param puEsp Where to return the new stack pointer.
1971 */
1972static VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPUCC pVCpu, uint8_t uCpl, PRTSEL pSelSS, uint32_t *puEsp) RT_NOEXCEPT
1973{
1974 VBOXSTRICTRC rcStrict;
1975 Assert(uCpl < 4);
1976
1977 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
1978 switch (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type)
1979 {
1980 /*
1981 * 16-bit TSS (X86TSS16).
1982 */
1983 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
1984 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
1985 {
1986 uint32_t off = uCpl * 4 + 2;
1987 if (off + 4 <= pVCpu->cpum.GstCtx.tr.u32Limit)
1988 {
1989 /** @todo check actual access pattern here. */
1990 uint32_t u32Tmp = 0; /* gcc maybe... */
1991 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
1992 if (rcStrict == VINF_SUCCESS)
1993 {
1994 *puEsp = RT_LOWORD(u32Tmp);
1995 *pSelSS = RT_HIWORD(u32Tmp);
1996 return VINF_SUCCESS;
1997 }
1998 }
1999 else
2000 {
2001 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
2002 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
2003 }
2004 break;
2005 }
2006
2007 /*
2008 * 32-bit TSS (X86TSS32).
2009 */
2010 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
2011 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
2012 {
2013 uint32_t off = uCpl * 8 + 4;
2014 if (off + 7 <= pVCpu->cpum.GstCtx.tr.u32Limit)
2015 {
2016 /** @todo check actual access pattern here. */
2017 uint64_t u64Tmp;
2018 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
2019 if (rcStrict == VINF_SUCCESS)
2020 {
2021 *puEsp = u64Tmp & UINT32_MAX;
2022 *pSelSS = (RTSEL)(u64Tmp >> 32);
2023 return VINF_SUCCESS;
2024 }
2025 }
2026 else
2027 {
2028 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
2029 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
2030 }
2031 break;
2032 }
2033
2034 default:
2035 AssertFailed();
2036 rcStrict = VERR_IEM_IPE_4;
2037 break;
2038 }
2039
2040 *puEsp = 0; /* make gcc happy */
2041 *pSelSS = 0; /* make gcc happy */
2042 return rcStrict;
2043}
2044
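/*
 * Worked example (standard TSS layouts, numbers only for illustration): for
 * uCpl=1 the 16-bit path above reads the dword at TSS offset 1*4 + 2 = 6,
 * i.e. the sp1:ss1 pair, while the 32-bit path reads the qword at offset
 * 1*8 + 4 = 0x0C, i.e. esp1 followed by ss1.
 */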
2045
2046/**
2047 * Loads the specified stack pointer from the 64-bit TSS.
2048 *
2049 * @returns VBox strict status code.
2050 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2051 * @param uCpl The CPL to load the stack for.
2052 * @param uIst The interrupt stack table index, 0 if to use uCpl.
2053 * @param puRsp Where to return the new stack pointer.
2054 */
2055static VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPUCC pVCpu, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp) RT_NOEXCEPT
2056{
2057 Assert(uCpl < 4);
2058 Assert(uIst < 8);
2059 *puRsp = 0; /* make gcc happy */
2060
2061 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
2062 AssertReturn(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
2063
2064 uint32_t off;
2065 if (uIst)
2066 off = (uIst - 1) * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, ist1);
2067 else
2068 off = uCpl * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, rsp0);
2069 if (off + sizeof(uint64_t) > pVCpu->cpum.GstCtx.tr.u32Limit)
2070 {
2071 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pVCpu->cpum.GstCtx.tr.u32Limit));
2072 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
2073 }
2074
2075 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
2076}
2077
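/*
 * Worked example (64-bit TSS layout per the Intel SDM): RT_UOFFSETOF(X86TSS64, rsp0)
 * is 4 and RT_UOFFSETOF(X86TSS64, ist1) is 0x24, so uIst=2 reads the qword at
 * offset (2 - 1)*8 + 0x24 = 0x2C (IST2), while uIst=0 with uCpl=1 reads the
 * qword at 1*8 + 4 = 0x0C (RSP1).
 */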
2078
2079/**
2080 * Adjust the CPU state according to the exception being raised.
2081 *
2082 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2083 * @param u8Vector The exception that has been raised.
2084 */
2085DECLINLINE(void) iemRaiseXcptAdjustState(PVMCPUCC pVCpu, uint8_t u8Vector)
2086{
2087 switch (u8Vector)
2088 {
2089 case X86_XCPT_DB:
2090 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);
2091 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
2092 break;
2093 /** @todo Read the AMD and Intel exception reference... */
2094 }
2095}
2096
2097
2098/**
2099 * Implements exceptions and interrupts for real mode.
2100 *
2101 * @returns VBox strict status code.
2102 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2103 * @param cbInstr The number of bytes to offset rIP by in the return
2104 * address.
2105 * @param u8Vector The interrupt / exception vector number.
2106 * @param fFlags The flags.
2107 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2108 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2109 */
2110static VBOXSTRICTRC
2111iemRaiseXcptOrIntInRealMode(PVMCPUCC pVCpu,
2112 uint8_t cbInstr,
2113 uint8_t u8Vector,
2114 uint32_t fFlags,
2115 uint16_t uErr,
2116 uint64_t uCr2) RT_NOEXCEPT
2117{
2118 NOREF(uErr); NOREF(uCr2);
2119 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2120
2121 /*
2122 * Read the IDT entry.
2123 */
2124 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
2125 {
2126 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
2127 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2128 }
2129 RTFAR16 Idte;
2130 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(4) * u8Vector);
2131 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2132 {
2133 Log(("iemRaiseXcptOrIntInRealMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
2134 return rcStrict;
2135 }
2136
2137#ifdef LOG_ENABLED
2138 /* If it's a software interrupt, try to decode it if the relevant logging is enabled. */
2139 if ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
2140 && LogIsItEnabled(RTLOGGRPFLAGS_ENABLED, LOG_GROUP_IEM_SYSCALL))
2141 iemLogSyscallRealModeInt(pVCpu, u8Vector, cbInstr);
2142#endif
2143
2144 /*
2145 * Push the stack frame.
2146 */
2147 uint8_t bUnmapInfo;
2148 uint16_t *pu16Frame;
2149 uint64_t uNewRsp;
2150 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, 3, (void **)&pu16Frame, &bUnmapInfo, &uNewRsp);
2151 if (rcStrict != VINF_SUCCESS)
2152 return rcStrict;
2153
2154 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
2155#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
2156 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
2157 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
2158 fEfl |= UINT16_C(0xf000);
2159#endif
2160 pu16Frame[2] = (uint16_t)fEfl;
2161 pu16Frame[1] = (uint16_t)pVCpu->cpum.GstCtx.cs.Sel;
2162 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
2163 rcStrict = iemMemStackPushCommitSpecial(pVCpu, bUnmapInfo, uNewRsp);
2164 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2165 return rcStrict;
2166
2167 /*
2168 * Load the vector address into cs:ip and make exception specific state
2169 * adjustments.
2170 */
2171 pVCpu->cpum.GstCtx.cs.Sel = Idte.sel;
2172 pVCpu->cpum.GstCtx.cs.ValidSel = Idte.sel;
2173 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
2174 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)Idte.sel << 4;
2175 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
2176 pVCpu->cpum.GstCtx.rip = Idte.off;
2177 fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC);
2178 IEMMISC_SET_EFL(pVCpu, fEfl);
2179
2180 /** @todo do we actually do this in real mode? */
2181 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
2182 iemRaiseXcptAdjustState(pVCpu, u8Vector);
2183
2184 /* The IEM_F_MODE_XXX and IEM_F_X86_CPL_MASK don't really change here,
2185 so best leave them alone in case we're in a weird kind of real mode... */
2186
2187 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
2188}
2189
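/*
 * Illustration (numbers assumed for the example): in real mode the IDT is the
 * classic interrupt vector table of 4-byte far pointers, so with the usual
 * IDTR base of 0 an INT 21h fetches CS:IP from linear address 4 * 0x21 = 0x84,
 * pushes the three-word FLAGS/CS/IP frame built above, and resumes execution
 * at (Idte.sel << 4) + Idte.off.
 */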
2190
2191/**
2192 * Loads a NULL data selector into a segment register when coming from V8086 mode.
2193 *
2194 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2195 * @param pSReg Pointer to the segment register.
2196 */
2197DECLINLINE(void) iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPUCC pVCpu, PCPUMSELREG pSReg)
2198{
2199 pSReg->Sel = 0;
2200 pSReg->ValidSel = 0;
2201 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2202 {
2203 /* VT-x (Intel 3960x) doesn't change the base and limit; it clears and sets the following attributes: */
2204 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
2205 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
2206 }
2207 else
2208 {
2209 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2210 /** @todo check this on AMD-V */
2211 pSReg->u64Base = 0;
2212 pSReg->u32Limit = 0;
2213 }
2214}
2215
2216
2217/**
2218 * Loads a segment selector during a task switch in V8086 mode.
2219 *
2220 * @param pSReg Pointer to the segment register.
2221 * @param uSel The selector value to load.
2222 */
2223DECLINLINE(void) iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
2224{
2225 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
2226 pSReg->Sel = uSel;
2227 pSReg->ValidSel = uSel;
2228 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2229 pSReg->u64Base = uSel << 4;
2230 pSReg->u32Limit = 0xffff;
2231 pSReg->Attr.u = 0xf3;
2232}
2233
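/*
 * Illustration (selector value assumed for the example): V8086 segments behave
 * like real-mode ones, so loading uSel=0xb800 here yields u64Base=0xb8000
 * (uSel << 4), u32Limit=0xffff and Attr.u=0xf3, i.e. a present, DPL=3,
 * read/write accessed data segment.
 */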
2234
2235/**
2236 * Loads a segment selector during a task switch in protected mode.
2237 *
2238 * In this task switch scenario, we would throw \#TS exceptions rather than
2239 * \#GPs.
2240 *
2241 * @returns VBox strict status code.
2242 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2243 * @param pSReg Pointer to the segment register.
2244 * @param uSel The new selector value.
2245 *
2246 * @remarks This does _not_ handle CS or SS.
2247 * @remarks This expects IEM_GET_CPL(pVCpu) to return an up to date value.
2248 */
2249static VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPUCC pVCpu, PCPUMSELREG pSReg, uint16_t uSel) RT_NOEXCEPT
2250{
2251 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2252
2253 /* Null data selector. */
2254 if (!(uSel & X86_SEL_MASK_OFF_RPL))
2255 {
2256 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
2257 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
2258 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2259 return VINF_SUCCESS;
2260 }
2261
2262 /* Fetch the descriptor. */
2263 IEMSELDESC Desc;
2264 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
2265 if (rcStrict != VINF_SUCCESS)
2266 {
2267 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
2268 VBOXSTRICTRC_VAL(rcStrict)));
2269 return rcStrict;
2270 }
2271
2272 /* Must be a data segment or readable code segment. */
2273 if ( !Desc.Legacy.Gen.u1DescType
2274 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
2275 {
2276 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
2277 Desc.Legacy.Gen.u4Type));
2278 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2279 }
2280
2281 /* Check privileges for data segments and non-conforming code segments. */
2282 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2283 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2284 {
2285 /* The RPL and the new CPL must be less than or equal to the DPL. */
2286 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
2287 || (IEM_GET_CPL(pVCpu) > Desc.Legacy.Gen.u2Dpl))
2288 {
2289 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
2290 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
2291 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2292 }
2293 }
2294
2295 /* Is it there? */
2296 if (!Desc.Legacy.Gen.u1Present)
2297 {
2298 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
2299 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2300 }
2301
2302 /* The base and limit. */
2303 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
2304 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
2305
2306 /*
2307 * Ok, everything checked out fine. Now set the accessed bit before
2308 * committing the result into the registers.
2309 */
2310 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2311 {
2312 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
2313 if (rcStrict != VINF_SUCCESS)
2314 return rcStrict;
2315 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2316 }
2317
2318 /* Commit */
2319 pSReg->Sel = uSel;
2320 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
2321 pSReg->u32Limit = cbLimit;
2322 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
2323 pSReg->ValidSel = uSel;
2324 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2325 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2326 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
2327
2328 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
2329 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2330 return VINF_SUCCESS;
2331}
2332
2333
2334/**
2335 * Performs a task switch.
2336 *
2337 * If the task switch is the result of a JMP, CALL or IRET instruction, the
2338 * caller is responsible for performing the necessary checks (like DPL, TSS
2339 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
2340 * reference for JMP, CALL, IRET.
2341 *
2342 * If the task switch is due to a software interrupt or hardware exception,
2343 * the caller is responsible for validating the TSS selector and descriptor. See
2344 * Intel Instruction reference for INT n.
2345 *
2346 * @returns VBox strict status code.
2347 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2348 * @param enmTaskSwitch The cause of the task switch.
2349 * @param uNextEip The EIP effective after the task switch.
2350 * @param fFlags The flags, see IEM_XCPT_FLAGS_XXX.
2351 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2352 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2353 * @param SelTss The TSS selector of the new task.
2354 * @param pNewDescTss Pointer to the new TSS descriptor.
2355 */
2356VBOXSTRICTRC
2357iemTaskSwitch(PVMCPUCC pVCpu,
2358 IEMTASKSWITCH enmTaskSwitch,
2359 uint32_t uNextEip,
2360 uint32_t fFlags,
2361 uint16_t uErr,
2362 uint64_t uCr2,
2363 RTSEL SelTss,
2364 PIEMSELDESC pNewDescTss) RT_NOEXCEPT
2365{
2366 Assert(!IEM_IS_REAL_MODE(pVCpu));
2367 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2368 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2369
2370 uint32_t const uNewTssType = pNewDescTss->Legacy.Gate.u4Type;
2371 Assert( uNewTssType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2372 || uNewTssType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2373 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2374 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2375
2376 bool const fIsNewTss386 = ( uNewTssType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2377 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2378
2379 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTss=%#x fIsNewTss386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTss,
2380 fIsNewTss386, pVCpu->cpum.GstCtx.eip, uNextEip));
2381
2382 /* Update CR2 in case it's a page-fault. */
2383 /** @todo This should probably be done much earlier in IEM/PGM. See
2384 * @bugref{5653#c49}. */
2385 if (fFlags & IEM_XCPT_FLAGS_CR2)
2386 pVCpu->cpum.GstCtx.cr2 = uCr2;
2387
2388 /*
2389 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
2390 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
2391 */
2392 uint32_t const uNewTssLimit = pNewDescTss->Legacy.Gen.u16LimitLow | (pNewDescTss->Legacy.Gen.u4LimitHigh << 16);
2393 uint32_t const uNewTssLimitMin = fIsNewTss386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
2394 if (uNewTssLimit < uNewTssLimitMin)
2395 {
2396 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTssLimit=%#x uNewTssLimitMin=%#x -> #TS\n",
2397 enmTaskSwitch, uNewTssLimit, uNewTssLimitMin));
2398 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTss & X86_SEL_MASK_OFF_RPL);
2399 }
2400
2401 /*
2402 * Task switches in VMX non-root mode always cause task-switch VM-exits.
2403 * The new TSS must have been read and validated (DPL, limits etc.) before a
2404 * task-switch VM-exit commences.
2405 *
2406 * See Intel spec. 25.4.2 "Treatment of Task Switches".
2407 */
2408 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
2409 {
2410 Log(("iemTaskSwitch: Guest intercept (source=%u, sel=%#x) -> VM-exit.\n", enmTaskSwitch, SelTss));
2411 IEM_VMX_VMEXIT_TASK_SWITCH_RET(pVCpu, enmTaskSwitch, SelTss, uNextEip - pVCpu->cpum.GstCtx.eip);
2412 }
2413
2414 /*
2415 * The SVM nested-guest intercept for task-switch takes priority over all exceptions
2416 * after validating the incoming (new) TSS, see AMD spec. 15.14.1 "Task Switch Intercept".
2417 */
2418 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TASK_SWITCH))
2419 {
2420 uint64_t const uExitInfo1 = SelTss;
2421 uint64_t uExitInfo2 = uErr;
2422 switch (enmTaskSwitch)
2423 {
2424 case IEMTASKSWITCH_JUMP: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_JUMP; break;
2425 case IEMTASKSWITCH_IRET: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_IRET; break;
2426 default: break;
2427 }
2428 if (fFlags & IEM_XCPT_FLAGS_ERR)
2429 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_HAS_ERROR_CODE;
2430 if (pVCpu->cpum.GstCtx.eflags.Bits.u1RF)
2431 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_EFLAGS_RF;
2432
2433 Log(("iemTaskSwitch: Guest intercept -> #VMEXIT. uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uExitInfo1, uExitInfo2));
2434 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_TASK_SWITCH, uExitInfo1, uExitInfo2);
2435 RT_NOREF2(uExitInfo1, uExitInfo2);
2436 }
2437
2438 /*
2439 * Check the current TSS limit. The last write to the current TSS during the
2440 * task switch will be 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
2441 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2442 *
2443 * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
2444 * end up with smaller-than-"legal" TSS limits.
2445 */
2446 uint32_t const uCurTssLimit = pVCpu->cpum.GstCtx.tr.u32Limit;
2447 uint32_t const uCurTssLimitMin = fIsNewTss386 ? 0x5F : 0x29;
2448 if (uCurTssLimit < uCurTssLimitMin)
2449 {
2450 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTssLimit=%#x uCurTssLimitMin=%#x -> #TS\n",
2451 enmTaskSwitch, uCurTssLimit, uCurTssLimitMin));
2452 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTss & X86_SEL_MASK_OFF_RPL);
2453 }
2454
2455 /*
2456 * Verify that the new TSS can be accessed and map it. Map only the required contents
2457 * and not the entire TSS.
2458 */
2459 uint8_t bUnmapInfoNewTss;
2460 void *pvNewTss;
2461 uint32_t const cbNewTss = uNewTssLimitMin + 1;
2462 RTGCPTR const GCPtrNewTss = X86DESC_BASE(&pNewDescTss->Legacy);
2463 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
2464 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
2465 * not perform correct translation if this happens. See Intel spec. 7.2.1
2466 * "Task-State Segment". */
2467 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTss, &bUnmapInfoNewTss, cbNewTss, UINT8_MAX, GCPtrNewTss, IEM_ACCESS_SYS_RW, 0);
2468/** @todo Not cleaning up bUnmapInfoNewTss mapping in any early exits here.
2469 * Consider wrapping the remainder into a function for simpler cleanup. */
2470 if (rcStrict != VINF_SUCCESS)
2471 {
2472 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTss=%u uNewTssLimit=%u rc=%Rrc\n", enmTaskSwitch,
2473 cbNewTss, uNewTssLimit, VBOXSTRICTRC_VAL(rcStrict)));
2474 return rcStrict;
2475 }
2476
2477 /*
2478 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
2479 */
2480 uint32_t fEFlags = pVCpu->cpum.GstCtx.eflags.u;
2481 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
2482 || enmTaskSwitch == IEMTASKSWITCH_IRET)
2483 {
2484 uint8_t bUnmapInfoDescCurTss;
2485 PX86DESC pDescCurTss;
2486 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTss, &bUnmapInfoDescCurTss, sizeof(*pDescCurTss), UINT8_MAX,
2487 pVCpu->cpum.GstCtx.gdtr.pGdt + (pVCpu->cpum.GstCtx.tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0);
2488 if (rcStrict != VINF_SUCCESS)
2489 {
2490 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2491 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2492 return rcStrict;
2493 }
2494
2495 pDescCurTss->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2496 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoDescCurTss);
2497 if (rcStrict != VINF_SUCCESS)
2498 {
2499 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2500 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2501 return rcStrict;
2502 }
2503
2504 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
2505 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
2506 {
2507 Assert( uNewTssType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2508 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2509 fEFlags &= ~X86_EFL_NT;
2510 }
2511 }
2512
2513 /*
2514 * Save the CPU state into the current TSS.
2515 */
2516 RTGCPTR const GCPtrCurTss = pVCpu->cpum.GstCtx.tr.u64Base;
2517 if (GCPtrNewTss == GCPtrCurTss)
2518 {
2519 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTss));
2520 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
2521 pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.u, pVCpu->cpum.GstCtx.eax,
2522 pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel,
2523 pVCpu->cpum.GstCtx.ldtr.Sel));
2524 }
2525 if (fIsNewTss386)
2526 {
2527 /*
2528 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
2529 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2530 */
2531 uint8_t bUnmapInfoCurTss32;
2532 void *pvCurTss32;
2533 uint32_t const offCurTss = RT_UOFFSETOF(X86TSS32, eip);
2534 uint32_t const cbCurTss = RT_UOFFSETOF(X86TSS32, selLdt) - RT_UOFFSETOF(X86TSS32, eip);
2535 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
2536 rcStrict = iemMemMap(pVCpu, &pvCurTss32, &bUnmapInfoCurTss32, cbCurTss, UINT8_MAX,
2537 GCPtrCurTss + offCurTss, IEM_ACCESS_SYS_RW, 0);
2538 if (rcStrict != VINF_SUCCESS)
2539 {
2540 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTss=%#RGv cb=%u rc=%Rrc\n",
2541 enmTaskSwitch, GCPtrCurTss, cbCurTss, VBOXSTRICTRC_VAL(rcStrict)));
2542 return rcStrict;
2543 }
2544
2545 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTss..offCurTss + cbCurTss). */
2546 PX86TSS32 pCurTss32 = (PX86TSS32)((uintptr_t)pvCurTss32 - offCurTss);
2547 pCurTss32->eip = uNextEip;
2548 pCurTss32->eflags = fEFlags;
2549 pCurTss32->eax = pVCpu->cpum.GstCtx.eax;
2550 pCurTss32->ecx = pVCpu->cpum.GstCtx.ecx;
2551 pCurTss32->edx = pVCpu->cpum.GstCtx.edx;
2552 pCurTss32->ebx = pVCpu->cpum.GstCtx.ebx;
2553 pCurTss32->esp = pVCpu->cpum.GstCtx.esp;
2554 pCurTss32->ebp = pVCpu->cpum.GstCtx.ebp;
2555 pCurTss32->esi = pVCpu->cpum.GstCtx.esi;
2556 pCurTss32->edi = pVCpu->cpum.GstCtx.edi;
2557 pCurTss32->es = pVCpu->cpum.GstCtx.es.Sel;
2558 pCurTss32->cs = pVCpu->cpum.GstCtx.cs.Sel;
2559 pCurTss32->ss = pVCpu->cpum.GstCtx.ss.Sel;
2560 pCurTss32->ds = pVCpu->cpum.GstCtx.ds.Sel;
2561 pCurTss32->fs = pVCpu->cpum.GstCtx.fs.Sel;
2562 pCurTss32->gs = pVCpu->cpum.GstCtx.gs.Sel;
2563
2564 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoCurTss32);
2565 if (rcStrict != VINF_SUCCESS)
2566 {
2567 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2568 VBOXSTRICTRC_VAL(rcStrict)));
2569 return rcStrict;
2570 }
2571 }
2572 else
2573 {
2574 /*
2575 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
2576 */
2577 uint8_t bUnmapInfoCurTss16;
2578 void *pvCurTss16;
2579 uint32_t const offCurTss = RT_UOFFSETOF(X86TSS16, ip);
2580 uint32_t const cbCurTss = RT_UOFFSETOF(X86TSS16, selLdt) - RT_UOFFSETOF(X86TSS16, ip);
2581 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
2582 rcStrict = iemMemMap(pVCpu, &pvCurTss16, &bUnmapInfoCurTss16, cbCurTss, UINT8_MAX,
2583 GCPtrCurTss + offCurTss, IEM_ACCESS_SYS_RW, 0);
2584 if (rcStrict != VINF_SUCCESS)
2585 {
2586 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTss=%#RGv cb=%u rc=%Rrc\n",
2587 enmTaskSwitch, GCPtrCurTss, cbCurTss, VBOXSTRICTRC_VAL(rcStrict)));
2588 return rcStrict;
2589 }
2590
2591 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTss..offCurTss + cbCurTss). */
2592 PX86TSS16 pCurTss16 = (PX86TSS16)((uintptr_t)pvCurTss16 - offCurTss);
2593 pCurTss16->ip = uNextEip;
2594 pCurTss16->flags = (uint16_t)fEFlags;
2595 pCurTss16->ax = pVCpu->cpum.GstCtx.ax;
2596 pCurTss16->cx = pVCpu->cpum.GstCtx.cx;
2597 pCurTss16->dx = pVCpu->cpum.GstCtx.dx;
2598 pCurTss16->bx = pVCpu->cpum.GstCtx.bx;
2599 pCurTss16->sp = pVCpu->cpum.GstCtx.sp;
2600 pCurTss16->bp = pVCpu->cpum.GstCtx.bp;
2601 pCurTss16->si = pVCpu->cpum.GstCtx.si;
2602 pCurTss16->di = pVCpu->cpum.GstCtx.di;
2603 pCurTss16->es = pVCpu->cpum.GstCtx.es.Sel;
2604 pCurTss16->cs = pVCpu->cpum.GstCtx.cs.Sel;
2605 pCurTss16->ss = pVCpu->cpum.GstCtx.ss.Sel;
2606 pCurTss16->ds = pVCpu->cpum.GstCtx.ds.Sel;
2607
2608 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoCurTss16);
2609 if (rcStrict != VINF_SUCCESS)
2610 {
2611 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2612 VBOXSTRICTRC_VAL(rcStrict)));
2613 return rcStrict;
2614 }
2615 }
2616
2617 /*
2618 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
2619 */
2620 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2621 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2622 {
2623 /* Whether it's a 16 or 32-bit TSS doesn't matter; we only access the first, common 16-bit field (selPrev) here. */
2624 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTss;
2625 pNewTSS->selPrev = pVCpu->cpum.GstCtx.tr.Sel;
2626 }
2627
2628 /*
2629 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky,
2630 * so it's done further below with error handling (e.g. CR3 changes will go through PGM).
2631 */
2632 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
2633 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
2634 bool fNewDebugTrap;
2635 if (fIsNewTss386)
2636 {
2637 PCX86TSS32 pNewTss32 = (PCX86TSS32)pvNewTss;
2638 uNewCr3 = (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG) ? pNewTss32->cr3 : 0;
2639 uNewEip = pNewTss32->eip;
2640 uNewEflags = pNewTss32->eflags;
2641 uNewEax = pNewTss32->eax;
2642 uNewEcx = pNewTss32->ecx;
2643 uNewEdx = pNewTss32->edx;
2644 uNewEbx = pNewTss32->ebx;
2645 uNewEsp = pNewTss32->esp;
2646 uNewEbp = pNewTss32->ebp;
2647 uNewEsi = pNewTss32->esi;
2648 uNewEdi = pNewTss32->edi;
2649 uNewES = pNewTss32->es;
2650 uNewCS = pNewTss32->cs;
2651 uNewSS = pNewTss32->ss;
2652 uNewDS = pNewTss32->ds;
2653 uNewFS = pNewTss32->fs;
2654 uNewGS = pNewTss32->gs;
2655 uNewLdt = pNewTss32->selLdt;
2656 fNewDebugTrap = RT_BOOL(pNewTss32->fDebugTrap);
2657 }
2658 else
2659 {
2660 PCX86TSS16 pNewTss16 = (PCX86TSS16)pvNewTss;
2661 uNewCr3 = 0;
2662 uNewEip = pNewTss16->ip;
2663 uNewEflags = pNewTss16->flags;
2664 uNewEax = UINT32_C(0xffff0000) | pNewTss16->ax;
2665 uNewEcx = UINT32_C(0xffff0000) | pNewTss16->cx;
2666 uNewEdx = UINT32_C(0xffff0000) | pNewTss16->dx;
2667 uNewEbx = UINT32_C(0xffff0000) | pNewTss16->bx;
2668 uNewEsp = UINT32_C(0xffff0000) | pNewTss16->sp;
2669 uNewEbp = UINT32_C(0xffff0000) | pNewTss16->bp;
2670 uNewEsi = UINT32_C(0xffff0000) | pNewTss16->si;
2671 uNewEdi = UINT32_C(0xffff0000) | pNewTss16->di;
2672 uNewES = pNewTss16->es;
2673 uNewCS = pNewTss16->cs;
2674 uNewSS = pNewTss16->ss;
2675 uNewDS = pNewTss16->ds;
2676 uNewFS = 0;
2677 uNewGS = 0;
2678 uNewLdt = pNewTss16->selLdt;
2679 fNewDebugTrap = false;
2680 }
2681
2682 if (GCPtrNewTss == GCPtrCurTss)
2683 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
2684 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
2685
2686 /*
2687 * We're done accessing the new TSS.
2688 */
2689 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoNewTss);
2690 if (rcStrict != VINF_SUCCESS)
2691 {
2692 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
2693 return rcStrict;
2694 }
2695
2696 /*
2697 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
2698 */
2699 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
2700 {
2701 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTss, &bUnmapInfoNewTss, sizeof(*pNewDescTss), UINT8_MAX,
2702 pVCpu->cpum.GstCtx.gdtr.pGdt + (SelTss & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0);
2703 if (rcStrict != VINF_SUCCESS)
2704 {
2705 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2706 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2707 return rcStrict;
2708 }
2709
2710 /* Check that the descriptor indicates the new TSS is available (not busy). */
2711 AssertMsg( pNewDescTss->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2712 || pNewDescTss->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
2713 ("Invalid TSS descriptor type=%#x", pNewDescTss->Legacy.Gate.u4Type));
2714
2715 pNewDescTss->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2716 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoNewTss);
2717 if (rcStrict != VINF_SUCCESS)
2718 {
2719 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2720 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2721 return rcStrict;
2722 }
2723 }
2724
2725 /*
2726 * From this point on, we're technically in the new task. Exceptions raised by the remaining
2727 * checks are deferred until the task switch state is committed, but delivered before any instruction in the new task is executed.
2728 */
2729 pVCpu->cpum.GstCtx.tr.Sel = SelTss;
2730 pVCpu->cpum.GstCtx.tr.ValidSel = SelTss;
2731 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
2732 pVCpu->cpum.GstCtx.tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTss->Legacy);
2733 pVCpu->cpum.GstCtx.tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTss->Legacy);
2734 pVCpu->cpum.GstCtx.tr.u64Base = X86DESC_BASE(&pNewDescTss->Legacy);
2735 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
2736
2737 /* Set the busy bit in TR. */
2738 pVCpu->cpum.GstCtx.tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2739
2740 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
2741 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2742 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2743 {
2744 uNewEflags |= X86_EFL_NT;
2745 }
2746
2747 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
2748 pVCpu->cpum.GstCtx.cr0 |= X86_CR0_TS;
2749 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
2750
2751 pVCpu->cpum.GstCtx.eip = uNewEip;
2752 pVCpu->cpum.GstCtx.eax = uNewEax;
2753 pVCpu->cpum.GstCtx.ecx = uNewEcx;
2754 pVCpu->cpum.GstCtx.edx = uNewEdx;
2755 pVCpu->cpum.GstCtx.ebx = uNewEbx;
2756 pVCpu->cpum.GstCtx.esp = uNewEsp;
2757 pVCpu->cpum.GstCtx.ebp = uNewEbp;
2758 pVCpu->cpum.GstCtx.esi = uNewEsi;
2759 pVCpu->cpum.GstCtx.edi = uNewEdi;
2760
2761 uNewEflags &= X86_EFL_LIVE_MASK;
2762 uNewEflags |= X86_EFL_RA1_MASK;
2763 IEMMISC_SET_EFL(pVCpu, uNewEflags);
2764
2765 /*
2766 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
2767 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
2768 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
2769 */
2770 pVCpu->cpum.GstCtx.es.Sel = uNewES;
2771 pVCpu->cpum.GstCtx.es.Attr.u &= ~X86DESCATTR_P;
2772
2773 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
2774 pVCpu->cpum.GstCtx.cs.Attr.u &= ~X86DESCATTR_P;
2775
2776 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
2777 pVCpu->cpum.GstCtx.ss.Attr.u &= ~X86DESCATTR_P;
2778
2779 pVCpu->cpum.GstCtx.ds.Sel = uNewDS;
2780 pVCpu->cpum.GstCtx.ds.Attr.u &= ~X86DESCATTR_P;
2781
2782 pVCpu->cpum.GstCtx.fs.Sel = uNewFS;
2783 pVCpu->cpum.GstCtx.fs.Attr.u &= ~X86DESCATTR_P;
2784
2785 pVCpu->cpum.GstCtx.gs.Sel = uNewGS;
2786 pVCpu->cpum.GstCtx.gs.Attr.u &= ~X86DESCATTR_P;
2787 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2788
2789 pVCpu->cpum.GstCtx.ldtr.Sel = uNewLdt;
2790 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
2791 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_P;
2792 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
2793
2794 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2795 {
2796 pVCpu->cpum.GstCtx.es.Attr.u |= X86DESCATTR_UNUSABLE;
2797 pVCpu->cpum.GstCtx.cs.Attr.u |= X86DESCATTR_UNUSABLE;
2798 pVCpu->cpum.GstCtx.ss.Attr.u |= X86DESCATTR_UNUSABLE;
2799 pVCpu->cpum.GstCtx.ds.Attr.u |= X86DESCATTR_UNUSABLE;
2800 pVCpu->cpum.GstCtx.fs.Attr.u |= X86DESCATTR_UNUSABLE;
2801 pVCpu->cpum.GstCtx.gs.Attr.u |= X86DESCATTR_UNUSABLE;
2802 pVCpu->cpum.GstCtx.ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
2803 }
2804
2805 /*
2806 * Switch CR3 for the new task.
2807 */
2808 if ( fIsNewTss386
2809 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG))
2810 {
2811 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
2812 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
2813 AssertRCSuccessReturn(rc, rc);
2814
2815 /* Inform PGM. */
2816 /** @todo Should we raise \#GP(0) here when PAE PDPEs are invalid? */
2817 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PGE));
2818 AssertRCReturn(rc, rc);
2819 /* ignore informational status codes */
2820
2821 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
2822 }
2823
2824 /*
2825 * Switch LDTR for the new task.
2826 */
2827 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
2828 iemHlpLoadNullDataSelectorProt(pVCpu, &pVCpu->cpum.GstCtx.ldtr, uNewLdt);
2829 else
2830 {
2831 Assert(!pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
2832
2833 IEMSELDESC DescNewLdt;
2834 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
2835 if (rcStrict != VINF_SUCCESS)
2836 {
2837 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
2838 uNewLdt, pVCpu->cpum.GstCtx.gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
2839 return rcStrict;
2840 }
2841 if ( !DescNewLdt.Legacy.Gen.u1Present
2842 || DescNewLdt.Legacy.Gen.u1DescType
2843 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
2844 {
2845 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
2846 uNewLdt, DescNewLdt.Legacy.u));
2847 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
2848 }
2849
2850 pVCpu->cpum.GstCtx.ldtr.ValidSel = uNewLdt;
2851 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
2852 pVCpu->cpum.GstCtx.ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
2853 pVCpu->cpum.GstCtx.ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
2854 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
2855 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2856 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
2857 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
2858 }
2859
2860 IEMSELDESC DescSS;
2861 if (IEM_IS_V86_MODE(pVCpu))
2862 {
2863 IEM_SET_CPL(pVCpu, 3);
2864 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.es, uNewES);
2865 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.cs, uNewCS);
2866 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ss, uNewSS);
2867 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ds, uNewDS);
2868 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.fs, uNewFS);
2869 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.gs, uNewGS);
2870
2871 /* Quick fix: fake DescSS. */ /** @todo fix the code further down? */
2872 DescSS.Legacy.u = 0;
2873 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u32Limit;
2874 DescSS.Legacy.Gen.u4LimitHigh = pVCpu->cpum.GstCtx.ss.u32Limit >> 16;
2875 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u64Base;
2876 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 16);
2877 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 24);
2878 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
2879 DescSS.Legacy.Gen.u2Dpl = 3;
2880 }
2881 else
2882 {
2883 uint8_t const uNewCpl = (uNewCS & X86_SEL_RPL);
2884
2885 /*
2886 * Load the stack segment for the new task.
2887 */
2888 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
2889 {
2890 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
2891 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2892 }
2893
2894 /* Fetch the descriptor. */
2895 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
2896 if (rcStrict != VINF_SUCCESS)
2897 {
2898 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
2899 VBOXSTRICTRC_VAL(rcStrict)));
2900 return rcStrict;
2901 }
2902
2903 /* SS must be a data segment and writable. */
2904 if ( !DescSS.Legacy.Gen.u1DescType
2905 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
2906 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
2907 {
2908 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
2909 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
2910 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2911 }
2912
2913 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
2914 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
2915 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
2916 {
2917 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
2918 uNewCpl));
2919 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2920 }
2921
2922 /* Is it there? */
2923 if (!DescSS.Legacy.Gen.u1Present)
2924 {
2925 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
2926 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2927 }
2928
2929 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
2930 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
2931
2932 /* Set the accessed bit before committing the result into SS. */
2933 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2934 {
2935 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
2936 if (rcStrict != VINF_SUCCESS)
2937 return rcStrict;
2938 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2939 }
2940
2941 /* Commit SS. */
2942 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
2943 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSS;
2944 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
2945 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimit;
2946 pVCpu->cpum.GstCtx.ss.u64Base = u64Base;
2947 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
2948 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
2949
2950 /* CPL has changed, update IEM before loading rest of segments. */
2951 IEM_SET_CPL(pVCpu, uNewCpl);
2952
2953 /*
2954 * Load the data segments for the new task.
2955 */
2956 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.es, uNewES);
2957 if (rcStrict != VINF_SUCCESS)
2958 return rcStrict;
2959 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.ds, uNewDS);
2960 if (rcStrict != VINF_SUCCESS)
2961 return rcStrict;
2962 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.fs, uNewFS);
2963 if (rcStrict != VINF_SUCCESS)
2964 return rcStrict;
2965 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.gs, uNewGS);
2966 if (rcStrict != VINF_SUCCESS)
2967 return rcStrict;
2968
2969 /*
2970 * Load the code segment for the new task.
2971 */
2972 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
2973 {
2974 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
2975 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2976 }
2977
2978 /* Fetch the descriptor. */
2979 IEMSELDESC DescCS;
2980 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
2981 if (rcStrict != VINF_SUCCESS)
2982 {
2983 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
2984 return rcStrict;
2985 }
2986
2987 /* CS must be a code segment. */
2988 if ( !DescCS.Legacy.Gen.u1DescType
2989 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
2990 {
2991 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
2992 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
2993 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2994 }
2995
2996 /* For conforming CS, DPL must be less than or equal to the RPL. */
2997 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2998 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
2999 {
3000 Log(("iemTaskSwitch: conforming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
3001 DescCS.Legacy.Gen.u2Dpl));
3002 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3003 }
3004
3005 /* For non-conforming CS, DPL must match RPL. */
3006 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
3007 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
3008 {
3009 Log(("iemTaskSwitch: non-conforming CS DPL RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
3010 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
3011 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3012 }
3013
3014 /* Is it there? */
3015 if (!DescCS.Legacy.Gen.u1Present)
3016 {
3017 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
3018 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3019 }
3020
3021 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
3022 u64Base = X86DESC_BASE(&DescCS.Legacy);
3023
3024 /* Set the accessed bit before committing the result into CS. */
3025 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3026 {
3027 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
3028 if (rcStrict != VINF_SUCCESS)
3029 return rcStrict;
3030 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3031 }
3032
3033 /* Commit CS. */
3034 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
3035 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCS;
3036 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3037 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
3038 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
3039 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3040 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
3041 }
3042
3043 /* Make sure the CPU mode is correct. */
3044 uint32_t const fExecNew = iemCalcExecFlags(pVCpu) | (pVCpu->iem.s.fExec & IEM_F_USER_OPTS);
3045 if (fExecNew != pVCpu->iem.s.fExec)
3046 Log(("iemTaskSwitch: fExec %#x -> %#x (xor %#x)\n", pVCpu->iem.s.fExec, fExecNew, pVCpu->iem.s.fExec ^ fExecNew));
3047 pVCpu->iem.s.fExec = fExecNew;
3048
3049 /** @todo Debug trap. */
3050 if (fIsNewTss386 && fNewDebugTrap)
3051 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
3052
3053 /*
3054 * Construct the error code masks based on what caused this task switch.
3055 * See Intel Instruction reference for INT.
3056 */
3057 uint16_t uExt;
3058 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
3059 && ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3060 || (fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)))
3061 uExt = 1;
3062 else
3063 uExt = 0;
3064
3065 /*
3066 * Push any error code on to the new stack.
3067 */
3068 if (fFlags & IEM_XCPT_FLAGS_ERR)
3069 {
3070 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
3071 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
3072 uint8_t const cbStackFrame = fIsNewTss386 ? 4 : 2;
3073
3074 /* Check that there is sufficient space on the stack. */
3075 /** @todo Factor out segment limit checking for normal/expand down segments
3076 * into a separate function. */
3077 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3078 {
3079 if ( pVCpu->cpum.GstCtx.esp - 1 > cbLimitSS
3080 || pVCpu->cpum.GstCtx.esp < cbStackFrame)
3081 {
3082 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
3083 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
3084 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
3085 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
3086 }
3087 }
3088 else
3089 {
3090 if ( pVCpu->cpum.GstCtx.esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
3091 || pVCpu->cpum.GstCtx.esp - cbStackFrame < cbLimitSS + UINT32_C(1))
3092 {
3093 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
3094 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
3095 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
3096 }
3097 }
3098
3099
3100 if (fIsNewTss386)
3101 rcStrict = iemMemStackPushU32(pVCpu, uErr);
3102 else
3103 rcStrict = iemMemStackPushU16(pVCpu, uErr);
3104 if (rcStrict != VINF_SUCCESS)
3105 {
3106 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
3107 fIsNewTss386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
3108 return rcStrict;
3109 }
3110 }
3111
3112 /* Check the new EIP against the new CS limit. */
3113 if (pVCpu->cpum.GstCtx.eip > pVCpu->cpum.GstCtx.cs.u32Limit)
3114 {
3115 Log(("iemTaskSwitch: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
3116 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.cs.u32Limit));
3117 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
3118 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
3119 }
3120
3121 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip,
3122 pVCpu->cpum.GstCtx.ss.Sel));
3123 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3124}
3125
3126
3127/**
3128 * Implements exceptions and interrupts for protected mode.
3129 *
3130 * @returns VBox strict status code.
3131 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3132 * @param cbInstr The number of bytes to offset rIP by in the return
3133 * address.
3134 * @param u8Vector The interrupt / exception vector number.
3135 * @param fFlags The flags.
3136 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3137 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3138 */
3139static VBOXSTRICTRC
3140iemRaiseXcptOrIntInProtMode(PVMCPUCC pVCpu,
3141 uint8_t cbInstr,
3142 uint8_t u8Vector,
3143 uint32_t fFlags,
3144 uint16_t uErr,
3145 uint64_t uCr2) RT_NOEXCEPT
3146{
3147 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3148
3149 /*
3150 * Read the IDT entry.
3151 */
3152 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
3153 {
3154 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3155 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3156 }
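    /* Note: for faults originating from the IDT the error code carries the vector
       number in the selector index field (hence X86_TRAP_ERR_SEL_SHIFT) with the
       IDT bit set; the same encoding is used for the IDT related faults raised
       further down. */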
3157 X86DESC Idte;
3158 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
3159 pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(8) * u8Vector);
3160 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3161 {
3162 Log(("iemRaiseXcptOrIntInProtMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3163 return rcStrict;
3164 }
3165 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x - from %04x:%08RX64 efl=%#x depth=%d\n",
3166 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3167 Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow,
3168 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.eflags.u, pVCpu->iem.s.cXcptRecursions));
3169
3170 /*
3171 * Check the descriptor type, DPL and such.
3172 * ASSUMES this is done in the same order as described for call-gate calls.
3173 */
3174 if (Idte.Gate.u1DescType)
3175 {
3176 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3177 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3178 }
3179 bool fTaskGate = false;
3180 uint8_t f32BitGate = true;
3181 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
3182 switch (Idte.Gate.u4Type)
3183 {
3184 case X86_SEL_TYPE_SYS_UNDEFINED:
3185 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
3186 case X86_SEL_TYPE_SYS_LDT:
3187 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3188 case X86_SEL_TYPE_SYS_286_CALL_GATE:
3189 case X86_SEL_TYPE_SYS_UNDEFINED2:
3190 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
3191 case X86_SEL_TYPE_SYS_UNDEFINED3:
3192 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3193 case X86_SEL_TYPE_SYS_386_CALL_GATE:
3194 case X86_SEL_TYPE_SYS_UNDEFINED4:
3195 {
3196 /** @todo check what actually happens when the type is wrong...
3197 * esp. call gates. */
3198 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3199 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3200 }
3201
3202 case X86_SEL_TYPE_SYS_286_INT_GATE:
3203 f32BitGate = false;
3204 RT_FALL_THRU();
3205 case X86_SEL_TYPE_SYS_386_INT_GATE:
3206 fEflToClear |= X86_EFL_IF;
3207 break;
3208
3209 case X86_SEL_TYPE_SYS_TASK_GATE:
3210 fTaskGate = true;
3211#ifndef IEM_IMPLEMENTS_TASKSWITCH
3212 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
3213#endif
3214 break;
3215
3216 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
3217 f32BitGate = false;
3218 break;
3219 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
3220 break;
3221
3222 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3223 }
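    /* Note: f32BitGate doubles as a shift count further down, scaling the stack
       frame element size from 2 to 4 bytes for 32-bit gates. */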
3224
3225 /* Check DPL against CPL if applicable. */
3226 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
3227 {
3228 if (IEM_GET_CPL(pVCpu) > Idte.Gate.u2Dpl)
3229 {
3230 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, IEM_GET_CPL(pVCpu), Idte.Gate.u2Dpl));
3231 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3232 }
3233 }
3234
3235 /* Is it there? */
3236 if (!Idte.Gate.u1Present)
3237 {
3238 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
3239 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3240 }
3241
3242 /* Is it a task-gate? */
3243 if (fTaskGate)
3244 {
3245 /*
3246 * Construct the error code masks based on what caused this task switch.
3247 * See Intel Instruction reference for INT.
3248 */
3249 uint16_t const uExt = ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3250 && !(fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)) ? 0 : 1;
3251 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
3252 RTSEL SelTss = Idte.Gate.u16Sel;
3253
3254 /*
3255 * Fetch the TSS descriptor in the GDT.
3256 */
3257 IEMSELDESC DescTSS;
3258 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTss, X86_XCPT_GP, (SelTss & uSelMask) | uExt);
3259 if (rcStrict != VINF_SUCCESS)
3260 {
3261 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTss,
3262 VBOXSTRICTRC_VAL(rcStrict)));
3263 return rcStrict;
3264 }
3265
3266 /* The TSS descriptor must be a system segment and be available (not busy). */
3267 if ( DescTSS.Legacy.Gen.u1DescType
3268 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
3269 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
3270 {
3271 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
3272 u8Vector, SelTss, DescTSS.Legacy.au64));
3273 return iemRaiseGeneralProtectionFault(pVCpu, (SelTss & uSelMask) | uExt);
3274 }
3275
3276 /* The TSS must be present. */
3277 if (!DescTSS.Legacy.Gen.u1Present)
3278 {
3279 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTss, DescTSS.Legacy.au64));
3280 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTss & uSelMask) | uExt);
3281 }
3282
3283 /* Do the actual task switch. */
3284 return iemTaskSwitch(pVCpu, IEMTASKSWITCH_INT_XCPT,
3285 (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip,
3286 fFlags, uErr, uCr2, SelTss, &DescTSS);
3287 }
3288
3289 /* A null CS is bad. */
3290 RTSEL NewCS = Idte.Gate.u16Sel;
3291 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3292 {
3293 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3294 return iemRaiseGeneralProtectionFault0(pVCpu);
3295 }
3296
3297 /* Fetch the descriptor for the new CS. */
3298 IEMSELDESC DescCS;
3299 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
3300 if (rcStrict != VINF_SUCCESS)
3301 {
3302 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3303 return rcStrict;
3304 }
3305
3306 /* Must be a code segment. */
3307 if (!DescCS.Legacy.Gen.u1DescType)
3308 {
3309 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3310 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3311 }
3312 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3313 {
3314 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3315 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3316 }
3317
3318 /* Don't allow lowering the privilege level. */
3319 /** @todo Does the lowering of privileges apply to software interrupts
3320 * only? This has bearings on the more-privileged or
3321 * same-privilege stack behavior further down. A testcase would
3322 * be nice. */
3323 if (DescCS.Legacy.Gen.u2Dpl > IEM_GET_CPL(pVCpu))
3324 {
3325 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3326 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
3327 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3328 }
3329
3330 /* Make sure the selector is present. */
3331 if (!DescCS.Legacy.Gen.u1Present)
3332 {
3333 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3334 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
3335 }
3336
3337#ifdef LOG_ENABLED
3338 /* If software interrupt, try decode it if logging is enabled and such. */
3339 if ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3340 && LogIsItEnabled(RTLOGGRPFLAGS_ENABLED, LOG_GROUP_IEM_SYSCALL))
3341 iemLogSyscallProtModeInt(pVCpu, u8Vector, cbInstr);
3342#endif
3343
3344 /* Check the new EIP against the new CS limit. */
3345 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
3346 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
3347 ? Idte.Gate.u16OffsetLow
3348 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
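    /* (286 gates only carry a 16-bit offset; 386 gates combine the low and high
       offset words into a full 32-bit EIP.) */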
3349 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
3350 if (uNewEip > cbLimitCS)
3351 {
3352 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
3353 u8Vector, uNewEip, cbLimitCS, NewCS));
3354 return iemRaiseGeneralProtectionFault(pVCpu, 0);
3355 }
3356 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
3357
3358 /* Calc the flag image to push. */
3359 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3360 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3361 fEfl &= ~X86_EFL_RF;
3362 else
3363 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3364
3365 /* From V8086 mode only go to CPL 0. */
3366 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3367 ? IEM_GET_CPL(pVCpu) : DescCS.Legacy.Gen.u2Dpl;
3368 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
3369 {
3370 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
3371 return iemRaiseGeneralProtectionFault(pVCpu, 0);
3372 }
3373
3374 /*
3375 * If the privilege level changes, we need to get a new stack from the TSS.
3376 * This in turns means validating the new SS and ESP...
3377 */
3378 if (uNewCpl != IEM_GET_CPL(pVCpu))
3379 {
3380 RTSEL NewSS;
3381 uint32_t uNewEsp;
3382 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, uNewCpl, &NewSS, &uNewEsp);
3383 if (rcStrict != VINF_SUCCESS)
3384 return rcStrict;
3385
3386 IEMSELDESC DescSS;
3387 rcStrict = iemMiscValidateNewSS(pVCpu, NewSS, uNewCpl, &DescSS);
3388 if (rcStrict != VINF_SUCCESS)
3389 return rcStrict;
3390 /* If the new SS is 16-bit, we are only going to use SP, not ESP. */
3391 if (!DescSS.Legacy.Gen.u1DefBig)
3392 {
3393 Log(("iemRaiseXcptOrIntInProtMode: Forcing ESP=%#x to 16 bits\n", uNewEsp));
3394 uNewEsp = (uint16_t)uNewEsp;
3395 }
3396
3397 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
3398
3399 /* Check that there is sufficient space for the stack frame. */
3400 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
3401 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
3402 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
3403 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
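        /* The frame sizes above cover EIP, CS, EFLAGS, the old ESP and SS plus an
           optional error code; when coming from V8086 mode another four slots are
           needed for the GS, FS, DS and ES selectors. Each slot is 2 or 4 bytes
           depending on the gate size (f32BitGate used as shift count). */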
3404
3405 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3406 {
3407 if ( uNewEsp - 1 > cbLimitSS
3408 || uNewEsp < cbStackFrame)
3409 {
3410 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
3411 u8Vector, NewSS, uNewEsp, cbStackFrame));
3412 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
3413 }
3414 }
3415 else
3416 {
3417 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT16_MAX)
3418 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
3419 {
3420 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
3421 u8Vector, NewSS, uNewEsp, cbStackFrame));
3422 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
3423 }
3424 }
3425
3426 /*
3427 * Start making changes.
3428 */
3429
3430 /* Set the new CPL so that stack accesses use it. */
3431 uint8_t const uOldCpl = IEM_GET_CPL(pVCpu);
3432 IEM_SET_CPL(pVCpu, uNewCpl);
3433
3434 /* Create the stack frame. */
3435 uint8_t bUnmapInfoStackFrame;
3436 RTPTRUNION uStackFrame;
3437 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, &bUnmapInfoStackFrame, cbStackFrame, UINT8_MAX,
3438 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy),
3439 IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */
3440 if (rcStrict != VINF_SUCCESS)
3441 return rcStrict;
3442 if (f32BitGate)
3443 {
3444 if (fFlags & IEM_XCPT_FLAGS_ERR)
3445 *uStackFrame.pu32++ = uErr;
3446 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3447 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
3448 uStackFrame.pu32[2] = fEfl;
3449 uStackFrame.pu32[3] = pVCpu->cpum.GstCtx.esp;
3450 uStackFrame.pu32[4] = pVCpu->cpum.GstCtx.ss.Sel;
3451 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
3452 if (fEfl & X86_EFL_VM)
3453 {
3454 uStackFrame.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel;
3455 uStackFrame.pu32[5] = pVCpu->cpum.GstCtx.es.Sel;
3456 uStackFrame.pu32[6] = pVCpu->cpum.GstCtx.ds.Sel;
3457 uStackFrame.pu32[7] = pVCpu->cpum.GstCtx.fs.Sel;
3458 uStackFrame.pu32[8] = pVCpu->cpum.GstCtx.gs.Sel;
3459 }
3460 }
3461 else
3462 {
3463 if (fFlags & IEM_XCPT_FLAGS_ERR)
3464 *uStackFrame.pu16++ = uErr;
3465 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
3466 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
3467 uStackFrame.pu16[2] = fEfl;
3468 uStackFrame.pu16[3] = pVCpu->cpum.GstCtx.sp;
3469 uStackFrame.pu16[4] = pVCpu->cpum.GstCtx.ss.Sel;
3470 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.sp));
3471 if (fEfl & X86_EFL_VM)
3472 {
3473 uStackFrame.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
3474 uStackFrame.pu16[5] = pVCpu->cpum.GstCtx.es.Sel;
3475 uStackFrame.pu16[6] = pVCpu->cpum.GstCtx.ds.Sel;
3476 uStackFrame.pu16[7] = pVCpu->cpum.GstCtx.fs.Sel;
3477 uStackFrame.pu16[8] = pVCpu->cpum.GstCtx.gs.Sel;
3478 }
3479 }
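        /* The frame built above is what IRET expects to pop on the way back: EIP/IP,
           CS, EFLAGS, the old ESP/SP and SS, with the V8086 data segment selectors
           appended when interrupting virtual-8086 code. */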
3480 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoStackFrame);
3481 if (rcStrict != VINF_SUCCESS)
3482 return rcStrict;
3483
3484 /* Mark the selectors 'accessed' (hope this is the correct time). */
3485 /** @todo testcase: exactly _when_ are the accessed bits set - before or
3486 * after pushing the stack frame? (Write protect the gdt + stack to
3487 * find out.) */
3488 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3489 {
3490 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3491 if (rcStrict != VINF_SUCCESS)
3492 return rcStrict;
3493 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3494 }
3495
3496 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3497 {
3498 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
3499 if (rcStrict != VINF_SUCCESS)
3500 return rcStrict;
3501 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3502 }
3503
3504 /*
3505 * Start committing the register changes (joins with the DPL=CPL branch).
3506 */
3507 pVCpu->cpum.GstCtx.ss.Sel = NewSS;
3508 pVCpu->cpum.GstCtx.ss.ValidSel = NewSS;
3509 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3510 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimitSS;
3511 pVCpu->cpum.GstCtx.ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
3512 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3513 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
3514 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
3515 * SP is loaded).
3516 * Need to check the other combinations too:
3517 * - 16-bit TSS, 32-bit handler
3518 * - 32-bit TSS, 16-bit handler */
3519 if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
3520 pVCpu->cpum.GstCtx.sp = (uint16_t)(uNewEsp - cbStackFrame);
3521 else
3522 pVCpu->cpum.GstCtx.rsp = uNewEsp - cbStackFrame;
3523
3524 if (fEfl & X86_EFL_VM)
3525 {
3526 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.gs);
3527 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.fs);
3528 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.es);
3529 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.ds);
3530 }
3531 }
3532 /*
3533 * Same privilege, no stack change and smaller stack frame.
3534 */
3535 else
3536 {
3537 uint64_t uNewRsp;
3538 uint8_t bUnmapInfoStackFrame;
3539 RTPTRUNION uStackFrame;
3540 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
3541 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, f32BitGate ? 3 : 1,
3542 &uStackFrame.pv, &bUnmapInfoStackFrame, &uNewRsp);
3543 if (rcStrict != VINF_SUCCESS)
3544 return rcStrict;
3545
3546 if (f32BitGate)
3547 {
3548 if (fFlags & IEM_XCPT_FLAGS_ERR)
3549 *uStackFrame.pu32++ = uErr;
3550 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3551 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | IEM_GET_CPL(pVCpu);
3552 uStackFrame.pu32[2] = fEfl;
3553 }
3554 else
3555 {
3556 if (fFlags & IEM_XCPT_FLAGS_ERR)
3557 *uStackFrame.pu16++ = uErr;
3558 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3559 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | IEM_GET_CPL(pVCpu);
3560 uStackFrame.pu16[2] = fEfl;
3561 }
3562 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoStackFrame); /* don't use the commit here */
3563 if (rcStrict != VINF_SUCCESS)
3564 return rcStrict;
3565
3566 /* Mark the CS selector as 'accessed'. */
3567 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3568 {
3569 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3570 if (rcStrict != VINF_SUCCESS)
3571 return rcStrict;
3572 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3573 }
3574
3575 /*
3576 * Start committing the register changes (joins with the other branch).
3577 */
3578 pVCpu->cpum.GstCtx.rsp = uNewRsp;
3579 }
3580
3581 /* ... register committing continues. */
3582 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3583 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3584 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3585 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCS;
3586 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3587 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3588
3589 pVCpu->cpum.GstCtx.rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
3590 fEfl &= ~fEflToClear;
3591 IEMMISC_SET_EFL(pVCpu, fEfl);
3592
3593 if (fFlags & IEM_XCPT_FLAGS_CR2)
3594 pVCpu->cpum.GstCtx.cr2 = uCr2;
3595
3596 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3597 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3598
3599 /* Make sure the execution flags are correct. */
3600 uint32_t const fExecNew = iemCalcExecFlags(pVCpu) | (pVCpu->iem.s.fExec & IEM_F_USER_OPTS);
3601 if (fExecNew != pVCpu->iem.s.fExec)
3602 Log(("iemRaiseXcptOrIntInProtMode: fExec %#x -> %#x (xor %#x)\n",
3603 pVCpu->iem.s.fExec, fExecNew, pVCpu->iem.s.fExec ^ fExecNew));
3604 pVCpu->iem.s.fExec = fExecNew;
3605 Assert(IEM_GET_CPL(pVCpu) == uNewCpl);
3606
3607 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3608}
3609
3610
3611/**
3612 * Implements exceptions and interrupts for long mode.
3613 *
3614 * @returns VBox strict status code.
3615 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3616 * @param cbInstr The number of bytes to offset rIP by in the return
3617 * address.
3618 * @param u8Vector The interrupt / exception vector number.
3619 * @param fFlags The flags.
3620 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3621 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3622 */
3623static VBOXSTRICTRC
3624iemRaiseXcptOrIntInLongMode(PVMCPUCC pVCpu,
3625 uint8_t cbInstr,
3626 uint8_t u8Vector,
3627 uint32_t fFlags,
3628 uint16_t uErr,
3629 uint64_t uCr2) RT_NOEXCEPT
3630{
3631 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3632
3633 /*
3634 * Read the IDT entry.
3635 */
3636 uint16_t offIdt = (uint16_t)u8Vector << 4;
3637 if (pVCpu->cpum.GstCtx.idtr.cbIdt < offIdt + 7)
3638 {
3639 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3640 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3641 }
3642 X86DESC64 Idte;
3643#ifdef _MSC_VER /* Shut up silly compiler warning. */
3644 Idte.au64[0] = 0;
3645 Idte.au64[1] = 0;
3646#endif
3647 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt);
3648 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3649 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt + 8);
3650 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3651 {
3652 Log(("iemRaiseXcptOrIntInLongMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3653 return rcStrict;
3654 }
3655 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
3656 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3657 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
3658
3659 /*
3660 * Check the descriptor type, DPL and such.
3661 * ASSUMES this is done in the same order as described for call-gate calls.
3662 */
3663 if (Idte.Gate.u1DescType)
3664 {
3665 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3666 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3667 }
3668 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
3669 switch (Idte.Gate.u4Type)
3670 {
3671 case AMD64_SEL_TYPE_SYS_INT_GATE:
3672 fEflToClear |= X86_EFL_IF;
3673 break;
3674 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
3675 break;
3676
3677 default:
3678 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3679 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3680 }
3681
3682 /* Check DPL against CPL if applicable. */
3683 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
3684 {
3685 if (IEM_GET_CPL(pVCpu) > Idte.Gate.u2Dpl)
3686 {
3687 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, IEM_GET_CPL(pVCpu), Idte.Gate.u2Dpl));
3688 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3689 }
3690 }
3691
3692 /* Is it there? */
3693 if (!Idte.Gate.u1Present)
3694 {
3695 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
3696 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3697 }
3698
3699 /* A null CS is bad. */
3700 RTSEL NewCS = Idte.Gate.u16Sel;
3701 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3702 {
3703 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3704 return iemRaiseGeneralProtectionFault0(pVCpu);
3705 }
3706
3707 /* Fetch the descriptor for the new CS. */
3708 IEMSELDESC DescCS;
3709 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
3710 if (rcStrict != VINF_SUCCESS)
3711 {
3712 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3713 return rcStrict;
3714 }
3715
3716 /* Must be a 64-bit code segment. */
3717 if (!DescCS.Long.Gen.u1DescType)
3718 {
3719 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3720 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3721 }
3722 if ( !DescCS.Long.Gen.u1Long
3723 || DescCS.Long.Gen.u1DefBig
3724 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
3725 {
3726 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
3727 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
3728 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3729 }
3730
3731 /* Don't allow lowering the privilege level. For non-conforming CS
3732 selectors, the CS.DPL sets the privilege level the trap/interrupt
3733 handler runs at. For conforming CS selectors, the CPL remains
3734 unchanged, but the CS.DPL must be <= CPL. */
3735 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
3736 * when CPU in Ring-0. Result \#GP? */
3737 if (DescCS.Legacy.Gen.u2Dpl > IEM_GET_CPL(pVCpu))
3738 {
3739 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3740 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
3741 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3742 }
3743
3744
3745 /* Make sure the selector is present. */
3746 if (!DescCS.Legacy.Gen.u1Present)
3747 {
3748 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3749 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
3750 }
3751
3752 /* Check that the new RIP is canonical. */
3753 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
3754 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
3755 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
3756 if (!IEM_IS_CANONICAL(uNewRip))
3757 {
3758 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
3759 return iemRaiseGeneralProtectionFault0(pVCpu);
3760 }
3761
3762 /*
3763 * If the privilege level changes or if the IST isn't zero, we need to get
3764 * a new stack from the TSS.
3765 */
3766 uint64_t uNewRsp;
3767 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3768 ? IEM_GET_CPL(pVCpu) : DescCS.Legacy.Gen.u2Dpl;
3769 if ( uNewCpl != IEM_GET_CPL(pVCpu)
3770 || Idte.Gate.u3IST != 0)
3771 {
3772 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
3773 if (rcStrict != VINF_SUCCESS)
3774 return rcStrict;
3775 }
3776 else
3777 uNewRsp = pVCpu->cpum.GstCtx.rsp;
3778 uNewRsp &= ~(uint64_t)0xf;
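    /* In 64-bit mode the CPU aligns the stack down to a 16 byte boundary before
       pushing the interrupt frame; the masking above mirrors that. */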
3779
3780 /*
3781 * Calc the flag image to push.
3782 */
3783 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3784 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3785 fEfl &= ~X86_EFL_RF;
3786 else
3787 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3788
3789 /*
3790 * Start making changes.
3791 */
3792 /* Set the new CPL so that stack accesses use it. */
3793 uint8_t const uOldCpl = IEM_GET_CPL(pVCpu);
3794 IEM_SET_CPL(pVCpu, uNewCpl);
3795/** @todo Setting CPL this early seems wrong as it would affect any errors we
3796 * raise accessing the stack and (?) GDT/LDT... */
3797
3798 /* Create the stack frame. */
3799 uint8_t bUnmapInfoStackFrame;
3800 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
3801 RTPTRUNION uStackFrame;
3802 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, &bUnmapInfoStackFrame, cbStackFrame, UINT8_MAX,
3803 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */
3804 if (rcStrict != VINF_SUCCESS)
3805 return rcStrict;
3806
3807 if (fFlags & IEM_XCPT_FLAGS_ERR)
3808 *uStackFrame.pu64++ = uErr;
3809 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.rip + cbInstr : pVCpu->cpum.GstCtx.rip;
3810 uStackFrame.pu64[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
3811 uStackFrame.pu64[2] = fEfl;
3812 uStackFrame.pu64[3] = pVCpu->cpum.GstCtx.rsp;
3813 uStackFrame.pu64[4] = pVCpu->cpum.GstCtx.ss.Sel;
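    /* The 64-bit interrupt frame always uses 8-byte slots and always includes SS:RSP,
       regardless of whether the privilege level changes. */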
3814 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoStackFrame);
3815 if (rcStrict != VINF_SUCCESS)
3816 return rcStrict;
3817
3818 /* Mark the CS selector 'accessed' (hope this is the correct time). */
3819 /** @todo testcase: exactly _when_ are the accessed bits set - before or
3820 * after pushing the stack frame? (Write protect the gdt + stack to
3821 * find out.) */
3822 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3823 {
3824 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3825 if (rcStrict != VINF_SUCCESS)
3826 return rcStrict;
3827 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3828 }
3829
3830 /*
3831 * Start committing the register changes.
3832 */
3833 /** @todo research/testcase: Figure out what VT-x and AMD-V load into the
3834 * hidden registers when interrupting 32-bit or 16-bit code! */
3835 if (uNewCpl != uOldCpl)
3836 {
3837 pVCpu->cpum.GstCtx.ss.Sel = 0 | uNewCpl;
3838 pVCpu->cpum.GstCtx.ss.ValidSel = 0 | uNewCpl;
3839 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3840 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
3841 pVCpu->cpum.GstCtx.ss.u64Base = 0;
3842 pVCpu->cpum.GstCtx.ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
3843 }
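    /* On a privilege change in 64-bit mode SS is loaded with a NULL selector whose
       RPL equals the new CPL, which is what the assignments above reproduce. */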
3844 pVCpu->cpum.GstCtx.rsp = uNewRsp - cbStackFrame;
3845 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3846 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3847 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3848 pVCpu->cpum.GstCtx.cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
3849 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3850 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3851 pVCpu->cpum.GstCtx.rip = uNewRip;
3852
3853 fEfl &= ~fEflToClear;
3854 IEMMISC_SET_EFL(pVCpu, fEfl);
3855
3856 if (fFlags & IEM_XCPT_FLAGS_CR2)
3857 pVCpu->cpum.GstCtx.cr2 = uCr2;
3858
3859 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3860 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3861
3862 iemRecalcExecModeAndCplFlags(pVCpu);
3863
3864 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3865}
3866
3867
3868/**
3869 * Implements exceptions and interrupts.
3870 *
3871 * All exceptions and interrupts go through this function!
3872 *
3873 * @returns VBox strict status code.
3874 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3875 * @param cbInstr The number of bytes to offset rIP by in the return
3876 * address.
3877 * @param u8Vector The interrupt / exception vector number.
3878 * @param fFlags The flags.
3879 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3880 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3881 */
3882VBOXSTRICTRC
3883iemRaiseXcptOrInt(PVMCPUCC pVCpu,
3884 uint8_t cbInstr,
3885 uint8_t u8Vector,
3886 uint32_t fFlags,
3887 uint16_t uErr,
3888 uint64_t uCr2) RT_NOEXCEPT
3889{
3890 /*
3891 * Get all the state that we might need here.
3892 */
3893 IEM_CTX_IMPORT_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3894 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3895
3896#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
3897 /*
3898 * Flush prefetch buffer
3899 */
3900 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
3901#endif
3902
3903 /*
3904 * Perform the V8086 IOPL check and upgrade the fault without nesting.
3905 */
3906 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1VM
3907 && pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL != 3
3908 && (fFlags & ( IEM_XCPT_FLAGS_T_SOFT_INT
3909 | IEM_XCPT_FLAGS_BP_INSTR
3910 | IEM_XCPT_FLAGS_ICEBP_INSTR
3911 | IEM_XCPT_FLAGS_OF_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
3912 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) )
3913 {
3914 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
3915 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
3916 u8Vector = X86_XCPT_GP;
3917 uErr = 0;
3918 }
3919
3920 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
3921#ifdef DBGFTRACE_ENABLED
3922 RTTraceBufAddMsgF(pVM->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
3923 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
3924 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp);
3925#endif
3926
3927 /*
3928 * Check if DBGF wants to intercept the exception.
3929 */
3930 if ( (fFlags & (IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_T_SOFT_INT))
3931 || !DBGF_IS_EVENT_ENABLED(pVM, (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + u8Vector)) )
3932 { /* likely */ }
3933 else
3934 {
3935 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + u8Vector),
3936 DBGFEVENTCTX_INVALID, 1, (uint64_t)uErr);
3937 if (rcStrict != VINF_SUCCESS)
3938 return rcStrict;
3939 }
3940
3941 /*
3942 * Evaluate whether NMI blocking should be in effect.
3943 * Normally, NMI blocking is in effect whenever we inject an NMI.
3944 */
3945 bool fBlockNmi = u8Vector == X86_XCPT_NMI
3946 && (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT);
3947
3948#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3949 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3950 {
3951 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEvent(pVCpu, u8Vector, fFlags, uErr, uCr2, cbInstr);
3952 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
3953 return rcStrict0;
3954
3955 /* If virtual-NMI blocking is in effect for the nested-guest, guest NMIs are not blocked. */
3956 if (pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking)
3957 {
3958 Assert(CPUMIsGuestVmxPinCtlsSet(&pVCpu->cpum.GstCtx, VMX_PIN_CTLS_VIRT_NMI));
3959 fBlockNmi = false;
3960 }
3961 }
3962#endif
3963
3964#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
3965 if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
3966 {
3967 /*
3968 * If the event is being injected as part of VMRUN, it isn't subject to event
3969 * intercepts in the nested-guest. However, secondary exceptions that occur
3970 * during injection of any event -are- subject to exception intercepts.
3971 *
3972 * See AMD spec. 15.20 "Event Injection".
3973 */
3974 if (!pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents)
3975 pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents = true;
3976 else
3977 {
3978 /*
3979 * Check and handle if the event being raised is intercepted.
3980 */
3981 VBOXSTRICTRC rcStrict0 = iemHandleSvmEventIntercept(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
3982 if (rcStrict0 != VINF_SVM_INTERCEPT_NOT_ACTIVE)
3983 return rcStrict0;
3984 }
3985 }
3986#endif
3987
3988 /*
3989 * Set NMI blocking if necessary.
3990 */
3991 if (fBlockNmi)
3992 CPUMSetInterruptInhibitingByNmi(&pVCpu->cpum.GstCtx);
3993
3994 /*
3995 * Do recursion accounting.
3996 */
3997 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
3998 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
3999 if (pVCpu->iem.s.cXcptRecursions == 0)
4000 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
4001 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2));
4002 else
4003 {
4004 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
4005 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt,
4006 pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
4007
4008 if (pVCpu->iem.s.cXcptRecursions >= 4)
4009 {
4010#ifdef DEBUG_bird
4011 AssertFailed();
4012#endif
4013 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
4014 }
4015
4016 /*
4017 * Evaluate the sequence of recurring events.
4018 */
4019 IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevXcpt, uPrevXcpt, fFlags, u8Vector,
4020 NULL /* pXcptRaiseInfo */);
4021 if (enmRaise == IEMXCPTRAISE_CURRENT_XCPT)
4022 { /* likely */ }
4023 else if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
4024 {
4025 Log2(("iemRaiseXcptOrInt: Raising double fault. uPrevXcpt=%#x\n", uPrevXcpt));
4026 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
4027 u8Vector = X86_XCPT_DF;
4028 uErr = 0;
4029#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4030 /* VMX nested-guest #DF intercept needs to be checked here. */
4031 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
4032 {
4033 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEventDoubleFault(pVCpu);
4034 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
4035 return rcStrict0;
4036 }
4037#endif
4038 /* SVM nested-guest #DF intercepts need to be checked now. See AMD spec. 15.12 "Exception Intercepts". */
4039 if (IEM_SVM_IS_XCPT_INTERCEPT_SET(pVCpu, X86_XCPT_DF))
4040 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
4041 }
4042 else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)
4043 {
4044 Log2(("iemRaiseXcptOrInt: Raising triple fault. uPrevXcpt=%#x\n", uPrevXcpt));
4045 return iemInitiateCpuShutdown(pVCpu);
4046 }
4047 else if (enmRaise == IEMXCPTRAISE_CPU_HANG)
4048 {
4049 /* If a nested-guest enters an endless CPU loop condition, we'll emulate it; otherwise guru. */
4050 Log2(("iemRaiseXcptOrInt: CPU hang condition detected\n"));
4051 if ( !CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu))
4052 && !CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
4053 return VERR_EM_GUEST_CPU_HANG;
4054 }
4055 else
4056 {
4057 AssertMsgFailed(("Unexpected condition! enmRaise=%#x uPrevXcpt=%#x fPrevXcpt=%#x, u8Vector=%#x fFlags=%#x\n",
4058 enmRaise, uPrevXcpt, fPrevXcpt, u8Vector, fFlags));
4059 return VERR_IEM_IPE_9;
4060 }
4061
4062 /*
4063 * The 'EXT' bit is set when an exception occurs during deliver of an external
4064 * event (such as an interrupt or earlier exception)[1]. Privileged software
4065 * exception (INT1) also sets the EXT bit[2]. Exceptions generated by software
4066 * interrupts and INTO, INT3 instructions, the 'EXT' bit will not be set.
4067 *
4068 * [1] - Intel spec. 6.13 "Error Code"
4069 * [2] - Intel spec. 26.5.1.1 "Details of Vectored-Event Injection".
4070 * [3] - Intel Instruction reference for INT n.
4071 */
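    /* (Error code layout reminder: bit 0 = EXT, bit 1 = IDT, bit 2 = TI, and
       bits 3 thru 15 hold the selector/vector index.) */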
4072 if ( (fPrevXcpt & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR))
4073 && (fFlags & IEM_XCPT_FLAGS_ERR)
4074 && u8Vector != X86_XCPT_PF
4075 && u8Vector != X86_XCPT_DF)
4076 {
4077 uErr |= X86_TRAP_ERR_EXTERNAL;
4078 }
4079 }
4080
4081 pVCpu->iem.s.cXcptRecursions++;
4082 pVCpu->iem.s.uCurXcpt = u8Vector;
4083 pVCpu->iem.s.fCurXcpt = fFlags;
4084 pVCpu->iem.s.uCurXcptErr = uErr;
4085 pVCpu->iem.s.uCurXcptCr2 = uCr2;
4086
4087 /*
4088 * Extensive logging.
4089 */
4090#if defined(LOG_ENABLED) && defined(IN_RING3)
4091 if (LogIs3Enabled())
4092 {
4093 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR_MASK);
4094 char szRegs[4096];
4095 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
4096 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
4097 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
4098 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
4099 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
4100 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
4101 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
4102 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
4103 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
4104 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
4105 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
4106 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4107 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4108 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4109 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4110 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4111 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4112 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4113 " efer=%016VR{efer}\n"
4114 " pat=%016VR{pat}\n"
4115 " sf_mask=%016VR{sf_mask}\n"
4116 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4117 " lstar=%016VR{lstar}\n"
4118 " star=%016VR{star} cstar=%016VR{cstar}\n"
4119 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4120 );
4121
4122 char szInstr[256];
4123 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4124 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4125 szInstr, sizeof(szInstr), NULL);
4126 Log3(("%s%s\n", szRegs, szInstr));
4127 }
4128#endif /* LOG_ENABLED */
4129
4130 /*
4131 * Stats.
4132 */
4133 uint64_t const uTimestamp = ASMReadTSC();
4134 if (!(fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT))
4135 {
4136 STAM_REL_STATS({ pVCpu->iem.s.aStatInts[u8Vector] += 1; });
4137 EMHistoryAddExit(pVCpu,
4138 fFlags & IEM_XCPT_FLAGS_T_EXT_INT
4139 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_IEM, u8Vector)
4140 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_IEM, u8Vector | 0x100),
4141 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base, uTimestamp);
4142 }
4143 else
4144 {
4145 if (u8Vector < RT_ELEMENTS(pVCpu->iem.s.aStatXcpts))
4146 STAM_REL_COUNTER_INC(&pVCpu->iem.s.aStatXcpts[u8Vector]);
4147 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_XCPT, u8Vector),
4148 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base, uTimestamp);
4149 if (fFlags & IEM_XCPT_FLAGS_ERR)
4150 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_XCPT, u8Vector | EMEXIT_F_XCPT_ERRCD), uErr, uTimestamp);
4151 if (fFlags & IEM_XCPT_FLAGS_CR2)
4152 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_XCPT, u8Vector | EMEXIT_F_XCPT_CR2), uCr2, uTimestamp);
4153 }
4154
4155 /*
4156 * A #PF implies an INVLPG for the CR2 value (see 4.10.1.1 in Intel SDM Vol 3)
4157 * to ensure that a stale TLB or paging cache entry will only cause one
4158 * spurious #PF.
4159 */
4160 if ( u8Vector == X86_XCPT_PF
4161 && (fFlags & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_CR2)) == (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_CR2))
4162 IEMTlbInvalidatePage(pVCpu, uCr2);
4163
4164 /*
4165 * Call the mode specific worker function.
4166 */
4167 VBOXSTRICTRC rcStrict;
4168 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
4169 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4170 else if (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LMA)
4171 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4172 else
4173 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4174
4175 /* Flush the prefetch buffer. */
4176 iemOpcodeFlushHeavy(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
4177
4178 /*
4179 * Unwind.
4180 */
4181 pVCpu->iem.s.cXcptRecursions--;
4182 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
4183 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
4184 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u depth=%d\n",
4185 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel,
4186 pVCpu->cpum.GstCtx.esp, IEM_GET_CPL(pVCpu), pVCpu->iem.s.cXcptRecursions + 1));
4187 return rcStrict;
4188}
4189
4190#ifdef IEM_WITH_SETJMP
4191/**
4192 * See iemRaiseXcptOrInt. Will not return.
4193 */
4194DECL_NO_RETURN(void)
4195iemRaiseXcptOrIntJmp(PVMCPUCC pVCpu,
4196 uint8_t cbInstr,
4197 uint8_t u8Vector,
4198 uint32_t fFlags,
4199 uint16_t uErr,
4200 uint64_t uCr2) IEM_NOEXCEPT_MAY_LONGJMP
4201{
4202 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4203 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
4204}
4205#endif
4206
4207
4208/** \#DE - 00. */
4209VBOXSTRICTRC iemRaiseDivideError(PVMCPUCC pVCpu) RT_NOEXCEPT
4210{
4211 if (GCMIsInterceptingXcptDE(pVCpu))
4212 {
4213 int rc = GCMXcptDE(pVCpu, &pVCpu->cpum.GstCtx);
4214 if (rc == VINF_SUCCESS)
4215 {
4216 Log(("iemRaiseDivideError: Restarting instruction because of GCMXcptDE\n"));
4217 return VINF_IEM_RAISED_XCPT; /* must return non-zero status here to cause an instruction restart */
4218 }
4219 }
4220 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4221}
4222
4223
4224#ifdef IEM_WITH_SETJMP
4225/** \#DE - 00. */
4226DECL_NO_RETURN(void) iemRaiseDivideErrorJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4227{
4228 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4229}
4230#endif
4231
4232
4233/** \#DB - 01.
4234 * @note This automatically clears DR7.GD. */
4235VBOXSTRICTRC iemRaiseDebugException(PVMCPUCC pVCpu) RT_NOEXCEPT
4236{
4237 /* This always clears RF (via IEM_XCPT_FLAGS_DRx_INSTR_BP). */
4238 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
4239 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_DRx_INSTR_BP, 0, 0);
4240}
4241
4242
4243/** \#BR - 05. */
4244VBOXSTRICTRC iemRaiseBoundRangeExceeded(PVMCPUCC pVCpu) RT_NOEXCEPT
4245{
4246 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_BR, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4247}
4248
4249
4250/** \#UD - 06. */
4251VBOXSTRICTRC iemRaiseUndefinedOpcode(PVMCPUCC pVCpu) RT_NOEXCEPT
4252{
4253 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4254}
4255
4256
4257#ifdef IEM_WITH_SETJMP
4258/** \#UD - 06. */
4259DECL_NO_RETURN(void) iemRaiseUndefinedOpcodeJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4260{
4261 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4262}
4263#endif
4264
4265
4266/** \#NM - 07. */
4267VBOXSTRICTRC iemRaiseDeviceNotAvailable(PVMCPUCC pVCpu) RT_NOEXCEPT
4268{
4269 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4270}
4271
4272
4273#ifdef IEM_WITH_SETJMP
4274/** \#NM - 07. */
4275DECL_NO_RETURN(void) iemRaiseDeviceNotAvailableJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4276{
4277 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4278}
4279#endif
4280
4281
4282/** \#TS(err) - 0a. */
4283VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4284{
4285 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4286}
4287
4288
4289/** \#TS(tr) - 0a. */
4290VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPUCC pVCpu) RT_NOEXCEPT
4291{
4292 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4293 pVCpu->cpum.GstCtx.tr.Sel, 0);
4294}
4295
4296
4297/** \#TS(0) - 0a. */
4298VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPUCC pVCpu) RT_NOEXCEPT
4299{
4300 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4301 0, 0);
4302}
4303
4304
4305/** \#TS(err) - 0a. */
4306VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4307{
4308 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4309 uSel & X86_SEL_MASK_OFF_RPL, 0);
4310}
4311
4312
4313/** \#NP(err) - 0b. */
4314VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4315{
4316 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4317}
4318
4319
4320/** \#NP(sel) - 0b. */
4321VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4322{
4323 Log(("iemRaiseSelectorNotPresentBySelector: cs:rip=%04x:%RX64 uSel=%#x\n",
4324 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uSel));
4325 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4326 uSel & ~X86_SEL_RPL, 0);
4327}
4328
4329
4330/** \#SS(seg) - 0c. */
4331VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4332{
4333 Log(("iemRaiseStackSelectorNotPresentBySelector: cs:rip=%04x:%RX64 uSel=%#x\n",
4334 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uSel));
4335 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4336 uSel & ~X86_SEL_RPL, 0);
4337}
4338
4339
4340/** \#SS(err) - 0c. */
4341VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4342{
4343 Log(("iemRaiseStackSelectorNotPresentWithErr: cs:rip=%04x:%RX64 uErr=%#x\n",
4344 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uErr));
4345 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4346}
4347
4348
4349/** \#GP(n) - 0d. */
4350VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4351{
4352 Log(("iemRaiseGeneralProtectionFault: cs:rip=%04x:%RX64 uErr=%#x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uErr));
4353 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4354}
4355
4356
4357/** \#GP(0) - 0d. */
4358VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPUCC pVCpu) RT_NOEXCEPT
4359{
4360 Log(("iemRaiseGeneralProtectionFault0: cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
4361 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4362}
4363
4364#ifdef IEM_WITH_SETJMP
4365/** \#GP(0) - 0d. */
4366DECL_NO_RETURN(void) iemRaiseGeneralProtectionFault0Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4367{
4368 Log(("iemRaiseGeneralProtectionFault0Jmp: cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
4369 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4370}
4371#endif
4372
4373
4374/** \#GP(sel) - 0d. */
4375VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
4376{
4377 Log(("iemRaiseGeneralProtectionFaultBySelector: cs:rip=%04x:%RX64 Sel=%#x\n",
4378 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, Sel));
4379 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4380 Sel & ~X86_SEL_RPL, 0);
4381}
4382
4383
4384/** \#GP(0) - 0d. */
4385VBOXSTRICTRC iemRaiseNotCanonical(PVMCPUCC pVCpu) RT_NOEXCEPT
4386{
4387 Log(("iemRaiseNotCanonical: cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
4388 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4389}
4390
4391
4392/** \#GP(sel) - 0d. */
4393VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4394{
4395 Log(("iemRaiseSelectorBounds: cs:rip=%04x:%RX64 iSegReg=%d fAccess=%#x\n",
4396 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, iSegReg, fAccess));
4397 NOREF(iSegReg); NOREF(fAccess);
4398 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4399 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4400}
4401
4402#ifdef IEM_WITH_SETJMP
4403/** \#GP(sel) - 0d, longjmp. */
4404DECL_NO_RETURN(void) iemRaiseSelectorBoundsJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP
4405{
4406 Log(("iemRaiseSelectorBoundsJmp: cs:rip=%04x:%RX64 iSegReg=%d fAccess=%#x\n",
4407 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, iSegReg, fAccess));
4408 NOREF(iSegReg); NOREF(fAccess);
4409 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4410 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4411}
4412#endif
4413
4414/** \#GP(sel) - 0d. */
4415VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
4416{
4417 Log(("iemRaiseSelectorBoundsBySelector: cs:rip=%04x:%RX64 Sel=%#x\n",
4418 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, Sel));
4419 NOREF(Sel);
4420 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4421}
4422
4423#ifdef IEM_WITH_SETJMP
4424/** \#GP(sel) - 0d, longjmp. */
4425DECL_NO_RETURN(void) iemRaiseSelectorBoundsBySelectorJmp(PVMCPUCC pVCpu, RTSEL Sel) IEM_NOEXCEPT_MAY_LONGJMP
4426{
4427 Log(("iemRaiseSelectorBoundsBySelectorJmp: cs:rip=%04x:%RX64 Sel=%#x\n",
4428 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, Sel));
4429 NOREF(Sel);
4430 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4431}
4432#endif
4433
4434
4435/** \#GP(sel) - 0d. */
4436VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4437{
4438 Log(("iemRaiseSelectorInvalidAccess: cs:rip=%04x:%RX64 iSegReg=%d fAccess=%#x\n",
4439 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, iSegReg, fAccess));
4440 NOREF(iSegReg); NOREF(fAccess);
4441 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4442}
4443
4444#ifdef IEM_WITH_SETJMP
4445/** \#GP(sel) - 0d, longjmp. */
4446DECL_NO_RETURN(void) iemRaiseSelectorInvalidAccessJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP
4447{
4448 NOREF(iSegReg); NOREF(fAccess);
4449 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4450}
4451#endif
4452
4453
4454/** \#PF(n) - 0e. */
4455VBOXSTRICTRC iemRaisePageFault(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess, uint32_t fAccess, int rc) RT_NOEXCEPT
4456{
4457 uint16_t uErr;
4458 switch (rc)
4459 {
4460 case VERR_PAGE_NOT_PRESENT:
4461 case VERR_PAGE_TABLE_NOT_PRESENT:
4462 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
4463 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
4464 uErr = 0;
4465 break;
4466
4467 case VERR_RESERVED_PAGE_TABLE_BITS:
4468 uErr = X86_TRAP_PF_P | X86_TRAP_PF_RSVD;
4469 break;
4470
4471 default:
4472 AssertMsgFailed(("%Rrc\n", rc));
4473 RT_FALL_THRU();
4474 case VERR_ACCESS_DENIED:
4475 uErr = X86_TRAP_PF_P;
4476 break;
4477 }
4478
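/* Remaining bits: US for CPL 3, ID for NX-relevant instruction fetches, RW for
   writes. For instance, a ring-3 write to a present but read-only page ends up
   as uErr = P | US | RW (0x7). */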
4479 if (IEM_GET_CPL(pVCpu) == 3)
4480 uErr |= X86_TRAP_PF_US;
4481
4482 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
4483 && ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
4484 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) ) )
4485 uErr |= X86_TRAP_PF_ID;
4486
4487#if 0 /* This is so much non-sense, really. Why was it done like that? */
4488 /* Note! RW access callers reporting a WRITE protection fault, will clear
4489 the READ flag before calling. So, read-modify-write accesses (RW)
4490 can safely be reported as READ faults. */
4491 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
4492 uErr |= X86_TRAP_PF_RW;
4493#else
4494 if (fAccess & IEM_ACCESS_TYPE_WRITE)
4495 {
4496 /// @todo r=bird: bs3-cpu-basic-2 wants X86_TRAP_PF_RW for xchg and cmpxchg
4497 /// (regardless of outcome of the comparison in the latter case).
4498 //if (!(fAccess & IEM_ACCESS_TYPE_READ))
4499 uErr |= X86_TRAP_PF_RW;
4500 }
4501#endif
4502
4503 /* For FXSAVE and FRSTOR the #PF is typically reported at the max address
4504 of the memory operand rather than at the start of it. (Not sure what
4505 happens if it crosses a page boundary.) The current heuristic for
4506 this is to report the #PF for the last byte if the access is more than
4507 64 bytes. This is probably not correct, but we can work that out later,
4508 main objective now is to get FXSAVE to work like for real hardware and
4509 make bs3-cpu-basic2 work. */
4510 if (cbAccess <= 64)
4511 { /* likely */ }
4512 else
4513 GCPtrWhere += cbAccess - 1;
4514
4515 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
4516 uErr, GCPtrWhere);
4517}
4518
4519#ifdef IEM_WITH_SETJMP
4520/** \#PF(n) - 0e, longjmp. */
4521DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess,
4522 uint32_t fAccess, int rc) IEM_NOEXCEPT_MAY_LONGJMP
4523{
4524 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, cbAccess, fAccess, rc)));
4525}
4526#endif
4527
4528
4529/** \#MF(0) - 10. */
4530VBOXSTRICTRC iemRaiseMathFault(PVMCPUCC pVCpu) RT_NOEXCEPT
4531{
4532 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_NE)
4533 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4534
4535 /* Convert a #MF into a FERR -> IRQ 13. See @bugref{6117}. */
4536 PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13 /* u8Irq */, 1 /* u8Level */, 0 /* uTagSrc */);
4537 return iemRegUpdateRipAndFinishClearingRF(pVCpu);
4538}
4539
4540#ifdef IEM_WITH_SETJMP
4541/** \#MF(0) - 10, longjmp. */
4542DECL_NO_RETURN(void) iemRaiseMathFaultJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4543{
4544 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaiseMathFault(pVCpu)));
4545}
4546#endif
4547
4548
4549/** \#AC(0) - 11. */
4550VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPUCC pVCpu) RT_NOEXCEPT
4551{
4552 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4553}
4554
4555#ifdef IEM_WITH_SETJMP
4556/** \#AC(0) - 11, longjmp. */
4557DECL_NO_RETURN(void) iemRaiseAlignmentCheckExceptionJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4558{
4559 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaiseAlignmentCheckException(pVCpu)));
4560}
4561#endif
4562
4563
4564/** \#XF(0)/\#XM(0) - 19. */
4565VBOXSTRICTRC iemRaiseSimdFpException(PVMCPUCC pVCpu) RT_NOEXCEPT
4566{
4567 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_XF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4568}
4569
4570
4571#ifdef IEM_WITH_SETJMP
4572/** \#XF(0)/\#XM(0) - 19s, longjmp. */
4573DECL_NO_RETURN(void) iemRaiseSimdFpExceptionJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4574{
4575 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaiseSimdFpException(pVCpu)));
4576}
4577#endif
4578
4579
4580/** Accessed via IEMOP_RAISE_DIVIDE_ERROR. */
4581IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
4582{
4583 NOREF(cbInstr);
4584 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4585}
4586
4587
4588/** Accessed via IEMOP_RAISE_INVALID_LOCK_PREFIX. */
4589IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
4590{
4591 NOREF(cbInstr);
4592 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4593}
4594
4595
4596/** Accessed via IEMOP_RAISE_INVALID_OPCODE. */
4597IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
4598{
4599 NOREF(cbInstr);
4600 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4601}
4602
4603
4604/** @} */
4605
4606/** @name Common opcode decoders.
4607 * @{
4608 */
4609//#include <iprt/mem.h>
4610
4611/**
4612 * Used to add extra details about a stub case.
4613 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4614 */
4615void iemOpStubMsg2(PVMCPUCC pVCpu) RT_NOEXCEPT
4616{
4617#if defined(LOG_ENABLED) && defined(IN_RING3)
4618 PVM pVM = pVCpu->CTX_SUFF(pVM);
4619 char szRegs[4096];
4620 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
4621 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
4622 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
4623 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
4624 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
4625 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
4626 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
4627 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
4628 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
4629 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
4630 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
4631 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4632 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4633 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4634 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4635 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4636 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4637 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4638 " efer=%016VR{efer}\n"
4639 " pat=%016VR{pat}\n"
4640 " sf_mask=%016VR{sf_mask}\n"
4641 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4642 " lstar=%016VR{lstar}\n"
4643 " star=%016VR{star} cstar=%016VR{cstar}\n"
4644 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4645 );
4646
4647 char szInstr[256];
4648 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4649 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4650 szInstr, sizeof(szInstr), NULL);
4651
4652 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
4653#else
4654 RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip);
4655#endif
4656}
4657
4658/** @} */
4659
4660
4661
4662/** @name Register Access.
4663 * @{
4664 */
4665
4666/**
4667 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
4668 *
4669 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4670 * segment limit.
4671 *
4672 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4673 * @param cbInstr Instruction size.
4674 * @param offNextInstr The offset of the next instruction.
4675 * @param enmEffOpSize Effective operand size.
4676 */
4677VBOXSTRICTRC iemRegRipRelativeJumpS8AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int8_t offNextInstr,
4678 IEMMODE enmEffOpSize) RT_NOEXCEPT
4679{
4680 switch (enmEffOpSize)
4681 {
4682 case IEMMODE_16BIT:
4683 {
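/* uNewIp is a uint16_t, so the addition wraps at 64 KiB just like a real 16-bit IP update. */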
4684 uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + (int16_t)offNextInstr;
4685 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
4686 || IEM_IS_64BIT_CODE(pVCpu) /* no CS limit checks in 64-bit mode */))
4687 pVCpu->cpum.GstCtx.rip = uNewIp;
4688 else
4689 return iemRaiseGeneralProtectionFault0(pVCpu);
4690 break;
4691 }
4692
4693 case IEMMODE_32BIT:
4694 {
4695 Assert(!IEM_IS_64BIT_CODE(pVCpu));
4696 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
4697
4698 uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + (int32_t)offNextInstr;
4699 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
4700 pVCpu->cpum.GstCtx.rip = uNewEip;
4701 else
4702 return iemRaiseGeneralProtectionFault0(pVCpu);
4703 break;
4704 }
4705
4706 case IEMMODE_64BIT:
4707 {
4708 Assert(IEM_IS_64BIT_CODE(pVCpu));
4709
4710 uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
4711 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
4712 pVCpu->cpum.GstCtx.rip = uNewRip;
4713 else
4714 return iemRaiseGeneralProtectionFault0(pVCpu);
4715 break;
4716 }
4717
4718 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4719 }
4720
4721#ifndef IEM_WITH_CODE_TLB
4722 /* Flush the prefetch buffer. */
4723 pVCpu->iem.s.cbOpcode = cbInstr;
4724#endif
4725
4726 /*
4727 * Clear RF and finish the instruction (maybe raise #DB).
4728 */
4729 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
4730}
4731
4732
4733/**
4734 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
4735 *
4736 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4737 * segment limit.
4738 *
4739 * @returns Strict VBox status code.
4740 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4741 * @param cbInstr Instruction size.
4742 * @param offNextInstr The offset of the next instruction.
4743 */
4744VBOXSTRICTRC iemRegRipRelativeJumpS16AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int16_t offNextInstr) RT_NOEXCEPT
4745{
4746 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
4747
4748 uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + offNextInstr;
4749 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
4750 || IEM_IS_64BIT_CODE(pVCpu) /* no limit checking in 64-bit mode */))
4751 pVCpu->cpum.GstCtx.rip = uNewIp;
4752 else
4753 return iemRaiseGeneralProtectionFault0(pVCpu);
4754
4755#ifndef IEM_WITH_CODE_TLB
4756 /* Flush the prefetch buffer. */
4757 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4758#endif
4759
4760 /*
4761 * Clear RF and finish the instruction (maybe raise #DB).
4762 */
4763 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
4764}
4765
4766
4767/**
4768 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
4769 *
4770 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4771 * segment limit.
4772 *
4773 * @returns Strict VBox status code.
4774 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4775 * @param cbInstr Instruction size.
4776 * @param offNextInstr The offset of the next instruction.
4777 * @param enmEffOpSize Effective operand size.
4778 */
4779VBOXSTRICTRC iemRegRipRelativeJumpS32AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int32_t offNextInstr,
4780 IEMMODE enmEffOpSize) RT_NOEXCEPT
4781{
4782 if (enmEffOpSize == IEMMODE_32BIT)
4783 {
4784 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX); Assert(!IEM_IS_64BIT_CODE(pVCpu));
4785
4786 uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + offNextInstr;
4787 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
4788 pVCpu->cpum.GstCtx.rip = uNewEip;
4789 else
4790 return iemRaiseGeneralProtectionFault0(pVCpu);
4791 }
4792 else
4793 {
4794 Assert(enmEffOpSize == IEMMODE_64BIT);
4795
4796 uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
4797 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
4798 pVCpu->cpum.GstCtx.rip = uNewRip;
4799 else
4800 return iemRaiseGeneralProtectionFault0(pVCpu);
4801 }
4802
4803#ifndef IEM_WITH_CODE_TLB
4804 /* Flush the prefetch buffer. */
4805 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4806#endif
4807
4808 /*
4809 * Clear RF and finish the instruction (maybe raise #DB).
4810 */
4811 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
4812}
4813
4814/** @} */
4815
4816
4817/** @name FPU access and helpers.
4818 *
4819 * @{
4820 */
4821
4822/**
4823 * Updates the x87.DS and FPUDP registers.
4824 *
4825 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4826 * @param pFpuCtx The FPU context.
4827 * @param iEffSeg The effective segment register.
4828 * @param GCPtrEff The effective address relative to @a iEffSeg.
4829 */
4830DECLINLINE(void) iemFpuUpdateDP(PVMCPUCC pVCpu, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
4831{
4832 RTSEL sel;
4833 switch (iEffSeg)
4834 {
4835 case X86_SREG_DS: sel = pVCpu->cpum.GstCtx.ds.Sel; break;
4836 case X86_SREG_SS: sel = pVCpu->cpum.GstCtx.ss.Sel; break;
4837 case X86_SREG_CS: sel = pVCpu->cpum.GstCtx.cs.Sel; break;
4838 case X86_SREG_ES: sel = pVCpu->cpum.GstCtx.es.Sel; break;
4839 case X86_SREG_FS: sel = pVCpu->cpum.GstCtx.fs.Sel; break;
4840 case X86_SREG_GS: sel = pVCpu->cpum.GstCtx.gs.Sel; break;
4841 default:
4842 AssertMsgFailed(("%d\n", iEffSeg));
4843 sel = pVCpu->cpum.GstCtx.ds.Sel;
4844 }
4845 /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
4846 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
4847 {
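/* Real/V86 mode: store the linear address (16 * selector + offset) and leave the selector field zero. */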
4848 pFpuCtx->DS = 0;
4849 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
4850 }
4851 else if (!IEM_IS_LONG_MODE(pVCpu)) /** @todo this is weird. explain. */
4852 {
4853 pFpuCtx->DS = sel;
4854 pFpuCtx->FPUDP = GCPtrEff;
4855 }
4856 else
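/* Long mode: the full 64-bit data pointer overlays the 32-bit FPUDP, DS and reserved fields of the fxsave image. */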
4857 *(uint64_t *)&pFpuCtx->FPUDP = GCPtrEff;
4858}
4859
4860
4861/**
4862 * Rotates the stack registers in the push direction.
4863 *
4864 * @param pFpuCtx The FPU context.
4865 * @remarks This is a complete waste of time, but fxsave stores the registers in
4866 * stack order.
4867 */
4868DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
4869{
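/* aRegs[] mirrors ST(0)..ST(7): the value the caller parked in aRegs[7] becomes the
   new ST(0), while each old ST(n) moves to ST(n+1), matching the TOP decrement. */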
4870 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
4871 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
4872 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
4873 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
4874 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
4875 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
4876 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
4877 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
4878 pFpuCtx->aRegs[0].r80 = r80Tmp;
4879}
4880
4881
4882/**
4883 * Rotates the stack registers in the pop direction.
4884 *
4885 * @param pFpuCtx The FPU context.
4886 * @remarks This is a complete waste of time, but fxsave stores the registers in
4887 * stack order.
4888 */
4889DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
4890{
4891 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
4892 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
4893 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
4894 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
4895 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
4896 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
4897 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
4898 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
4899 pFpuCtx->aRegs[7].r80 = r80Tmp;
4900}
4901
4902
4903/**
4904 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
4905 * exception prevents it.
4906 *
4907 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4908 * @param pResult The FPU operation result to push.
4909 * @param pFpuCtx The FPU context.
4910 */
4911static void iemFpuMaybePushResult(PVMCPU pVCpu, PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx) RT_NOEXCEPT
4912{
4913 /* Update FSW and bail if there are pending exceptions afterwards. */
4914 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
4915 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
4916 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4917 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4918 {
4919 if ((fFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4920 Log11(("iemFpuMaybePushResult: %04x:%08RX64: FSW %#x -> %#x\n",
4921 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fFsw));
4922 pFpuCtx->FSW = fFsw;
4923 return;
4924 }
4925
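/* Adding 7 modulo 8 is the same as subtracting 1, i.e. TOP is decremented for the push. */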
4926 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
4927 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
4928 {
4929 /* All is fine, push the actual value. */
4930 pFpuCtx->FTW |= RT_BIT(iNewTop);
4931 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
4932 }
4933 else if (pFpuCtx->FCW & X86_FCW_IM)
4934 {
4935 /* Masked stack overflow, push QNaN. */
4936 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
4937 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
4938 }
4939 else
4940 {
4941 /* Raise stack overflow, don't push anything. */
4942 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
4943 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
4944 Log11(("iemFpuMaybePushResult: %04x:%08RX64: stack overflow (FSW=%#x)\n",
4945 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
4946 return;
4947 }
4948
4949 fFsw &= ~X86_FSW_TOP_MASK;
4950 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
4951 pFpuCtx->FSW = fFsw;
4952
4953 iemFpuRotateStackPush(pFpuCtx);
4954 RT_NOREF(pVCpu);
4955}
4956
4957
4958/**
4959 * Stores a result in a FPU register and updates the FSW and FTW.
4960 *
4961 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4962 * @param pFpuCtx The FPU context.
4963 * @param pResult The result to store.
4964 * @param iStReg Which FPU register to store it in.
4965 */
4966static void iemFpuStoreResultOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg) RT_NOEXCEPT
4967{
4968 Assert(iStReg < 8);
4969 uint16_t fNewFsw = pFpuCtx->FSW;
4970 uint16_t const iReg = (X86_FSW_TOP_GET(fNewFsw) + iStReg) & X86_FSW_TOP_SMASK;
4971 fNewFsw &= ~X86_FSW_C_MASK;
4972 fNewFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
4973 if ((fNewFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4974 Log11(("iemFpuStoreResultOnly: %04x:%08RX64: FSW %#x -> %#x\n",
4975 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fNewFsw));
4976 pFpuCtx->FSW = fNewFsw;
4977 pFpuCtx->FTW |= RT_BIT(iReg);
4978 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
4979 RT_NOREF(pVCpu);
4980}
4981
4982
4983/**
4984 * Only updates the FPU status word (FSW) with the result of the current
4985 * instruction.
4986 *
4987 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4988 * @param pFpuCtx The FPU context.
4989 * @param u16FSW The FSW output of the current instruction.
4990 */
4991static void iemFpuUpdateFSWOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint16_t u16FSW) RT_NOEXCEPT
4992{
4993 uint16_t fNewFsw = pFpuCtx->FSW;
4994 fNewFsw &= ~X86_FSW_C_MASK;
4995 fNewFsw |= u16FSW & ~X86_FSW_TOP_MASK;
4996 if ((fNewFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4997 Log11(("iemFpuUpdateFSWOnly: %04x:%08RX64: FSW %#x -> %#x\n",
4998 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fNewFsw));
4999 pFpuCtx->FSW = fNewFsw;
5000 RT_NOREF(pVCpu);
5001}
5002
5003
5004/**
5005 * Pops one item off the FPU stack if no pending exception prevents it.
5006 *
5007 * @param pFpuCtx The FPU context.
5008 */
5009static void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx) RT_NOEXCEPT
5010{
5011 /* Check pending exceptions. */
5012 uint16_t uFSW = pFpuCtx->FSW;
5013 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
5014 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
5015 return;
5016
5017 /* TOP++ for the pop: adding 9 below is +1 modulo 8 within the 3-bit TOP field. */
5018 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
5019 uFSW &= ~X86_FSW_TOP_MASK;
5020 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
5021 pFpuCtx->FSW = uFSW;
5022
5023 /* Mark the previous ST0 as empty. */
5024 iOldTop >>= X86_FSW_TOP_SHIFT;
5025 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
5026
5027 /* Rotate the registers. */
5028 iemFpuRotateStackPop(pFpuCtx);
5029}
5030
5031
5032/**
5033 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
5034 *
5035 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5036 * @param pResult The FPU operation result to push.
5037 * @param uFpuOpcode The FPU opcode value.
5038 */
5039void iemFpuPushResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint16_t uFpuOpcode) RT_NOEXCEPT
5040{
5041 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5042 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5043 iemFpuMaybePushResult(pVCpu, pResult, pFpuCtx);
5044}
5045
5046
5047/**
5048 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
5049 * and sets FPUDP and FPUDS.
5050 *
5051 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5052 * @param pResult The FPU operation result to push.
5053 * @param iEffSeg The effective segment register.
5054 * @param GCPtrEff The effective address relative to @a iEffSeg.
5055 * @param uFpuOpcode The FPU opcode value.
5056 */
5057void iemFpuPushResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff,
5058 uint16_t uFpuOpcode) RT_NOEXCEPT
5059{
5060 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5061 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5062 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5063 iemFpuMaybePushResult(pVCpu, pResult, pFpuCtx);
5064}
5065
5066
5067/**
5068 * Replace ST0 with the first value and push the second onto the FPU stack,
5069 * unless a pending exception prevents it.
5070 *
5071 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5072 * @param pResult The FPU operation result to store and push.
5073 * @param uFpuOpcode The FPU opcode value.
5074 */
5075void iemFpuPushResultTwo(PVMCPUCC pVCpu, PIEMFPURESULTTWO pResult, uint16_t uFpuOpcode) RT_NOEXCEPT
5076{
5077 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5078 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5079
5080 /* Update FSW and bail if there are pending exceptions afterwards. */
5081 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
5082 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
5083 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
5084 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
5085 {
5086 if ((fFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
5087 Log11(("iemFpuPushResultTwo: %04x:%08RX64: FSW %#x -> %#x\n",
5088 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fFsw));
5089 pFpuCtx->FSW = fFsw;
5090 return;
5091 }
5092
5093 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
5094 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
5095 {
5096 /* All is fine, push the actual value. */
5097 pFpuCtx->FTW |= RT_BIT(iNewTop);
5098 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
5099 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
5100 }
5101 else if (pFpuCtx->FCW & X86_FCW_IM)
5102 {
5103 /* Masked stack overflow, push QNaN. */
5104 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
5105 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
5106 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5107 }
5108 else
5109 {
5110 /* Raise stack overflow, don't push anything. */
5111 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
5112 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
5113 Log11(("iemFpuPushResultTwo: %04x:%08RX64: stack overflow (FSW=%#x)\n",
5114 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5115 return;
5116 }
5117
5118 fFsw &= ~X86_FSW_TOP_MASK;
5119 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
5120 pFpuCtx->FSW = fFsw;
5121
5122 iemFpuRotateStackPush(pFpuCtx);
5123}
5124
5125
5126/**
5127 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
5128 * FOP.
5129 *
5130 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5131 * @param pResult The result to store.
5132 * @param iStReg Which FPU register to store it in.
5133 * @param uFpuOpcode The FPU opcode value.
5134 */
5135void iemFpuStoreResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5136{
5137 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5138 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5139 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5140}
5141
5142
5143/**
5144 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
5145 * FOP, and then pops the stack.
5146 *
5147 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5148 * @param pResult The result to store.
5149 * @param iStReg Which FPU register to store it in.
5150 * @param uFpuOpcode The FPU opcode value.
5151 */
5152void iemFpuStoreResultThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5153{
5154 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5155 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5156 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5157 iemFpuMaybePopOne(pFpuCtx);
5158}
5159
5160
5161/**
5162 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
5163 * FPUDP, and FPUDS.
5164 *
5165 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5166 * @param pResult The result to store.
5167 * @param iStReg Which FPU register to store it in.
5168 * @param iEffSeg The effective memory operand selector register.
5169 * @param GCPtrEff The effective memory operand offset.
5170 * @param uFpuOpcode The FPU opcode value.
5171 */
5172void iemFpuStoreResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
5173 uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5174{
5175 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5176 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5177 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5178 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5179}
5180
5181
5182/**
5183 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
5184 * FPUDP, and FPUDS, and then pops the stack.
5185 *
5186 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5187 * @param pResult The result to store.
5188 * @param iStReg Which FPU register to store it in.
5189 * @param iEffSeg The effective memory operand selector register.
5190 * @param GCPtrEff The effective memory operand offset.
5191 * @param uFpuOpcode The FPU opcode value.
5192 */
5193void iemFpuStoreResultWithMemOpThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult,
5194 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5195{
5196 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5197 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5198 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5199 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5200 iemFpuMaybePopOne(pFpuCtx);
5201}
5202
5203
5204/**
5205 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
5206 *
5207 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5208 * @param uFpuOpcode The FPU opcode value.
5209 */
5210void iemFpuUpdateOpcodeAndIp(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5211{
5212 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5213 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5214}
5215
5216
5217/**
5218 * Updates the FSW, FOP, FPUIP, and FPUCS.
5219 *
5220 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5221 * @param u16FSW The FSW from the current instruction.
5222 * @param uFpuOpcode The FPU opcode value.
5223 */
5224void iemFpuUpdateFSW(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT
5225{
5226 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5227 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5228 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5229}
5230
5231
5232/**
5233 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
5234 *
5235 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5236 * @param u16FSW The FSW from the current instruction.
5237 * @param uFpuOpcode The FPU opcode value.
5238 */
5239void iemFpuUpdateFSWThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT
5240{
5241 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5242 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5243 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5244 iemFpuMaybePopOne(pFpuCtx);
5245}
5246
5247
5248/**
5249 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
5250 *
5251 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5252 * @param u16FSW The FSW from the current instruction.
5253 * @param iEffSeg The effective memory operand selector register.
5254 * @param GCPtrEff The effective memory operand offset.
5255 * @param uFpuOpcode The FPU opcode value.
5256 */
5257void iemFpuUpdateFSWWithMemOp(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5258{
5259 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5260 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5261 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5262 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5263}
5264
5265
5266/**
5267 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
5268 *
5269 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5270 * @param u16FSW The FSW from the current instruction.
5271 * @param uFpuOpcode The FPU opcode value.
5272 */
5273void iemFpuUpdateFSWThenPopPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT
5274{
5275 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5276 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5277 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5278 iemFpuMaybePopOne(pFpuCtx);
5279 iemFpuMaybePopOne(pFpuCtx);
5280}
5281
5282
5283/**
5284 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
5285 *
5286 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5287 * @param u16FSW The FSW from the current instruction.
5288 * @param iEffSeg The effective memory operand selector register.
5289 * @param GCPtrEff The effective memory operand offset.
5290 * @param uFpuOpcode The FPU opcode value.
5291 */
5292void iemFpuUpdateFSWWithMemOpThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff,
5293 uint16_t uFpuOpcode) RT_NOEXCEPT
5294{
5295 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5296 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5297 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5298 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5299 iemFpuMaybePopOne(pFpuCtx);
5300}
5301
5302
5303/**
5304 * Worker routine for raising an FPU stack underflow exception.
5305 *
5306 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5307 * @param pFpuCtx The FPU context.
5308 * @param iStReg The stack register being accessed.
5309 */
5310static void iemFpuStackUnderflowOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint8_t iStReg)
5311{
5312 Assert(iStReg < 8 || iStReg == UINT8_MAX);
5313 if (pFpuCtx->FCW & X86_FCW_IM)
5314 {
5315 /* Masked underflow. */
5316 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5317 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5318 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5319 if (iStReg != UINT8_MAX)
5320 {
5321 pFpuCtx->FTW |= RT_BIT(iReg);
5322 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
5323 }
5324 }
5325 else
5326 {
5327 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5328 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5329 Log11(("iemFpuStackUnderflowOnly: %04x:%08RX64: underflow (FSW=%#x)\n",
5330 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5331 }
5332 RT_NOREF(pVCpu);
5333}
5334
5335
5336/**
5337 * Raises a FPU stack underflow exception.
5338 *
5339 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5340 * @param iStReg The destination register that should be loaded
5341 * with QNaN if \#IS is not masked. Specify
5342 * UINT8_MAX if none (like for fcom).
5343 * @param uFpuOpcode The FPU opcode value.
5344 */
5345void iemFpuStackUnderflow(PVMCPUCC pVCpu, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5346{
5347 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5348 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5349 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5350}
5351
5352
5353void iemFpuStackUnderflowWithMemOp(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5354{
5355 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5356 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5357 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5358 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5359}
5360
5361
5362void iemFpuStackUnderflowThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5363{
5364 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5365 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5366 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5367 iemFpuMaybePopOne(pFpuCtx);
5368}
5369
5370
5371void iemFpuStackUnderflowWithMemOpThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff,
5372 uint16_t uFpuOpcode) RT_NOEXCEPT
5373{
5374 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5375 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5376 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5377 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5378 iemFpuMaybePopOne(pFpuCtx);
5379}
5380
5381
5382void iemFpuStackUnderflowThenPopPop(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5383{
5384 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5385 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5386 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, UINT8_MAX);
5387 iemFpuMaybePopOne(pFpuCtx);
5388 iemFpuMaybePopOne(pFpuCtx);
5389}
5390
5391
5392void iemFpuStackPushUnderflow(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5393{
5394 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5395 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5396
5397 if (pFpuCtx->FCW & X86_FCW_IM)
5398 {
5399 /* Masked underflow - Push QNaN. */
5400 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5401 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5402 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5403 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5404 pFpuCtx->FTW |= RT_BIT(iNewTop);
5405 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5406 iemFpuRotateStackPush(pFpuCtx);
5407 }
5408 else
5409 {
5410 /* Exception pending - don't change TOP or the register stack. */
5411 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5412 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5413 Log11(("iemFpuStackPushUnderflow: %04x:%08RX64: underflow (FSW=%#x)\n",
5414 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5415 }
5416}
5417
5418
5419void iemFpuStackPushUnderflowTwo(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5420{
5421 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5422 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5423
5424 if (pFpuCtx->FCW & X86_FCW_IM)
5425 {
5426 /* Masked underflow - Push QNaN. */
5427 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5428 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5429 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5430 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5431 pFpuCtx->FTW |= RT_BIT(iNewTop);
5432 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
5433 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5434 iemFpuRotateStackPush(pFpuCtx);
5435 }
5436 else
5437 {
5438 /* Exception pending - don't change TOP or the register stack. */
5439 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5440 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5441 Log11(("iemFpuStackPushUnderflowTwo: %04x:%08RX64: underflow (FSW=%#x)\n",
5442 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5443 }
5444}
5445
5446
5447/**
5448 * Worker routine for raising an FPU stack overflow exception on a push.
5449 *
5450 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5451 * @param pFpuCtx The FPU context.
5452 */
5453static void iemFpuStackPushOverflowOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx) RT_NOEXCEPT
5454{
5455 if (pFpuCtx->FCW & X86_FCW_IM)
5456 {
5457 /* Masked overflow. */
5458 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5459 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5460 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
5461 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5462 pFpuCtx->FTW |= RT_BIT(iNewTop);
5463 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5464 iemFpuRotateStackPush(pFpuCtx);
5465 }
5466 else
5467 {
5468 /* Exception pending - don't change TOP or the register stack. */
5469 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5470 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5471 Log11(("iemFpuStackPushOverflowOnly: %04x:%08RX64: overflow (FSW=%#x)\n",
5472 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5473 }
5474 RT_NOREF(pVCpu);
5475}
5476
5477
5478/**
5479 * Raises a FPU stack overflow exception on a push.
5480 *
5481 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5482 * @param uFpuOpcode The FPU opcode value.
5483 */
5484void iemFpuStackPushOverflow(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5485{
5486 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5487 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5488 iemFpuStackPushOverflowOnly(pVCpu, pFpuCtx);
5489}
5490
5491
5492/**
5493 * Raises a FPU stack overflow exception on a push with a memory operand.
5494 *
5495 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5496 * @param iEffSeg The effective memory operand selector register.
5497 * @param GCPtrEff The effective memory operand offset.
5498 * @param uFpuOpcode The FPU opcode value.
5499 */
5500void iemFpuStackPushOverflowWithMemOp(PVMCPUCC pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5501{
5502 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5503 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5504 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5505 iemFpuStackPushOverflowOnly(pVCpu, pFpuCtx);
5506}
5507
5508/** @} */
5509
5510
5511/** @name Memory access.
5512 *
5513 * @{
5514 */
5515
5516#undef LOG_GROUP
5517#define LOG_GROUP LOG_GROUP_IEM_MEM
5518
5519/**
5520 * Updates the IEMCPU::cbWritten counter if applicable.
5521 *
5522 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5523 * @param fAccess The access being accounted for.
5524 * @param cbMem The access size.
5525 */
5526DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPUCC pVCpu, uint32_t fAccess, size_t cbMem)
5527{
5528 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
5529 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
5530 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
5531}
5532
5533
5534/**
5535 * Applies the segment limit, base and attributes.
5536 *
5537 * This may raise a \#GP or \#SS.
5538 *
5539 * @returns VBox strict status code.
5540 *
5541 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5542 * @param fAccess The kind of access which is being performed.
5543 * @param iSegReg The index of the segment register to apply.
5544 * This is UINT8_MAX if none (for IDT, GDT, LDT,
5545 * TSS, ++).
5546 * @param cbMem The access size.
5547 * @param pGCPtrMem Pointer to the guest memory address to apply
5548 * segmentation to. Input and output parameter.
5549 */
5550VBOXSTRICTRC iemMemApplySegment(PVMCPUCC pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem) RT_NOEXCEPT
5551{
5552 if (iSegReg == UINT8_MAX)
5553 return VINF_SUCCESS;
5554
5555 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
5556 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
5557 switch (IEM_GET_CPU_MODE(pVCpu))
5558 {
5559 case IEMMODE_16BIT:
5560 case IEMMODE_32BIT:
5561 {
5562 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
5563 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
5564
5565 if ( pSel->Attr.n.u1Present
5566 && !pSel->Attr.n.u1Unusable)
5567 {
5568 Assert(pSel->Attr.n.u1DescType);
5569 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
5570 {
5571 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5572 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
5573 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
5574
5575 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
5576 {
5577 /** @todo CPL check. */
5578 }
5579
5580 /*
5581 * There are two kinds of data selectors, normal and expand down.
5582 */
5583 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
5584 {
5585 if ( GCPtrFirst32 > pSel->u32Limit
5586 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
5587 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5588 }
5589 else
5590 {
5591 /*
5592 * The upper boundary is defined by the B bit, not the G bit!
5593 */
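/* Valid offsets for an expand-down segment are (u32Limit, 0xffff], or
   (u32Limit, 0xffffffff] when the B bit is set. */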
5594 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
5595 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
5596 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5597 }
5598 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
5599 }
5600 else
5601 {
5602 /*
5603 * A code selector can usually be used for reading; writing is
5604 * only permitted in real and V8086 mode.
5605 */
5606 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5607 || ( (fAccess & IEM_ACCESS_TYPE_READ)
5608 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
5609 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
5610 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
5611
5612 if ( GCPtrFirst32 > pSel->u32Limit
5613 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
5614 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5615
5616 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
5617 {
5618 /** @todo CPL check. */
5619 }
5620
5621 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
5622 }
5623 }
5624 else
5625 return iemRaiseGeneralProtectionFault0(pVCpu);
5626 return VINF_SUCCESS;
5627 }
5628
5629 case IEMMODE_64BIT:
5630 {
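/* In 64-bit mode only the FS and GS bases apply; the CS/DS/ES/SS bases are treated as zero. */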
5631 RTGCPTR GCPtrMem = *pGCPtrMem;
5632 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
5633 *pGCPtrMem = GCPtrMem + pSel->u64Base;
5634
5635 Assert(cbMem >= 1);
5636 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
5637 return VINF_SUCCESS;
5638 /** @todo We should probably raise \#SS(0) here if segment is SS; see AMD spec.
5639 * 4.12.2 "Data Limit Checks in 64-bit Mode". */
5640 return iemRaiseGeneralProtectionFault0(pVCpu);
5641 }
5642
5643 default:
5644 AssertFailedReturn(VERR_IEM_IPE_7);
5645 }
5646}
5647
5648
5649/**
5650 * Translates a virtual address to a physical address and checks if we
5651 * can access the page as specified.
5652 *
5653 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5654 * @param GCPtrMem The virtual address.
5655 * @param cbAccess The access size, for raising \#PF correctly for
5656 * FXSAVE and such.
5657 * @param fAccess The intended access.
5658 * @param pGCPhysMem Where to return the physical address.
5659 */
5660VBOXSTRICTRC iemMemPageTranslateAndCheckAccess(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint32_t cbAccess,
5661 uint32_t fAccess, PRTGCPHYS pGCPhysMem) RT_NOEXCEPT
5662{
5663 /** @todo Need a different PGM interface here. We're currently using
5664 * generic / REM interfaces. This won't cut it for R0. */
5665 /** @todo If/when PGM handles paged real-mode, we can remove the hack in
5666 * iemSvmWorldSwitch/iemVmxWorldSwitch to work around raising a page-fault
5667 * here. */
5668 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
5669 PGMPTWALKFAST WalkFast;
5670 AssertCompile(IEM_ACCESS_TYPE_READ == PGMQPAGE_F_READ);
5671 AssertCompile(IEM_ACCESS_TYPE_WRITE == PGMQPAGE_F_WRITE);
5672 AssertCompile(IEM_ACCESS_TYPE_EXEC == PGMQPAGE_F_EXECUTE);
5673 AssertCompile(X86_CR0_WP == PGMQPAGE_F_CR0_WP0);
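/* The XOR below sets PGMQPAGE_F_CR0_WP0 exactly when CR0.WP is clear (the flag
   shares the X86_CR0_WP bit value, see the AssertCompile above). */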
5674 uint32_t fQPage = (fAccess & (PGMQPAGE_F_READ | IEM_ACCESS_TYPE_WRITE | PGMQPAGE_F_EXECUTE))
5675 | (((uint32_t)pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP) ^ X86_CR0_WP);
5676 if (IEM_GET_CPL(pVCpu) == 3 && !(fAccess & IEM_ACCESS_WHAT_SYS))
5677 fQPage |= PGMQPAGE_F_USER_MODE;
5678 int rc = PGMGstQueryPageFast(pVCpu, GCPtrMem, fQPage, &WalkFast);
5679 if (RT_SUCCESS(rc))
5680 {
5681 Assert((WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED) && WalkFast.fFailed == PGM_WALKFAIL_SUCCESS);
5682
5683 /* If the page is writable and does not have the no-exec bit set, all
5684 access is allowed. Otherwise we'll have to check more carefully... */
5685 Assert( (WalkFast.fEffective & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) == (X86_PTE_RW | X86_PTE_US)
5686 || ( ( !(fAccess & IEM_ACCESS_TYPE_WRITE)
5687 || (WalkFast.fEffective & X86_PTE_RW)
5688 || ( ( IEM_GET_CPL(pVCpu) != 3
5689 || (fAccess & IEM_ACCESS_WHAT_SYS))
5690 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)) )
5691 && ( (WalkFast.fEffective & X86_PTE_US)
5692 || IEM_GET_CPL(pVCpu) != 3
5693 || (fAccess & IEM_ACCESS_WHAT_SYS) )
5694 && ( !(fAccess & IEM_ACCESS_TYPE_EXEC)
5695 || !(WalkFast.fEffective & X86_PTE_PAE_NX)
5696 || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) )
5697 )
5698 );
5699
5700 /* PGMGstQueryPageFast sets the A & D bits. */
5701 /** @todo testcase: check when A and D bits are actually set by the CPU. */
5702 Assert(!(~WalkFast.fEffective & (fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A)));
5703
5704 *pGCPhysMem = WalkFast.GCPhys;
5705 return VINF_SUCCESS;
5706 }
5707
5708 LogEx(LOG_GROUP_IEM,("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
5709 /** @todo Check unassigned memory in unpaged mode. */
5710#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5711 if (WalkFast.fFailed & PGM_WALKFAIL_EPT)
5712 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
5713#endif
5714 *pGCPhysMem = NIL_RTGCPHYS;
5715 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess, rc);
5716}
5717
5718#if 0 /*unused*/
5719/**
5720 * Looks up a memory mapping entry.
5721 *
5722 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
5723 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5724 * @param pvMem The memory address.
5725 * @param fAccess The access to.
5726 */
5727DECLINLINE(int) iemMapLookup(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess)
5728{
5729 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
5730 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
5731 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
5732 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5733 return 0;
5734 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
5735 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5736 return 1;
5737 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
5738 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5739 return 2;
5740 return VERR_NOT_FOUND;
5741}
5742#endif
5743
5744/**
5745 * Finds a free memmap entry when using iNextMapping doesn't work.
5746 *
5747 * @returns Memory mapping index, 1024 on failure.
5748 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5749 */
5750static unsigned iemMemMapFindFree(PVMCPUCC pVCpu)
5751{
5752 /*
5753 * The easy case.
5754 */
5755 if (pVCpu->iem.s.cActiveMappings == 0)
5756 {
5757 pVCpu->iem.s.iNextMapping = 1;
5758 return 0;
5759 }
5760
5761 /* There should be enough mappings for all instructions. */
5762 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
5763
5764 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
5765 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
5766 return i;
5767
5768 AssertFailedReturn(1024);
5769}
5770
5771
5772/**
5773 * Commits a bounce buffer that needs writing back and unmaps it.
5774 *
5775 * @returns Strict VBox status code.
5776 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5777 * @param iMemMap The index of the buffer to commit.
5778 * @param fPostponeFail Whether we can postpone writer failures to ring-3.
5779 * Always false in ring-3, obviously.
5780 */
5781static VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPUCC pVCpu, unsigned iMemMap, bool fPostponeFail)
5782{
5783 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
5784 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
5785#ifdef IN_RING3
5786 Assert(!fPostponeFail);
5787 RT_NOREF_PV(fPostponeFail);
5788#endif
5789
5790 /*
5791 * Do the writing.
5792 */
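/* A bounce buffer may span two physical pages; cbFirst/cbSecond give the split and
   each part is written to GCPhysFirst/GCPhysSecond separately below. */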
5793 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
5794 if (!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned)
5795 {
5796 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
5797 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
5798 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
5799 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
5800 {
5801 /*
5802 * Carefully and efficiently dealing with access handler return
5803 * codes makes this a little bloated.
5804 */
5805 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
5806 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
5807 pbBuf,
5808 cbFirst,
5809 PGMACCESSORIGIN_IEM);
5810 if (rcStrict == VINF_SUCCESS)
5811 {
5812 if (cbSecond)
5813 {
5814 rcStrict = PGMPhysWrite(pVM,
5815 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5816 pbBuf + cbFirst,
5817 cbSecond,
5818 PGMACCESSORIGIN_IEM);
5819 if (rcStrict == VINF_SUCCESS)
5820 { /* nothing */ }
5821 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5822 {
5823 LogEx(LOG_GROUP_IEM,
5824 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
5825 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5826 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5827 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5828 }
5829#ifndef IN_RING3
5830 else if (fPostponeFail)
5831 {
5832 LogEx(LOG_GROUP_IEM,
5833 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5834 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5835 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5836 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
5837 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5838 return iemSetPassUpStatus(pVCpu, rcStrict);
5839 }
5840#endif
5841 else
5842 {
5843 LogEx(LOG_GROUP_IEM,
5844 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5845 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5846 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5847 return rcStrict;
5848 }
5849 }
5850 }
5851 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5852 {
5853 if (!cbSecond)
5854 {
5855 LogEx(LOG_GROUP_IEM,
5856 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
5857 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
5858 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5859 }
5860 else
5861 {
5862 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
5863 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5864 pbBuf + cbFirst,
5865 cbSecond,
5866 PGMACCESSORIGIN_IEM);
5867 if (rcStrict2 == VINF_SUCCESS)
5868 {
5869 LogEx(LOG_GROUP_IEM,
5870 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
5871 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5872 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5873 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5874 }
5875 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
5876 {
5877 LogEx(LOG_GROUP_IEM,
5878 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
5879 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5880 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
5881 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
5882 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5883 }
5884#ifndef IN_RING3
5885 else if (fPostponeFail)
5886 {
5887 LogEx(LOG_GROUP_IEM,
5888 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5889 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5890 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5891 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
5892 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5893 return iemSetPassUpStatus(pVCpu, rcStrict);
5894 }
5895#endif
5896 else
5897 {
5898 LogEx(LOG_GROUP_IEM,
5899 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5900 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5901 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
5902 return rcStrict2;
5903 }
5904 }
5905 }
5906#ifndef IN_RING3
5907 else if (fPostponeFail)
5908 {
5909 LogEx(LOG_GROUP_IEM,
5910 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5911 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5912 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5913 if (!cbSecond)
5914 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
5915 else
5916 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
5917 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5918 return iemSetPassUpStatus(pVCpu, rcStrict);
5919 }
5920#endif
5921 else
5922 {
5923 LogEx(LOG_GROUP_IEM,
5924 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
5925 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5926 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5927 return rcStrict;
5928 }
5929 }
5930 else
5931 {
5932 /*
5933 * No access handlers, much simpler.
5934 */
5935 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
5936 if (RT_SUCCESS(rc))
5937 {
5938 if (cbSecond)
5939 {
5940 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
5941 if (RT_SUCCESS(rc))
5942 { /* likely */ }
5943 else
5944 {
5945 LogEx(LOG_GROUP_IEM,
5946 ("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5947 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5948 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
5949 return rc;
5950 }
5951 }
5952 }
5953 else
5954 {
5955 LogEx(LOG_GROUP_IEM,
5956 ("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
5957 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
5958 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5959 return rc;
5960 }
5961 }
5962 }
5963
5964#if defined(IEM_LOG_MEMORY_WRITES)
5965 Log5(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
5966 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
5967 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
5968 Log5(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5969 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
5970 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
5971
5972 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
5973 g_cbIemWrote = cbWrote;
5974 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
5975#endif
5976
5977 /*
5978 * Free the mapping entry.
5979 */
5980 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
5981 Assert(pVCpu->iem.s.cActiveMappings != 0);
5982 pVCpu->iem.s.cActiveMappings--;
5983 return VINF_SUCCESS;
5984}
5985
5986
5987/**
5988 * iemMemMap worker that deals with a request crossing pages.
5989 */
5990static VBOXSTRICTRC
5991iemMemBounceBufferMapCrossPage(PVMCPUCC pVCpu, int iMemMap, void **ppvMem, uint8_t *pbUnmapInfo,
5992 size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
5993{
5994 Assert(cbMem <= GUEST_PAGE_SIZE);
5995
5996 /*
5997 * Do the address translations.
5998 */
5999 uint32_t const cbFirstPage = GUEST_PAGE_SIZE - (uint32_t)(GCPtrFirst & GUEST_PAGE_OFFSET_MASK);
6000 RTGCPHYS GCPhysFirst;
6001 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, cbFirstPage, fAccess, &GCPhysFirst);
6002 if (rcStrict != VINF_SUCCESS)
6003 return rcStrict;
6004 Assert((GCPhysFirst & GUEST_PAGE_OFFSET_MASK) == (GCPtrFirst & GUEST_PAGE_OFFSET_MASK));
6005
6006 uint32_t const cbSecondPage = (uint32_t)cbMem - cbFirstPage;
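    /* Example (sketch): an 8 byte access at page offset 0xffc of a 4 KiB page is
       split as cbFirstPage = 4 and cbSecondPage = 4. */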
6007 RTGCPHYS GCPhysSecond;
6008 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK,
6009 cbSecondPage, fAccess, &GCPhysSecond);
6010 if (rcStrict != VINF_SUCCESS)
6011 return rcStrict;
6012 Assert((GCPhysSecond & GUEST_PAGE_OFFSET_MASK) == 0);
6013 GCPhysSecond &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK; /** @todo why? */
6014
6015 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
6016
6017 /*
6018 * Read in the current memory content if it's a read, execute or partial
6019 * write access.
6020 */
6021 uint8_t * const pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
6022
6023 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
6024 {
6025 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
6026 {
6027 /*
6028 * Must carefully deal with access handler status codes here,
 6029 * which makes the code a bit bloated.
6030 */
6031 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
6032 if (rcStrict == VINF_SUCCESS)
6033 {
6034 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
6035 if (rcStrict == VINF_SUCCESS)
6036 { /*likely */ }
6037 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6038 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6039 else
6040 {
 6041 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict=%Rrc (!!)\n",
6042 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6043 return rcStrict;
6044 }
6045 }
6046 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6047 {
6048 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
6049 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
6050 {
6051 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
6052 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6053 }
6054 else
6055 {
6056 LogEx(LOG_GROUP_IEM,
6057 ("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
6058 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict2) ));
6059 return rcStrict2;
6060 }
6061 }
6062 else
6063 {
 6064 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6065 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6066 return rcStrict;
6067 }
6068 }
6069 else
6070 {
6071 /*
 6072 * No informational status codes here, much more straightforward.
6073 */
6074 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
6075 if (RT_SUCCESS(rc))
6076 {
6077 Assert(rc == VINF_SUCCESS);
6078 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
6079 if (RT_SUCCESS(rc))
6080 Assert(rc == VINF_SUCCESS);
6081 else
6082 {
6083 LogEx(LOG_GROUP_IEM,
6084 ("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
6085 return rc;
6086 }
6087 }
6088 else
6089 {
6090 LogEx(LOG_GROUP_IEM,
6091 ("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
6092 return rc;
6093 }
6094 }
6095 }
6096#ifdef VBOX_STRICT
6097 else
6098 memset(pbBuf, 0xcc, cbMem);
6099 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
6100 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
6101#endif
6102 AssertCompileMemberAlignment(VMCPU, iem.s.aBounceBuffers, 64);
6103
6104 /*
6105 * Commit the bounce buffer entry.
6106 */
6107 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
6108 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
6109 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
6110 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
6111 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
6112 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
6113 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
6114 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6115 pVCpu->iem.s.cActiveMappings++;
6116
6117 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6118 *ppvMem = pbBuf;
6119 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
6120 return VINF_SUCCESS;
6121}
6122
6123
6124/**
 6125 * iemMemMap worker that deals with iemMemPageMap failures.
6126 */
6127static VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPUCC pVCpu, unsigned iMemMap, void **ppvMem, uint8_t *pbUnmapInfo, size_t cbMem,
6128 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
6129{
6130 /*
6131 * Filter out conditions we can handle and the ones which shouldn't happen.
6132 */
6133 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
6134 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
6135 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
6136 {
6137 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
6138 return rcMap;
6139 }
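    /* Descriptive note (inferred from the handling below): VERR_PGM_PHYS_TLB_UNASSIGNED means the
       page has no backing, so reads are satisfied with 0xff bytes; the two CATCH statuses mean
       access handlers are involved and the access has to go through PGMPhysRead/PGMPhysWrite. */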
6140 pVCpu->iem.s.cPotentialExits++;
6141
6142 /*
6143 * Read in the current memory content if it's a read, execute or partial
6144 * write access.
6145 */
6146 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
6147 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
6148 {
6149 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
6150 memset(pbBuf, 0xff, cbMem);
6151 else
6152 {
6153 int rc;
6154 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
6155 {
6156 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
6157 if (rcStrict == VINF_SUCCESS)
6158 { /* nothing */ }
6159 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6160 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6161 else
6162 {
6163 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6164 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6165 return rcStrict;
6166 }
6167 }
6168 else
6169 {
6170 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
6171 if (RT_SUCCESS(rc))
6172 { /* likely */ }
6173 else
6174 {
6175 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6176 GCPhysFirst, rc));
6177 return rc;
6178 }
6179 }
6180 }
6181 }
6182#ifdef VBOX_STRICT
6183 else
6184 memset(pbBuf, 0xcc, cbMem);
6185#endif
6186#ifdef VBOX_STRICT
6187 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
6188 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
6189#endif
6190
6191 /*
6192 * Commit the bounce buffer entry.
6193 */
6194 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
6195 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
6196 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
6197 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
6198 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
6199 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
6200 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
6201 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6202 pVCpu->iem.s.cActiveMappings++;
6203
6204 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6205 *ppvMem = pbBuf;
6206 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
6207 return VINF_SUCCESS;
6208}
6209
6210
6211
6212/**
6213 * Maps the specified guest memory for the given kind of access.
6214 *
6215 * This may be using bounce buffering of the memory if it's crossing a page
6216 * boundary or if there is an access handler installed for any of it. Because
6217 * of lock prefix guarantees, we're in for some extra clutter when this
6218 * happens.
6219 *
6220 * This may raise a \#GP, \#SS, \#PF or \#AC.
6221 *
6222 * @returns VBox strict status code.
6223 *
6224 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6225 * @param ppvMem Where to return the pointer to the mapped memory.
6226 * @param pbUnmapInfo Where to return unmap info to be passed to
6227 * iemMemCommitAndUnmap or iemMemRollbackAndUnmap when
6228 * done.
6229 * @param cbMem The number of bytes to map. This is usually 1, 2, 4, 6,
6230 * 8, 12, 16, 32 or 512. When used by string operations
6231 * it can be up to a page.
6232 * @param iSegReg The index of the segment register to use for this
6233 * access. The base and limits are checked. Use UINT8_MAX
6234 * to indicate that no segmentation is required (for IDT,
6235 * GDT and LDT accesses).
6236 * @param GCPtrMem The address of the guest memory.
6237 * @param fAccess How the memory is being accessed. The
6238 * IEM_ACCESS_TYPE_XXX part is used to figure out how to
6239 * map the memory, while the IEM_ACCESS_WHAT_XXX part is
6240 * used when raising exceptions. The IEM_ACCESS_ATOMIC and
6241 * IEM_ACCESS_PARTIAL_WRITE bits are also allowed to be
6242 * set.
6243 * @param uAlignCtl Alignment control:
6244 * - Bits 15:0 is the alignment mask.
6245 * - Bits 31:16 for flags like IEM_MEMMAP_F_ALIGN_GP,
6246 * IEM_MEMMAP_F_ALIGN_SSE, and
6247 * IEM_MEMMAP_F_ALIGN_GP_OR_AC.
6248 * Pass zero to skip alignment.
6249 */
6250VBOXSTRICTRC iemMemMap(PVMCPUCC pVCpu, void **ppvMem, uint8_t *pbUnmapInfo, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem,
6251 uint32_t fAccess, uint32_t uAlignCtl) RT_NOEXCEPT
6252{
6253 /*
6254 * Check the input and figure out which mapping entry to use.
6255 */
6256 Assert(cbMem <= sizeof(pVCpu->iem.s.aBounceBuffers[0]));
6257 Assert( cbMem <= 64 || cbMem == 512 || cbMem == 256 || cbMem == 108 || cbMem == 104 || cbMem == 102 || cbMem == 94
6258 || (iSegReg == UINT8_MAX && uAlignCtl == 0 && fAccess == IEM_ACCESS_DATA_R /* for the CPUID logging interface */) );
6259 Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK | IEM_ACCESS_ATOMIC | IEM_ACCESS_PARTIAL_WRITE)));
6260 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
6261
6262 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
6263 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6264 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
6265 {
6266 iMemMap = iemMemMapFindFree(pVCpu);
6267 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
6268 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
6269 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
6270 pVCpu->iem.s.aMemMappings[2].fAccess),
6271 VERR_IEM_IPE_9);
6272 }
6273
6274 /*
6275 * Map the memory, checking that we can actually access it. If something
6276 * slightly complicated happens, fall back on bounce buffering.
6277 */
6278 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
6279 if (rcStrict == VINF_SUCCESS)
6280 { /* likely */ }
6281 else
6282 return rcStrict;
6283
6284 if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE) /* Crossing a page boundary? */
6285 { /* likely */ }
6286 else
6287 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem, GCPtrMem, fAccess);
6288
6289 /*
6290 * Alignment check.
6291 */
6292 if ( (GCPtrMem & (uAlignCtl & UINT16_MAX)) == 0 )
6293 { /* likelyish */ }
6294 else
6295 {
6296 /* Misaligned access. */
6297 if ((fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6298 {
6299 if ( !(uAlignCtl & IEM_MEMMAP_F_ALIGN_GP)
6300 || ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_SSE)
6301 && (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) )
6302 {
6303 AssertCompile(X86_CR0_AM == X86_EFL_AC);
6304
6305 if (iemMemAreAlignmentChecksEnabled(pVCpu))
6306 return iemRaiseAlignmentCheckException(pVCpu);
6307 }
6308 else if ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_GP_OR_AC)
6309 && (GCPtrMem & 3) /* The value 4 matches 10980xe's FXSAVE and helps make bs3-cpu-basic2 work. */
6310 /** @todo may only apply to 2, 4 or 8 byte misalignments depending on the CPU
6311 * implementation. See FXSAVE/FRSTOR/XSAVE/XRSTOR/++. Using 4 for now as
6312 * that's what FXSAVE does on a 10980xe. */
6313 && iemMemAreAlignmentChecksEnabled(pVCpu))
6314 return iemRaiseAlignmentCheckException(pVCpu);
6315 else
6316 return iemRaiseGeneralProtectionFault0(pVCpu);
6317 }
6318
6319#if (defined(RT_ARCH_AMD64) && defined(RT_OS_LINUX)) || defined(RT_ARCH_ARM64)
 6320 /* If the access is atomic there are host platform alignment restrictions
 6321 we need to conform to. */
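       /* Example (sketch): on AMD64 an 8 byte atomic access with (GCPtrMem & 63) == 60 crosses
          a cache line boundary and therefore takes the VINF_EM_EMULATE_SPLIT_LOCK path below. */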
6322 if ( !(fAccess & IEM_ACCESS_ATOMIC)
6323# if defined(RT_ARCH_AMD64)
6324 || (64U - (GCPtrMem & 63U) >= cbMem) /* split-lock detection. ASSUMES 64 byte cache line. */
6325# elif defined(RT_ARCH_ARM64)
6326 || (16U - (GCPtrMem & 15U) >= cbMem) /* LSE2 allows atomics anywhere within a 16 byte sized & aligned block. */
6327# else
6328# error port me
6329# endif
6330 )
6331 { /* okay */ }
6332 else
6333 {
6334 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv LB %u - misaligned atomic fallback.\n", GCPtrMem, cbMem));
6335 pVCpu->iem.s.cMisalignedAtomics += 1;
6336 return VINF_EM_EMULATE_SPLIT_LOCK;
6337 }
6338#endif
6339 }
6340
6341#ifdef IEM_WITH_DATA_TLB
6342 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
6343
6344 /*
6345 * Get the TLB entry for this page and check PT flags.
6346 *
6347 * We reload the TLB entry if we need to set the dirty bit (accessed
6348 * should in theory always be set).
6349 */
6350 uint8_t *pbMem = NULL;
6351 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
6352 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
6353 if ( pTlbe->uTag == uTag
6354 && !(pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_ACCESSED | (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PT_NO_DIRTY : 0))) )
6355 {
6356 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
6357
6358 /* If the page is either supervisor only or non-writable, we need to do
6359 more careful access checks. */
6360 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_WRITE))
6361 {
6362 /* Write to read only memory? */
6363 if ( (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_WRITE)
6364 && (fAccess & IEM_ACCESS_TYPE_WRITE)
6365 && ( ( IEM_GET_CPL(pVCpu) == 3
6366 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6367 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
6368 {
6369 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
6370 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
6371 }
6372
6373 /* Kernel memory accessed by userland? */
6374 if ( (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER)
6375 && IEM_GET_CPL(pVCpu) == 3
6376 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6377 {
6378 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
6379 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, VERR_ACCESS_DENIED);
6380 }
6381 }
6382
6383 /* Look up the physical page info if necessary. */
6384 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6385# ifdef IN_RING3
6386 pbMem = pTlbe->pbMappingR3;
6387# else
6388 pbMem = NULL;
6389# endif
6390 else
6391 {
6392 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
6393 { /* likely */ }
6394 else
6395 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
6396 pTlbe->pbMappingR3 = NULL;
6397 pTlbe->fFlagsAndPhysRev &= ~IEMTLBE_GCPHYS2PTR_MASK;
6398 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
6399 &pbMem, &pTlbe->fFlagsAndPhysRev);
6400 AssertRCReturn(rc, rc);
6401# ifdef IN_RING3
6402 pTlbe->pbMappingR3 = pbMem;
6403# endif
6404 }
6405 }
6406 else
6407 {
6408 pVCpu->iem.s.DataTlb.cTlbMisses++;
6409
 6410 /* This page table walking will set A and D bits as required by the access while performing the walk.
 6411 ASSUMES these are set when the address is translated rather than on commit... */
 6412 /** @todo testcase: check when A and D bits are actually set by the CPU. */
6413 PGMPTWALKFAST WalkFast;
6414 AssertCompile(IEM_ACCESS_TYPE_READ == PGMQPAGE_F_READ);
6415 AssertCompile(IEM_ACCESS_TYPE_WRITE == PGMQPAGE_F_WRITE);
6416 AssertCompile(IEM_ACCESS_TYPE_EXEC == PGMQPAGE_F_EXECUTE);
6417 AssertCompile(X86_CR0_WP == PGMQPAGE_F_CR0_WP0);
6418 uint32_t fQPage = (fAccess & (PGMQPAGE_F_READ | IEM_ACCESS_TYPE_WRITE | PGMQPAGE_F_EXECUTE))
6419 | (((uint32_t)pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP) ^ X86_CR0_WP);
6420 if (IEM_GET_CPL(pVCpu) == 3 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6421 fQPage |= PGMQPAGE_F_USER_MODE;
6422 int rc = PGMGstQueryPageFast(pVCpu, GCPtrMem, fQPage, &WalkFast);
6423 if (RT_SUCCESS(rc))
6424 Assert((WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED) && WalkFast.fFailed == PGM_WALKFAIL_SUCCESS);
6425 else
6426 {
6427 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
6428# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6429 if (WalkFast.fFailed & PGM_WALKFAIL_EPT)
6430 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
6431# endif
6432 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, rc);
6433 }
6434
6435 pTlbe->uTag = uTag;
6436 pTlbe->fFlagsAndPhysRev = ~WalkFast.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A); /* skipping NX */
6437 RTGCPHYS const GCPhysPg = WalkFast.GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
6438 pTlbe->GCPhys = GCPhysPg;
6439 pTlbe->pbMappingR3 = NULL;
6440 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_ACCESSED));
6441 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_DIRTY) || !(fAccess & IEM_ACCESS_TYPE_WRITE));
6442 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_WRITE) || !(fAccess & IEM_ACCESS_TYPE_WRITE));
6443 Assert( !(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER)
6444 || IEM_GET_CPL(pVCpu) != 3
6445 || (fAccess & IEM_ACCESS_WHAT_SYS));
6446
6447 /* Resolve the physical address. */
6448 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_GCPHYS2PTR_MASK));
6449 rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, GCPhysPg, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
6450 &pbMem, &pTlbe->fFlagsAndPhysRev);
6451 AssertRCReturn(rc, rc);
6452# ifdef IN_RING3
6453 pTlbe->pbMappingR3 = pbMem;
6454# endif
6455 }
6456
6457 /*
6458 * Check the physical page level access and mapping.
6459 */
6460 if ( !(pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ))
6461 || !(pTlbe->fFlagsAndPhysRev & ( (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PG_NO_WRITE : 0)
6462 | (fAccess & IEM_ACCESS_TYPE_READ ? IEMTLBE_F_PG_NO_READ : 0))) )
6463 { /* probably likely */ }
6464 else
6465 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem,
6466 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,
6467 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED
6468 : pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? VERR_PGM_PHYS_TLB_CATCH_ALL
6469 : VERR_PGM_PHYS_TLB_CATCH_WRITE);
6470 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3)); /* ASSUMPTIONS about PGMPhysIemGCPhys2PtrNoLock behaviour. */
6471
6472 if (pbMem)
6473 {
6474 Assert(!((uintptr_t)pbMem & GUEST_PAGE_OFFSET_MASK));
6475 pbMem = pbMem + (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6476 fAccess |= IEM_ACCESS_NOT_LOCKED;
6477 }
6478 else
6479 {
6480 Assert(!(fAccess & IEM_ACCESS_NOT_LOCKED));
6481 RTGCPHYS const GCPhysFirst = pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6482 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6483 if (rcStrict != VINF_SUCCESS)
6484 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem, GCPhysFirst, fAccess, rcStrict);
6485 }
6486
6487 void * const pvMem = pbMem;
6488
6489 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6490 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6491 if (fAccess & IEM_ACCESS_TYPE_READ)
6492 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6493
6494#else /* !IEM_WITH_DATA_TLB */
6495
6496 RTGCPHYS GCPhysFirst;
6497 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, &GCPhysFirst);
6498 if (rcStrict != VINF_SUCCESS)
6499 return rcStrict;
6500
6501 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6502 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6503 if (fAccess & IEM_ACCESS_TYPE_READ)
6504 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6505
6506 void *pvMem;
6507 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6508 if (rcStrict != VINF_SUCCESS)
6509 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem, GCPhysFirst, fAccess, rcStrict);
6510
6511#endif /* !IEM_WITH_DATA_TLB */
6512
6513 /*
6514 * Fill in the mapping table entry.
6515 */
6516 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
6517 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
6518 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6519 pVCpu->iem.s.cActiveMappings += 1;
6520
6521 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6522 *ppvMem = pvMem;
6523 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
6524 AssertCompile(IEM_ACCESS_TYPE_MASK <= 0xf);
6525 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.aMemMappings) < 8);
6526
6527 return VINF_SUCCESS;
6528}
6529
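/*
 * Minimal usage sketch for iemMemMap/iemMemCommitAndUnmap (illustration only, not
 * compiled; the helper name iemMemExampleStoreU32 is made up for this sketch).  It
 * follows the same map/modify/commit pattern as the fetch and store helpers further
 * down in this file.
 */
#if 0
static VBOXSTRICTRC iemMemExampleStoreU32(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
{
    uint8_t   bUnmapInfo;
    uint32_t *pu32Dst;
    /* Map 4 bytes for writing; alignment mask is sizeof(*pu32Dst) - 1, no #GP/#AC flags. */
    VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu32Dst, &bUnmapInfo, sizeof(*pu32Dst),
                                      iSegReg, GCPtrMem, IEM_ACCESS_DATA_W, sizeof(*pu32Dst) - 1);
    if (rcStrict == VINF_SUCCESS)
    {
        *pu32Dst = u32Value;                                /* modify the mapping */
        rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo); /* commit; handles bounce buffering */
    }
    return rcStrict;
}
#endif
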
6530
6531/**
6532 * Commits the guest memory if bounce buffered and unmaps it.
6533 *
6534 * @returns Strict VBox status code.
6535 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6536 * @param bUnmapInfo Unmap info set by iemMemMap.
6537 */
6538VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
6539{
6540 uintptr_t const iMemMap = bUnmapInfo & 0x7;
6541 AssertMsgReturn( (bUnmapInfo & 0x08)
6542 && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6543 && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf)) == ((unsigned)bUnmapInfo >> 4),
6544 ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess),
6545 VERR_NOT_FOUND);
6546
6547 /* If it's bounce buffered, we may need to write back the buffer. */
6548 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6549 {
6550 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6551 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
6552 }
6553 /* Otherwise unlock it. */
6554 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6555 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6556
6557 /* Free the entry. */
6558 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6559 Assert(pVCpu->iem.s.cActiveMappings != 0);
6560 pVCpu->iem.s.cActiveMappings--;
6561 return VINF_SUCCESS;
6562}
6563
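/*
 * Layout of bUnmapInfo as produced by iemMemMap/iemMemMapJmp and decoded above
 * (descriptive note): bits 2:0 hold the aMemMappings index, bit 3 is a validity
 * marker, and bits 7:4 hold the IEM_ACCESS_TYPE_XXX bits of the mapping.
 */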
6564
6565/**
6566 * Rolls back the guest memory (conceptually only) and unmaps it.
6567 *
6568 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6569 * @param bUnmapInfo Unmap info set by iemMemMap.
6570 */
6571void iemMemRollbackAndUnmap(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
6572{
6573 uintptr_t const iMemMap = bUnmapInfo & 0x7;
6574 AssertMsgReturnVoid( (bUnmapInfo & 0x08)
6575 && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6576 && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf))
6577 == ((unsigned)bUnmapInfo >> 4),
6578 ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess));
6579
6580 /* Unlock it if necessary. */
6581 if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6582 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6583
6584 /* Free the entry. */
6585 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6586 Assert(pVCpu->iem.s.cActiveMappings != 0);
6587 pVCpu->iem.s.cActiveMappings--;
6588}
6589
6590#ifdef IEM_WITH_SETJMP
6591
6592/**
6593 * Maps the specified guest memory for the given kind of access, longjmp on
6594 * error.
6595 *
6596 * This may be using bounce buffering of the memory if it's crossing a page
6597 * boundary or if there is an access handler installed for any of it. Because
6598 * of lock prefix guarantees, we're in for some extra clutter when this
6599 * happens.
6600 *
6601 * This may raise a \#GP, \#SS, \#PF or \#AC.
6602 *
6603 * @returns Pointer to the mapped memory.
6604 *
6605 * @param pVCpu The cross context virtual CPU structure of the calling thread.
 6606 * @param pbUnmapInfo Where to return unmap info to be passed to
6607 * iemMemCommitAndUnmapJmp, iemMemCommitAndUnmapRwSafeJmp,
6608 * iemMemCommitAndUnmapWoSafeJmp,
6609 * iemMemCommitAndUnmapRoSafeJmp,
6610 * iemMemRollbackAndUnmapWoSafe or iemMemRollbackAndUnmap
6611 * when done.
6612 * @param cbMem The number of bytes to map. This is usually 1,
6613 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
6614 * string operations it can be up to a page.
6615 * @param iSegReg The index of the segment register to use for
6616 * this access. The base and limits are checked.
6617 * Use UINT8_MAX to indicate that no segmentation
6618 * is required (for IDT, GDT and LDT accesses).
6619 * @param GCPtrMem The address of the guest memory.
6620 * @param fAccess How the memory is being accessed. The
6621 * IEM_ACCESS_TYPE_XXX part is used to figure out how to
6622 * map the memory, while the IEM_ACCESS_WHAT_XXX part is
6623 * used when raising exceptions. The IEM_ACCESS_ATOMIC and
6624 * IEM_ACCESS_PARTIAL_WRITE bits are also allowed to be
6625 * set.
6626 * @param uAlignCtl Alignment control:
6627 * - Bits 15:0 is the alignment mask.
6628 * - Bits 31:16 for flags like IEM_MEMMAP_F_ALIGN_GP,
6629 * IEM_MEMMAP_F_ALIGN_SSE, and
6630 * IEM_MEMMAP_F_ALIGN_GP_OR_AC.
6631 * Pass zero to skip alignment.
6632 */
6633void *iemMemMapJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess,
6634 uint32_t uAlignCtl) IEM_NOEXCEPT_MAY_LONGJMP
6635{
6636 /*
6637 * Check the input, check segment access and adjust address
6638 * with segment base.
6639 */
6640 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
6641 Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK | IEM_ACCESS_ATOMIC | IEM_ACCESS_PARTIAL_WRITE)));
6642 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
6643
6644 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
6645 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
6646 else IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6647
6648 /*
6649 * Alignment check.
6650 */
6651 if ( (GCPtrMem & (uAlignCtl & UINT16_MAX)) == 0 )
6652 { /* likelyish */ }
6653 else
6654 {
6655 /* Misaligned access. */
6656 if ((fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6657 {
6658 if ( !(uAlignCtl & IEM_MEMMAP_F_ALIGN_GP)
6659 || ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_SSE)
6660 && (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) )
6661 {
6662 AssertCompile(X86_CR0_AM == X86_EFL_AC);
6663
6664 if (iemMemAreAlignmentChecksEnabled(pVCpu))
6665 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
6666 }
6667 else if ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_GP_OR_AC)
6668 && (GCPtrMem & 3) /* The value 4 matches 10980xe's FXSAVE and helps make bs3-cpu-basic2 work. */
6669 /** @todo may only apply to 2, 4 or 8 byte misalignments depending on the CPU
6670 * implementation. See FXSAVE/FRSTOR/XSAVE/XRSTOR/++. Using 4 for now as
6671 * that's what FXSAVE does on a 10980xe. */
6672 && iemMemAreAlignmentChecksEnabled(pVCpu))
6673 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
6674 else
6675 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
6676 }
6677
6678#if (defined(RT_ARCH_AMD64) && defined(RT_OS_LINUX)) || defined(RT_ARCH_ARM64)
 6679 /* If the access is atomic there are host platform alignment restrictions
 6680 we need to conform to. */
6681 if ( !(fAccess & IEM_ACCESS_ATOMIC)
6682# if defined(RT_ARCH_AMD64)
6683 || (64U - (GCPtrMem & 63U) >= cbMem) /* split-lock detection. ASSUMES 64 byte cache line. */
6684# elif defined(RT_ARCH_ARM64)
6685 || (16U - (GCPtrMem & 15U) >= cbMem) /* LSE2 allows atomics anywhere within a 16 byte sized & aligned block. */
6686# else
6687# error port me
6688# endif
6689 )
6690 { /* okay */ }
6691 else
6692 {
6693 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv LB %u - misaligned atomic fallback.\n", GCPtrMem, cbMem));
6694 pVCpu->iem.s.cMisalignedAtomics += 1;
6695 IEM_DO_LONGJMP(pVCpu, VINF_EM_EMULATE_SPLIT_LOCK);
6696 }
6697#endif
6698 }
6699
6700 /*
6701 * Figure out which mapping entry to use.
6702 */
6703 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
6704 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6705 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
6706 {
6707 iMemMap = iemMemMapFindFree(pVCpu);
6708 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
6709 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
6710 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
6711 pVCpu->iem.s.aMemMappings[2].fAccess),
6712 IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_9));
6713 }
6714
6715 /*
6716 * Crossing a page boundary?
6717 */
6718 if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE)
6719 { /* No (likely). */ }
6720 else
6721 {
6722 void *pvMem;
6723 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, pbUnmapInfo, cbMem, GCPtrMem, fAccess);
6724 if (rcStrict == VINF_SUCCESS)
6725 return pvMem;
6726 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6727 }
6728
6729#ifdef IEM_WITH_DATA_TLB
6730 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
6731
6732 /*
 6733 * Get the TLB entry for this page, checking that it has the A & D bits
 6734 * set as required by the fAccess flags.
6735 */
6736 /** @todo make the caller pass these in with fAccess. */
6737 uint64_t const fNoUser = (fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS && IEM_GET_CPL(pVCpu) == 3
6738 ? IEMTLBE_F_PT_NO_USER : 0;
6739 uint64_t const fNoWriteNoDirty = fAccess & IEM_ACCESS_TYPE_WRITE
6740 ? IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PT_NO_DIRTY
6741 | ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)
6742 || (IEM_GET_CPL(pVCpu) == 3 && (fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6743 ? IEMTLBE_F_PT_NO_WRITE : 0)
6744 : 0;
6745 uint64_t const fNoRead = fAccess & IEM_ACCESS_TYPE_READ ? IEMTLBE_F_PG_NO_READ : 0;
6746
6747 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
6748 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
6749 if ( pTlbe->uTag == uTag
6750 && !(pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_ACCESSED | (fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY))) )
6751 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
6752 else
6753 {
6754 pVCpu->iem.s.DataTlb.cTlbMisses++;
6755
6756 /* This page table walking will set A and D bits as required by the
6757 access while performing the walk.
6758 ASSUMES these are set when the address is translated rather than on commit... */
6759 /** @todo testcase: check when A and D bits are actually set by the CPU. */
6760 PGMPTWALKFAST WalkFast;
6761 AssertCompile(IEM_ACCESS_TYPE_READ == PGMQPAGE_F_READ);
6762 AssertCompile(IEM_ACCESS_TYPE_WRITE == PGMQPAGE_F_WRITE);
6763 AssertCompile(IEM_ACCESS_TYPE_EXEC == PGMQPAGE_F_EXECUTE);
6764 AssertCompile(X86_CR0_WP == PGMQPAGE_F_CR0_WP0);
6765 uint32_t fQPage = (fAccess & (PGMQPAGE_F_READ | IEM_ACCESS_TYPE_WRITE | PGMQPAGE_F_EXECUTE))
6766 | (((uint32_t)pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP) ^ X86_CR0_WP);
6767 if (IEM_GET_CPL(pVCpu) == 3 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6768 fQPage |= PGMQPAGE_F_USER_MODE;
6769 int rc = PGMGstQueryPageFast(pVCpu, GCPtrMem, fQPage, &WalkFast);
6770 if (RT_SUCCESS(rc))
6771 Assert((WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED) && WalkFast.fFailed == PGM_WALKFAIL_SUCCESS);
6772 else
6773 {
6774 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
6775# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6776 if (WalkFast.fFailed & PGM_WALKFAIL_EPT)
6777 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
6778# endif
6779 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, rc);
6780 }
6781
6782 pTlbe->uTag = uTag;
6783 pTlbe->fFlagsAndPhysRev = ~WalkFast.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A); /* skipping NX */
6784 RTGCPHYS const GCPhysPg = WalkFast.GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
6785 pTlbe->GCPhys = GCPhysPg;
6786 pTlbe->pbMappingR3 = NULL;
6787 Assert(!(pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY) | IEMTLBE_F_PT_NO_ACCESSED)));
6788 Assert(!(pTlbe->fFlagsAndPhysRev & fNoWriteNoDirty & IEMTLBE_F_PT_NO_WRITE));
6789 Assert(!(pTlbe->fFlagsAndPhysRev & fNoUser & IEMTLBE_F_PT_NO_USER));
6790
6791 /* Resolve the physical address. */
6792 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_GCPHYS2PTR_MASK));
6793 uint8_t *pbMemFullLoad = NULL;
6794 rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, GCPhysPg, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
6795 &pbMemFullLoad, &pTlbe->fFlagsAndPhysRev);
6796 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
6797# ifdef IN_RING3
6798 pTlbe->pbMappingR3 = pbMemFullLoad;
6799# endif
6800 }
6801
6802 /*
6803 * Check the flags and physical revision.
6804 * Note! This will revalidate the uTlbPhysRev after a full load. This is
6805 * just to keep the code structure simple (i.e. avoid gotos or similar).
6806 */
6807 uint8_t *pbMem;
6808 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PT_NO_ACCESSED | fNoRead | fNoWriteNoDirty | fNoUser))
6809 == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6810# ifdef IN_RING3
6811 pbMem = pTlbe->pbMappingR3;
6812# else
6813 pbMem = NULL;
6814# endif
6815 else
6816 {
6817 Assert(!(pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY) | IEMTLBE_F_PT_NO_ACCESSED)));
6818
6819 /*
6820 * Okay, something isn't quite right or needs refreshing.
6821 */
6822 /* Write to read only memory? */
6823 if (pTlbe->fFlagsAndPhysRev & fNoWriteNoDirty & IEMTLBE_F_PT_NO_WRITE)
6824 {
6825 LogEx(LOG_GROUP_IEM, ("iemMemMapJmp: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
6826# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6827/** @todo TLB: EPT isn't integrated into the TLB stuff, so we don't know whether
6828 * to trigger an \#PG or a VM nested paging exit here yet! */
6829 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6830 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6831# endif
6832 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
6833 }
6834
6835 /* Kernel memory accessed by userland? */
6836 if (pTlbe->fFlagsAndPhysRev & fNoUser & IEMTLBE_F_PT_NO_USER)
6837 {
6838 LogEx(LOG_GROUP_IEM, ("iemMemMapJmp: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
6839# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6840/** @todo TLB: See above. */
6841 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6842 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6843# endif
6844 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, VERR_ACCESS_DENIED);
6845 }
6846
6847 /*
6848 * Check if the physical page info needs updating.
6849 */
6850 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6851# ifdef IN_RING3
6852 pbMem = pTlbe->pbMappingR3;
6853# else
6854 pbMem = NULL;
6855# endif
6856 else
6857 {
6858 pTlbe->pbMappingR3 = NULL;
6859 pTlbe->fFlagsAndPhysRev &= ~IEMTLBE_GCPHYS2PTR_MASK;
6860 pbMem = NULL;
6861 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
6862 &pbMem, &pTlbe->fFlagsAndPhysRev);
6863 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
6864# ifdef IN_RING3
6865 pTlbe->pbMappingR3 = pbMem;
6866# endif
6867 }
6868
6869 /*
6870 * Check the physical page level access and mapping.
6871 */
6872 if (!(pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty | fNoRead) & (IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ))))
6873 { /* probably likely */ }
6874 else
6875 {
6876 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, (void **)&pbMem, pbUnmapInfo, cbMem,
6877 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,
6878 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED
6879 : pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? VERR_PGM_PHYS_TLB_CATCH_ALL
6880 : VERR_PGM_PHYS_TLB_CATCH_WRITE);
6881 if (rcStrict == VINF_SUCCESS)
6882 return pbMem;
6883 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6884 }
6885 }
6886 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3)); /* ASSUMPTIONS about PGMPhysIemGCPhys2PtrNoLock behaviour. */
6887
6888 if (pbMem)
6889 {
6890 Assert(!((uintptr_t)pbMem & GUEST_PAGE_OFFSET_MASK));
6891 pbMem = pbMem + (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6892 fAccess |= IEM_ACCESS_NOT_LOCKED;
6893 }
6894 else
6895 {
6896 Assert(!(fAccess & IEM_ACCESS_NOT_LOCKED));
6897 RTGCPHYS const GCPhysFirst = pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6898 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
 6899 if (rcStrict != VINF_SUCCESS)
 6900 {
 6901 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, (void **)&pbMem, pbUnmapInfo, cbMem, GCPhysFirst, fAccess, rcStrict);
 6902 if (rcStrict == VINF_SUCCESS)
 6903 return pbMem; /* bounce buffering took over; unmap info and mapping entry already set up */
 6904 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
 }
6905 }
6906
6907 void * const pvMem = pbMem;
6908
6909 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6910 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6911 if (fAccess & IEM_ACCESS_TYPE_READ)
6912 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6913
6914#else /* !IEM_WITH_DATA_TLB */
6915
6916
6917 RTGCPHYS GCPhysFirst;
6918 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, &GCPhysFirst);
6919 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
6920 else IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6921
6922 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6923 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6924 if (fAccess & IEM_ACCESS_TYPE_READ)
6925 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6926
6927 void *pvMem;
6928 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6929 if (rcStrict == VINF_SUCCESS)
6930 { /* likely */ }
6931 else
6932 {
6933 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, pbUnmapInfo, cbMem, GCPhysFirst, fAccess, rcStrict);
6934 if (rcStrict == VINF_SUCCESS)
6935 return pvMem;
6936 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6937 }
6938
6939#endif /* !IEM_WITH_DATA_TLB */
6940
6941 /*
6942 * Fill in the mapping table entry.
6943 */
6944 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
6945 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
6946 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6947 pVCpu->iem.s.cActiveMappings++;
6948
6949 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6950
6951 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
6952 return pvMem;
6953}
6954
6955
6956/**
6957 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
6958 *
6959 * @param pVCpu The cross context virtual CPU structure of the calling thread.
 6960 * @param bUnmapInfo Unmap info set by iemMemMapJmp or
 6961 * iemMemMap.
6962 */
6963void iemMemCommitAndUnmapJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
6964{
6965 uintptr_t const iMemMap = bUnmapInfo & 0x7;
6966 AssertMsgReturnVoid( (bUnmapInfo & 0x08)
6967 && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6968 && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf))
6969 == ((unsigned)bUnmapInfo >> 4),
6970 ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess));
6971
6972 /* If it's bounce buffered, we may need to write back the buffer. */
6973 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6974 {
6975 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6976 {
6977 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
6978 if (rcStrict == VINF_SUCCESS)
6979 return;
6980 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6981 }
6982 }
6983 /* Otherwise unlock it. */
6984 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6985 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6986
6987 /* Free the entry. */
6988 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6989 Assert(pVCpu->iem.s.cActiveMappings != 0);
6990 pVCpu->iem.s.cActiveMappings--;
6991}
6992
6993
6994/** Fallback for iemMemCommitAndUnmapRwJmp. */
6995void iemMemCommitAndUnmapRwSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
6996{
6997 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE));
6998 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
6999}
7000
7001
7002/** Fallback for iemMemCommitAndUnmapAtJmp. */
7003void iemMemCommitAndUnmapAtSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
7004{
7005 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE));
7006 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7007}
7008
7009
7010/** Fallback for iemMemCommitAndUnmapWoJmp. */
7011void iemMemCommitAndUnmapWoSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
7012{
7013 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_WRITE);
7014 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7015}
7016
7017
7018/** Fallback for iemMemCommitAndUnmapRoJmp. */
7019void iemMemCommitAndUnmapRoSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
7020{
7021 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_READ);
7022 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7023}
7024
7025
7026/** Fallback for iemMemRollbackAndUnmapWo. */
7027void iemMemRollbackAndUnmapWoSafe(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
7028{
7029 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_WRITE);
7030 iemMemRollbackAndUnmap(pVCpu, bUnmapInfo);
7031}
7032
7033#endif /* IEM_WITH_SETJMP */
7034
7035#ifndef IN_RING3
7036/**
 7037 * Commits the guest memory if bounce buffered and unmaps it; if any bounce
 7038 * buffer part shows trouble, the write is postponed to ring-3 (sets FF and stuff).
7039 *
7040 * Allows the instruction to be completed and retired, while the IEM user will
7041 * return to ring-3 immediately afterwards and do the postponed writes there.
7042 *
7043 * @returns VBox status code (no strict statuses). Caller must check
7044 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
7045 * @param pVCpu The cross context virtual CPU structure of the calling thread.
 7046 * @param bUnmapInfo Unmap info set by iemMemMap or
 7047 * iemMemMapJmp.
7048 */
7049VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
7050{
7051 uintptr_t const iMemMap = bUnmapInfo & 0x7;
7052 AssertMsgReturn( (bUnmapInfo & 0x08)
7053 && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
7054 && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf))
7055 == ((unsigned)bUnmapInfo >> 4),
7056 ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess),
7057 VERR_NOT_FOUND);
7058
7059 /* If it's bounce buffered, we may need to write back the buffer. */
7060 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
7061 {
7062 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
7063 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
7064 }
7065 /* Otherwise unlock it. */
7066 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
7067 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
7068
7069 /* Free the entry. */
7070 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
7071 Assert(pVCpu->iem.s.cActiveMappings != 0);
7072 pVCpu->iem.s.cActiveMappings--;
7073 return VINF_SUCCESS;
7074}
7075#endif
7076
7077
7078/**
7079 * Rollbacks mappings, releasing page locks and such.
7080 *
7081 * The caller shall only call this after checking cActiveMappings.
7082 *
7083 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7084 */
7085void iemMemRollback(PVMCPUCC pVCpu) RT_NOEXCEPT
7086{
7087 Assert(pVCpu->iem.s.cActiveMappings > 0);
7088
7089 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
7090 while (iMemMap-- > 0)
7091 {
7092 uint32_t const fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
7093 if (fAccess != IEM_ACCESS_INVALID)
7094 {
7095 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
7096 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
7097 if (!(fAccess & (IEM_ACCESS_BOUNCE_BUFFERED | IEM_ACCESS_NOT_LOCKED)))
7098 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
7099 AssertMsg(pVCpu->iem.s.cActiveMappings > 0,
7100 ("iMemMap=%u fAccess=%#x pv=%p GCPhysFirst=%RGp GCPhysSecond=%RGp\n",
7101 iMemMap, fAccess, pVCpu->iem.s.aMemMappings[iMemMap].pv,
7102 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond));
7103 pVCpu->iem.s.cActiveMappings--;
7104 }
7105 }
7106}
7107
7108
7109/*
7110 * Instantiate R/W templates.
7111 */
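/* Descriptive note: each inclusion of IEMAllMemRWTmpl.cpp.h below instantiates the
   iemMemFetchData<Suff> / iemMemStoreData<Suff> helpers (plus stack push/pop variants
   while TMPL_MEM_WITH_STACK is defined) for the given TMPL_MEM_TYPE; the exact set of
   generated functions is determined by that template file. */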
7112#define TMPL_MEM_WITH_STACK
7113
7114#define TMPL_MEM_TYPE uint8_t
7115#define TMPL_MEM_FN_SUFF U8
7116#define TMPL_MEM_FMT_TYPE "%#04x"
7117#define TMPL_MEM_FMT_DESC "byte"
7118#include "IEMAllMemRWTmpl.cpp.h"
7119
7120#define TMPL_MEM_TYPE uint16_t
7121#define TMPL_MEM_FN_SUFF U16
7122#define TMPL_MEM_FMT_TYPE "%#06x"
7123#define TMPL_MEM_FMT_DESC "word"
7124#include "IEMAllMemRWTmpl.cpp.h"
7125
7126#define TMPL_WITH_PUSH_SREG
7127#define TMPL_MEM_TYPE uint32_t
7128#define TMPL_MEM_FN_SUFF U32
7129#define TMPL_MEM_FMT_TYPE "%#010x"
7130#define TMPL_MEM_FMT_DESC "dword"
7131#include "IEMAllMemRWTmpl.cpp.h"
7132#undef TMPL_WITH_PUSH_SREG
7133
7134#define TMPL_MEM_TYPE uint64_t
7135#define TMPL_MEM_FN_SUFF U64
7136#define TMPL_MEM_FMT_TYPE "%#018RX64"
7137#define TMPL_MEM_FMT_DESC "qword"
7138#include "IEMAllMemRWTmpl.cpp.h"
7139
7140#undef TMPL_MEM_WITH_STACK
7141
7142#define TMPL_MEM_TYPE uint64_t
7143#define TMPL_MEM_TYPE_ALIGN (sizeof(uint64_t) * 2 - 1)
7144#define TMPL_MEM_FN_SUFF U64AlignedU128
7145#define TMPL_MEM_FMT_TYPE "%#018RX64"
7146#define TMPL_MEM_FMT_DESC "qword"
7147#include "IEMAllMemRWTmpl.cpp.h"
7148
7149/* See IEMAllMemRWTmplInline.cpp.h */
7150#define TMPL_MEM_BY_REF
7151
7152#define TMPL_MEM_TYPE RTFLOAT80U
7153#define TMPL_MEM_TYPE_ALIGN (sizeof(uint64_t) - 1)
7154#define TMPL_MEM_FN_SUFF R80
7155#define TMPL_MEM_FMT_TYPE "%.10Rhxs"
7156#define TMPL_MEM_FMT_DESC "tword"
7157#include "IEMAllMemRWTmpl.cpp.h"
7158
7159#define TMPL_MEM_TYPE RTPBCD80U
7160#define TMPL_MEM_TYPE_ALIGN (sizeof(uint64_t) - 1) /** @todo testcase: 80-bit BCD alignment */
7161#define TMPL_MEM_FN_SUFF D80
7162#define TMPL_MEM_FMT_TYPE "%.10Rhxs"
7163#define TMPL_MEM_FMT_DESC "tword"
7164#include "IEMAllMemRWTmpl.cpp.h"
7165
7166#define TMPL_MEM_TYPE RTUINT128U
7167#define TMPL_MEM_TYPE_ALIGN (sizeof(RTUINT128U) - 1)
7168#define TMPL_MEM_FN_SUFF U128
7169#define TMPL_MEM_FMT_TYPE "%.16Rhxs"
7170#define TMPL_MEM_FMT_DESC "dqword"
7171#include "IEMAllMemRWTmpl.cpp.h"
7172
7173#define TMPL_MEM_TYPE RTUINT128U
7174#define TMPL_MEM_TYPE_ALIGN (sizeof(RTUINT128U) - 1)
7175#define TMPL_MEM_MAP_FLAGS_ADD (IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE)
7176#define TMPL_MEM_FN_SUFF U128AlignedSse
7177#define TMPL_MEM_FMT_TYPE "%.16Rhxs"
7178#define TMPL_MEM_FMT_DESC "dqword"
7179#include "IEMAllMemRWTmpl.cpp.h"
7180
7181#define TMPL_MEM_TYPE RTUINT128U
7182#define TMPL_MEM_TYPE_ALIGN 0
7183#define TMPL_MEM_FN_SUFF U128NoAc
7184#define TMPL_MEM_FMT_TYPE "%.16Rhxs"
7185#define TMPL_MEM_FMT_DESC "dqword"
7186#include "IEMAllMemRWTmpl.cpp.h"
7187
7188#define TMPL_MEM_TYPE RTUINT256U
7189#define TMPL_MEM_TYPE_ALIGN 0
7190#define TMPL_MEM_FN_SUFF U256NoAc
7191#define TMPL_MEM_FMT_TYPE "%.32Rhxs"
7192#define TMPL_MEM_FMT_DESC "qqword"
7193#include "IEMAllMemRWTmpl.cpp.h"
7194
7195#define TMPL_MEM_TYPE RTUINT256U
7196#define TMPL_MEM_TYPE_ALIGN (sizeof(RTUINT256U) - 1)
7197#define TMPL_MEM_MAP_FLAGS_ADD IEM_MEMMAP_F_ALIGN_GP
7198#define TMPL_MEM_FN_SUFF U256AlignedAvx
7199#define TMPL_MEM_FMT_TYPE "%.32Rhxs"
7200#define TMPL_MEM_FMT_DESC "qqword"
7201#include "IEMAllMemRWTmpl.cpp.h"
7202
7203/**
7204 * Fetches a data dword and zero extends it to a qword.
7205 *
7206 * @returns Strict VBox status code.
7207 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7208 * @param pu64Dst Where to return the qword.
7209 * @param iSegReg The index of the segment register to use for
7210 * this access. The base and limits are checked.
7211 * @param GCPtrMem The address of the guest memory.
7212 */
7213VBOXSTRICTRC iemMemFetchDataU32_ZX_U64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7214{
7215 /* The lazy approach for now... */
7216 uint8_t bUnmapInfo;
7217 uint32_t const *pu32Src;
7218 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, &bUnmapInfo, sizeof(*pu32Src), iSegReg, GCPtrMem,
7219 IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
7220 if (rc == VINF_SUCCESS)
7221 {
7222 *pu64Dst = *pu32Src;
7223 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7224 Log(("IEM RD dword %d|%RGv: %#010RX64\n", iSegReg, GCPtrMem, *pu64Dst));
7225 }
7226 return rc;
7227}
7228
7229
7230#ifdef SOME_UNUSED_FUNCTION
7231/**
7232 * Fetches a data dword and sign extends it to a qword.
7233 *
7234 * @returns Strict VBox status code.
7235 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7236 * @param pu64Dst Where to return the sign extended value.
7237 * @param iSegReg The index of the segment register to use for
7238 * this access. The base and limits are checked.
7239 * @param GCPtrMem The address of the guest memory.
7240 */
7241VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7242{
7243 /* The lazy approach for now... */
7244 uint8_t bUnmapInfo;
7245 int32_t const *pi32Src;
7246 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, &bUnmapInfo, sizeof(*pi32Src), iSegReg, GCPtrMem,
7247 IEM_ACCESS_DATA_R, sizeof(*pi32Src) - 1);
7248 if (rc == VINF_SUCCESS)
7249 {
7250 *pu64Dst = *pi32Src;
7251 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7252 Log(("IEM RD dword %d|%RGv: %#010x\n", iSegReg, GCPtrMem, (uint32_t)*pu64Dst));
7253 }
7254#ifdef __GNUC__ /* warning: GCC may be a royal pain */
7255 else
7256 *pu64Dst = 0;
7257#endif
7258 return rc;
7259}
7260#endif
7261
7262
7263/**
7264 * Fetches a descriptor register (lgdt, lidt).
7265 *
7266 * @returns Strict VBox status code.
7267 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7268 * @param pcbLimit Where to return the limit.
7269 * @param pGCPtrBase Where to return the base.
7270 * @param iSegReg The index of the segment register to use for
7271 * this access. The base and limits are checked.
7272 * @param GCPtrMem The address of the guest memory.
7273 * @param enmOpSize The effective operand size.
7274 */
7275VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPUCC pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
7276 RTGCPTR GCPtrMem, IEMMODE enmOpSize) RT_NOEXCEPT
7277{
7278 /*
7279 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
7280 * little special:
7281 * - The two reads are done separately.
 7282 * - Operand size override works in 16-bit and 32-bit code, but not in 64-bit code.
7283 * - We suspect the 386 to actually commit the limit before the base in
7284 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
 7285 * don't try to emulate this eccentric behavior, because it's not well
7286 * enough understood and rather hard to trigger.
7287 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
7288 */
7289 VBOXSTRICTRC rcStrict;
7290 if (IEM_IS_64BIT_CODE(pVCpu))
7291 {
7292 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7293 if (rcStrict == VINF_SUCCESS)
7294 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
7295 }
7296 else
7297 {
 7298 uint32_t uTmp = 0; /* (Visual C++ may otherwise warn about use of an uninitialized variable.) */
7299 if (enmOpSize == IEMMODE_32BIT)
7300 {
7301 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
7302 {
7303 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7304 if (rcStrict == VINF_SUCCESS)
7305 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7306 }
7307 else
7308 {
7309 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
7310 if (rcStrict == VINF_SUCCESS)
7311 {
7312 *pcbLimit = (uint16_t)uTmp;
7313 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7314 }
7315 }
7316 if (rcStrict == VINF_SUCCESS)
7317 *pGCPtrBase = uTmp;
7318 }
7319 else
7320 {
7321 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7322 if (rcStrict == VINF_SUCCESS)
7323 {
7324 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7325 if (rcStrict == VINF_SUCCESS)
7326 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
7327 }
7328 }
7329 }
7330 return rcStrict;
7331}
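/*
 * Illustrative usage sketch (not from the original source; the helper name is
 * made up): fetch the pseudo-descriptor operand of LGDT/LIDT and log it.  The
 * real instruction implementations additionally load the result into the
 * guest GDTR/IDTR.
 */
#if 0 /* example only */
static VBOXSTRICTRC iemExampleFetchXdtrOperand(PVMCPUCC pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEffSrc, IEMMODE enmEffOpSize)
{
    uint16_t     cbLimit   = 0;
    RTGCPTR      GCPtrBase = 0;
    VBOXSTRICTRC rcStrict  = iemMemFetchDataXdtr(pVCpu, &cbLimit, &GCPtrBase, iEffSeg, GCPtrEffSrc, enmEffOpSize);
    if (rcStrict == VINF_SUCCESS)
        Log(("example: xdtr limit=%#06x base=%RGv\n", cbLimit, GCPtrBase));
    return rcStrict;
}
#endif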
7332
7333
7334/**
7335 * Stores a data dqword, SSE aligned.
7336 *
7337 * @returns Strict VBox status code.
7338 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7339 * @param iSegReg The index of the segment register to use for
7340 * this access. The base and limits are checked.
7341 * @param GCPtrMem The address of the guest memory.
7342 * @param u128Value The value to store.
7343 */
7344VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT
7345{
7346 /* The lazy approach for now... */
7347 uint8_t bUnmapInfo;
7348 PRTUINT128U pu128Dst;
7349 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, &bUnmapInfo, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W,
7350 (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7351 if (rc == VINF_SUCCESS)
7352 {
7353 pu128Dst->au64[0] = u128Value.au64[0];
7354 pu128Dst->au64[1] = u128Value.au64[1];
7355 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7356 Log5(("IEM WR dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
7357 }
7358 return rc;
7359}
7360
7361
7362#ifdef IEM_WITH_SETJMP
7363/**
7364 * Stores a data dqword, SSE aligned.
7365 *
7366 * @returns Strict VBox status code.
7367 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7368 * @param iSegReg The index of the segment register to use for
7369 * this access. The base and limits are checked.
7370 * @param GCPtrMem The address of the guest memory.
7371 * @param u128Value The value to store.
7372 */
7373void iemMemStoreDataU128AlignedSseJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem,
7374 RTUINT128U u128Value) IEM_NOEXCEPT_MAY_LONGJMP
7375{
7376 /* The lazy approach for now... */
7377 uint8_t bUnmapInfo;
7378 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W,
7379 (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7380 pu128Dst->au64[0] = u128Value.au64[0];
7381 pu128Dst->au64[1] = u128Value.au64[1];
7382 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7383 Log5(("IEM WR dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
7384}
7385#endif
7386
7387
7388/**
7389 * Stores a data qqword (no alignment restrictions).
7390 *
7391 * @returns Strict VBox status code.
7392 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7393 * @param iSegReg The index of the segment register to use for
7394 * this access. The base and limits are checked.
7395 * @param GCPtrMem The address of the guest memory.
7396 * @param pu256Value Pointer to the value to store.
7397 */
7398VBOXSTRICTRC iemMemStoreDataU256(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
7399{
7400 /* The lazy approach for now... */
7401 uint8_t bUnmapInfo;
7402 PRTUINT256U pu256Dst;
7403 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, &bUnmapInfo, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7404 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7405 if (rc == VINF_SUCCESS)
7406 {
7407 pu256Dst->au64[0] = pu256Value->au64[0];
7408 pu256Dst->au64[1] = pu256Value->au64[1];
7409 pu256Dst->au64[2] = pu256Value->au64[2];
7410 pu256Dst->au64[3] = pu256Value->au64[3];
7411 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7412 Log5(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7413 }
7414 return rc;
7415}
7416
7417
7418#ifdef IEM_WITH_SETJMP
7419/**
7420 * Stores a data qqword, longjmp on error.
7421 *
7422 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7423 * @param iSegReg The index of the segment register to use for
7424 * this access. The base and limits are checked.
7425 * @param GCPtrMem The address of the guest memory.
7426 * @param pu256Value Pointer to the value to store.
7427 */
7428void iemMemStoreDataU256Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP
7429{
7430 /* The lazy approach for now... */
7431 uint8_t bUnmapInfo;
7432 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7433 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7434 pu256Dst->au64[0] = pu256Value->au64[0];
7435 pu256Dst->au64[1] = pu256Value->au64[1];
7436 pu256Dst->au64[2] = pu256Value->au64[2];
7437 pu256Dst->au64[3] = pu256Value->au64[3];
7438 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7439 Log5(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7440}
7441#endif
7442
7443
7444/**
7445 * Stores a descriptor register (sgdt, sidt).
7446 *
7447 * @returns Strict VBox status code.
7448 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7449 * @param cbLimit The limit.
7450 * @param GCPtrBase The base address.
7451 * @param iSegReg The index of the segment register to use for
7452 * this access. The base and limits are checked.
7453 * @param GCPtrMem The address of the guest memory.
7454 */
7455VBOXSTRICTRC iemMemStoreDataXdtr(PVMCPUCC pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7456{
7457 /*
7458 * The SIDT and SGDT instructions actually store the data using two
7459 * independent writes (see bs3CpuBasic2_sidt_sgdt_One). The instructions
7460 * do not respond to opsize prefixes.
7461 */
7462 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
7463 if (rcStrict == VINF_SUCCESS)
7464 {
7465 if (IEM_IS_16BIT_CODE(pVCpu))
7466 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
7467 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
7468 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
7469 else if (IEM_IS_32BIT_CODE(pVCpu))
7470 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
7471 else
7472 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
7473 }
7474 return rcStrict;
7475}
7476
7477
7478/**
7479 * Begin a special stack push (used by interrupt, exceptions and such).
7480 *
7481 * This will raise \#SS or \#PF if appropriate.
7482 *
7483 * @returns Strict VBox status code.
7484 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7485 * @param cbMem The number of bytes to push onto the stack.
7486 * @param cbAlign The alignment mask (7, 3, 1).
7487 * @param ppvMem Where to return the pointer to the stack memory.
7488 * As with the other memory functions this could be
7489 * direct access or bounce buffered access, so
7490 * don't commit the register until the commit call
7491 * succeeds.
7492 * @param pbUnmapInfo Where to store unmap info for
7493 * iemMemStackPushCommitSpecial.
7494 * @param puNewRsp Where to return the new RSP value. This must be
7495 * passed unchanged to
7496 * iemMemStackPushCommitSpecial().
7497 */
7498VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
7499 void **ppvMem, uint8_t *pbUnmapInfo, uint64_t *puNewRsp) RT_NOEXCEPT
7500{
7501 Assert(cbMem < UINT8_MAX);
7502 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, (uint8_t)cbMem, puNewRsp);
7503 return iemMemMap(pVCpu, ppvMem, pbUnmapInfo, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W, cbAlign);
7504}
7505
7506
7507/**
7508 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
7509 *
7510 * This will update the rSP.
7511 *
7512 * @returns Strict VBox status code.
7513 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7514 * @param bUnmapInfo Unmap info set by iemMemStackPushBeginSpecial.
7515 * @param uNewRsp The new RSP value returned by
7516 * iemMemStackPushBeginSpecial().
7517 */
7518VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPUCC pVCpu, uint8_t bUnmapInfo, uint64_t uNewRsp) RT_NOEXCEPT
7519{
7520 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7521 if (rcStrict == VINF_SUCCESS)
7522 pVCpu->cpum.GstCtx.rsp = uNewRsp;
7523 return rcStrict;
7524}
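/*
 * A rough sketch of the begin/write/commit pattern (hypothetical helper name;
 * the real users are the interrupt/exception delivery paths): map the stack
 * slot, fill it, then commit, which also updates RSP on success.
 */
#if 0 /* example only */
static VBOXSTRICTRC iemExamplePushErrCd(PVMCPUCC pVCpu, uint32_t uErrCd)
{
    void    *pvStack    = NULL;
    uint8_t  bUnmapInfo = 0;
    uint64_t uNewRsp    = 0;
    VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pVCpu, sizeof(uint32_t), 3 /*cbAlign*/,
                                                        &pvStack, &bUnmapInfo, &uNewRsp);
    if (rcStrict == VINF_SUCCESS)
    {
        *(uint32_t *)pvStack = uErrCd;
        rcStrict = iemMemStackPushCommitSpecial(pVCpu, bUnmapInfo, uNewRsp);
    }
    return rcStrict;
}
#endif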
7525
7526
7527/**
7528 * Begin a special stack pop (used by iret, retf and such).
7529 *
7530 * This will raise \#SS or \#PF if appropriate.
7531 *
7532 * @returns Strict VBox status code.
7533 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7534 * @param cbMem The number of bytes to pop from the stack.
7535 * @param cbAlign The alignment mask (7, 3, 1).
7536 * @param ppvMem Where to return the pointer to the stack memory.
7537 * @param pbUnmapInfo Where to store unmap info for
7538 * iemMemStackPopDoneSpecial.
7539 * @param puNewRsp Where to return the new RSP value. This must be
7540 * assigned to CPUMCTX::rsp manually some time
7541 * after iemMemStackPopDoneSpecial() has been
7542 * called.
7543 */
7544VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
7545 void const **ppvMem, uint8_t *pbUnmapInfo, uint64_t *puNewRsp) RT_NOEXCEPT
7546{
7547 Assert(cbMem < UINT8_MAX);
7548 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, (uint8_t)cbMem, puNewRsp);
7549 return iemMemMap(pVCpu, (void **)ppvMem, pbUnmapInfo, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R, cbAlign);
7550}
7551
7552
7553/**
7554 * Continue a special stack pop (used by iret and retf), for the purpose of
7555 * retrieving a new stack pointer.
7556 *
7557 * This will raise \#SS or \#PF if appropriate.
7558 *
7559 * @returns Strict VBox status code.
7560 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7561 * @param off Offset from the top of the stack. This is zero
7562 * except in the retf case.
7563 * @param cbMem The number of bytes to pop from the stack.
7564 * @param ppvMem Where to return the pointer to the stack memory.
7565 * @param pbUnmapInfo Where to store unmap info for
7566 * iemMemStackPopDoneSpecial.
7567 * @param uCurNewRsp The current uncommitted RSP value. (No need to
7568 * return this because all use of this function is
7569 * to retrieve a new value and anything we return
7570 * here would be discarded.)
7571 */
7572VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPUCC pVCpu, size_t off, size_t cbMem,
7573 void const **ppvMem, uint8_t *pbUnmapInfo, uint64_t uCurNewRsp) RT_NOEXCEPT
7574{
7575 Assert(cbMem < UINT8_MAX);
7576
7577    /* The essence of iemRegGetRspForPopEx and friends: */ /** @todo put this into an inlined function? */
7578 RTGCPTR GCPtrTop;
7579 if (IEM_IS_64BIT_CODE(pVCpu))
7580 GCPtrTop = uCurNewRsp;
7581 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
7582 GCPtrTop = (uint32_t)uCurNewRsp;
7583 else
7584 GCPtrTop = (uint16_t)uCurNewRsp;
7585
7586 return iemMemMap(pVCpu, (void **)ppvMem, pbUnmapInfo, cbMem, X86_SREG_SS, GCPtrTop + off, IEM_ACCESS_STACK_R,
7587 0 /* checked in iemMemStackPopBeginSpecial */);
7588}
7589
7590
7591/**
7592 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
7593 * iemMemStackPopContinueSpecial).
7594 *
7595 * The caller will manually commit the rSP.
7596 *
7597 * @returns Strict VBox status code.
7598 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7599 * @param bUnmapInfo Unmap information returned by
7600 * iemMemStackPopBeginSpecial() or
7601 * iemMemStackPopContinueSpecial().
7602 */
7603VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
7604{
7605 return iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7606}
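/*
 * A matching pop sketch (hypothetical helper name): map the stack slot, copy
 * the value out, unmap, and only then commit the new RSP manually, as the
 * documentation above requires.
 */
#if 0 /* example only */
static VBOXSTRICTRC iemExamplePopU32(PVMCPUCC pVCpu, uint32_t *puValue)
{
    void const *pvStack    = NULL;
    uint8_t     bUnmapInfo = 0;
    uint64_t    uNewRsp    = 0;
    VBOXSTRICTRC rcStrict = iemMemStackPopBeginSpecial(pVCpu, sizeof(uint32_t), 3 /*cbAlign*/,
                                                       &pvStack, &bUnmapInfo, &uNewRsp);
    if (rcStrict == VINF_SUCCESS)
    {
        *puValue = *(uint32_t const *)pvStack;
        rcStrict = iemMemStackPopDoneSpecial(pVCpu, bUnmapInfo);
        if (rcStrict == VINF_SUCCESS)
            pVCpu->cpum.GstCtx.rsp = uNewRsp; /* the manual RSP commit */
    }
    return rcStrict;
}
#endif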
7607
7608
7609/**
7610 * Fetches a system table byte.
7611 *
7612 * @returns Strict VBox status code.
7613 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7614 * @param pbDst Where to return the byte.
7615 * @param iSegReg The index of the segment register to use for
7616 * this access. The base and limits are checked.
7617 * @param GCPtrMem The address of the guest memory.
7618 */
7619VBOXSTRICTRC iemMemFetchSysU8(PVMCPUCC pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7620{
7621 /* The lazy approach for now... */
7622 uint8_t bUnmapInfo;
7623 uint8_t const *pbSrc;
7624 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, &bUnmapInfo, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
7625 if (rc == VINF_SUCCESS)
7626 {
7627 *pbDst = *pbSrc;
7628 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7629 }
7630 return rc;
7631}
7632
7633
7634/**
7635 * Fetches a system table word.
7636 *
7637 * @returns Strict VBox status code.
7638 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7639 * @param pu16Dst Where to return the word.
7640 * @param iSegReg The index of the segment register to use for
7641 * this access. The base and limits are checked.
7642 * @param GCPtrMem The address of the guest memory.
7643 */
7644VBOXSTRICTRC iemMemFetchSysU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7645{
7646 /* The lazy approach for now... */
7647 uint8_t bUnmapInfo;
7648 uint16_t const *pu16Src;
7649 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, &bUnmapInfo, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
7650 if (rc == VINF_SUCCESS)
7651 {
7652 *pu16Dst = *pu16Src;
7653 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7654 }
7655 return rc;
7656}
7657
7658
7659/**
7660 * Fetches a system table dword.
7661 *
7662 * @returns Strict VBox status code.
7663 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7664 * @param pu32Dst Where to return the dword.
7665 * @param iSegReg The index of the segment register to use for
7666 * this access. The base and limits are checked.
7667 * @param GCPtrMem The address of the guest memory.
7668 */
7669VBOXSTRICTRC iemMemFetchSysU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7670{
7671 /* The lazy approach for now... */
7672 uint8_t bUnmapInfo;
7673 uint32_t const *pu32Src;
7674 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, &bUnmapInfo, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
7675 if (rc == VINF_SUCCESS)
7676 {
7677 *pu32Dst = *pu32Src;
7678 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7679 }
7680 return rc;
7681}
7682
7683
7684/**
7685 * Fetches a system table qword.
7686 *
7687 * @returns Strict VBox status code.
7688 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7689 * @param pu64Dst Where to return the qword.
7690 * @param iSegReg The index of the segment register to use for
7691 * this access. The base and limits are checked.
7692 * @param GCPtrMem The address of the guest memory.
7693 */
7694VBOXSTRICTRC iemMemFetchSysU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7695{
7696 /* The lazy approach for now... */
7697 uint8_t bUnmapInfo;
7698 uint64_t const *pu64Src;
7699 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, &bUnmapInfo, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
7700 if (rc == VINF_SUCCESS)
7701 {
7702 *pu64Dst = *pu64Src;
7703 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7704 }
7705 return rc;
7706}
7707
7708
7709/**
7710 * Fetches a descriptor table entry with caller specified error code.
7711 *
7712 * @returns Strict VBox status code.
7713 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7714 * @param pDesc Where to return the descriptor table entry.
7715 * @param uSel The selector which table entry to fetch.
7716 * @param uXcpt The exception to raise on table lookup error.
7717 * @param uErrorCode The error code associated with the exception.
7718 */
7719static VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,
7720 uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT
7721{
7722 AssertPtr(pDesc);
7723 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
7724
7725 /** @todo did the 286 require all 8 bytes to be accessible? */
7726 /*
7727 * Get the selector table base and check bounds.
7728 */
7729 RTGCPTR GCPtrBase;
7730 if (uSel & X86_SEL_LDT)
7731 {
7732 if ( !pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present
7733 || (uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.ldtr.u32Limit )
7734 {
7735 LogEx(LOG_GROUP_IEM, ("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
7736 uSel, pVCpu->cpum.GstCtx.ldtr.u32Limit, pVCpu->cpum.GstCtx.ldtr.Sel));
7737 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
7738 uErrorCode, 0);
7739 }
7740
7741 Assert(pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present);
7742 GCPtrBase = pVCpu->cpum.GstCtx.ldtr.u64Base;
7743 }
7744 else
7745 {
7746 if ((uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.gdtr.cbGdt)
7747 {
7748 LogEx(LOG_GROUP_IEM, ("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pVCpu->cpum.GstCtx.gdtr.cbGdt));
7749 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
7750 uErrorCode, 0);
7751 }
7752 GCPtrBase = pVCpu->cpum.GstCtx.gdtr.pGdt;
7753 }
7754
7755 /*
7756 * Read the legacy descriptor and maybe the long mode extensions if
7757 * required.
7758 */
7759 VBOXSTRICTRC rcStrict;
7760 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)
7761 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
7762 else
7763 {
7764 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);
7765 if (rcStrict == VINF_SUCCESS)
7766 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);
7767 if (rcStrict == VINF_SUCCESS)
7768 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);
7769 if (rcStrict == VINF_SUCCESS)
7770 pDesc->Legacy.au16[3] = 0;
7771 else
7772 return rcStrict;
7773 }
7774
7775 if (rcStrict == VINF_SUCCESS)
7776 {
7777 if ( !IEM_IS_LONG_MODE(pVCpu)
7778 || pDesc->Legacy.Gen.u1DescType)
7779 pDesc->Long.au64[1] = 0;
7780 else if ( (uint32_t)(uSel | X86_SEL_RPL_LDT) + 8
7781 <= (uSel & X86_SEL_LDT ? pVCpu->cpum.GstCtx.ldtr.u32Limit : pVCpu->cpum.GstCtx.gdtr.cbGdt))
7782 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
7783 else
7784 {
7785 LogEx(LOG_GROUP_IEM,("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
7786 /** @todo is this the right exception? */
7787 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
7788 }
7789 }
7790 return rcStrict;
7791}
7792
7793
7794/**
7795 * Fetches a descriptor table entry.
7796 *
7797 * @returns Strict VBox status code.
7798 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7799 * @param pDesc Where to return the descriptor table entry.
7800 * @param uSel The selector which table entry to fetch.
7801 * @param uXcpt The exception to raise on table lookup error.
7802 */
7803VBOXSTRICTRC iemMemFetchSelDesc(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt) RT_NOEXCEPT
7804{
7805 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
7806}
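/*
 * A small usage sketch (hypothetical helper name): fetch the descriptor for a
 * selector and log a couple of its fields.  Real callers go on to validate the
 * type, DPL and present bit before loading the selector.
 */
#if 0 /* example only */
static VBOXSTRICTRC iemExampleLogSelDesc(PVMCPUCC pVCpu, uint16_t uSel)
{
    IEMSELDESC   Desc;
    VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_GP);
    if (rcStrict == VINF_SUCCESS)
        Log(("example: sel=%#06x type=%#x dpl=%u present=%u\n",
             uSel, Desc.Legacy.Gen.u4Type, Desc.Legacy.Gen.u2Dpl, Desc.Legacy.Gen.u1Present));
    return rcStrict;
}
#endif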
7807
7808
7809/**
7810 * Marks the selector descriptor as accessed (only non-system descriptors).
7811 *
7812 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
7813 * will therefore skip the limit checks.
7814 *
7815 * @returns Strict VBox status code.
7816 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7817 * @param uSel The selector.
7818 */
7819VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
7820{
7821 /*
7822 * Get the selector table base and calculate the entry address.
7823 */
7824 RTGCPTR GCPtr = uSel & X86_SEL_LDT
7825 ? pVCpu->cpum.GstCtx.ldtr.u64Base
7826 : pVCpu->cpum.GstCtx.gdtr.pGdt;
7827 GCPtr += uSel & X86_SEL_MASK;
7828
7829 /*
7830 * ASMAtomicBitSet will assert if the address is misaligned, so do some
7831 * ugly stuff to avoid this. This will make sure it's an atomic access
7832     * as well as more or less remove any question about 8-bit or 32-bit accesses.
7833 */
7834 VBOXSTRICTRC rcStrict;
7835 uint8_t bUnmapInfo;
7836 uint32_t volatile *pu32;
7837 if ((GCPtr & 3) == 0)
7838 {
7839 /* The normal case, map the 32-bit bits around the accessed bit (40). */
7840 GCPtr += 2 + 2;
7841 rcStrict = iemMemMap(pVCpu, (void **)&pu32, &bUnmapInfo, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0);
7842 if (rcStrict != VINF_SUCCESS)
7843 return rcStrict;
7844        ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
7845 }
7846 else
7847 {
7848 /* The misaligned GDT/LDT case, map the whole thing. */
7849 rcStrict = iemMemMap(pVCpu, (void **)&pu32, &bUnmapInfo, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0);
7850 if (rcStrict != VINF_SUCCESS)
7851 return rcStrict;
7852 switch ((uintptr_t)pu32 & 3)
7853 {
7854 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
7855 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
7856 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
7857 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
7858 }
7859 }
7860
7861 return iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7862}
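/*
 * Worked example for the bit arithmetic above (illustration only): the
 * accessed bit is bit 40 of the 8-byte descriptor.  In the aligned case the
 * dword at offset 4 is mapped, so the bit index within it is 40 - 32 = 8,
 * which is what the ASMAtomicBitSet(pu32, 8) call uses; the misaligned case
 * instead compensates for the byte offset of the returned pointer.
 */
#if 0 /* example only */
{
    unsigned const iBitInDesc  = 40;                    /* accessed bit within the descriptor */
    unsigned const offMapped   = 2 + 2;                 /* offset of the mapped dword */
    unsigned const iBitInDword = iBitInDesc - offMapped * 8;
    Assert(iBitInDword == 8);
}
#endif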
7863
7864
7865#undef LOG_GROUP
7866#define LOG_GROUP LOG_GROUP_IEM
7867
7868/** @} */
7869
7870/** @name Opcode Helpers.
7871 * @{
7872 */
7873
7874/**
7875 * Calculates the effective address of a ModR/M memory operand.
7876 *
7877 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
7878 *
7879 * @return Strict VBox status code.
7880 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7881 * @param bRm The ModRM byte.
7882 * @param cbImmAndRspOffset - First byte: The size of any immediate
7883 * following the effective address opcode bytes
7884 * (only for RIP relative addressing).
7885 * - Second byte: RSP displacement (for POP [ESP]).
7886 * @param pGCPtrEff Where to return the effective address.
7887 */
7888VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset, PRTGCPTR pGCPtrEff) RT_NOEXCEPT
7889{
7890 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
7891# define SET_SS_DEF() \
7892 do \
7893 { \
7894 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
7895 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
7896 } while (0)
7897
7898 if (!IEM_IS_64BIT_CODE(pVCpu))
7899 {
7900/** @todo Check the effective address size crap! */
7901 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
7902 {
7903 uint16_t u16EffAddr;
7904
7905 /* Handle the disp16 form with no registers first. */
7906 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
7907 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
7908 else
7909 {
7910                /* Get the displacement. */
7911 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
7912 {
7913 case 0: u16EffAddr = 0; break;
7914 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
7915 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
7916 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
7917 }
7918
7919 /* Add the base and index registers to the disp. */
7920 switch (bRm & X86_MODRM_RM_MASK)
7921 {
7922 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
7923 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
7924 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
7925 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
7926 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
7927 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
7928 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
7929 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
7930 }
7931 }
7932
7933 *pGCPtrEff = u16EffAddr;
7934 }
7935 else
7936 {
7937 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
7938 uint32_t u32EffAddr;
7939
7940 /* Handle the disp32 form with no registers first. */
7941 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
7942 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
7943 else
7944 {
7945 /* Get the register (or SIB) value. */
7946 switch ((bRm & X86_MODRM_RM_MASK))
7947 {
7948 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
7949 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
7950 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
7951 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
7952 case 4: /* SIB */
7953 {
7954 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
7955
7956 /* Get the index and scale it. */
7957 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
7958 {
7959 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
7960 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
7961 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
7962 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
7963 case 4: u32EffAddr = 0; /*none */ break;
7964 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
7965 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
7966 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
7967 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7968 }
7969 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
7970
7971 /* add base */
7972 switch (bSib & X86_SIB_BASE_MASK)
7973 {
7974 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
7975 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
7976 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
7977 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
7978 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
7979 case 5:
7980 if ((bRm & X86_MODRM_MOD_MASK) != 0)
7981 {
7982 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
7983 SET_SS_DEF();
7984 }
7985 else
7986 {
7987 uint32_t u32Disp;
7988 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
7989 u32EffAddr += u32Disp;
7990 }
7991 break;
7992 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
7993 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
7994 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7995 }
7996 break;
7997 }
7998 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
7999 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8000 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8001 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8002 }
8003
8004 /* Get and add the displacement. */
8005 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8006 {
8007 case 0:
8008 break;
8009 case 1:
8010 {
8011 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8012 u32EffAddr += i8Disp;
8013 break;
8014 }
8015 case 2:
8016 {
8017 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8018 u32EffAddr += u32Disp;
8019 break;
8020 }
8021 default:
8022 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
8023 }
8024
8025 }
8026 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8027 *pGCPtrEff = u32EffAddr;
8028 }
8029 }
8030 else
8031 {
8032 uint64_t u64EffAddr;
8033
8034 /* Handle the rip+disp32 form with no registers first. */
8035 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8036 {
8037 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
8038 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
8039 }
8040 else
8041 {
8042 /* Get the register (or SIB) value. */
8043 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
8044 {
8045 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8046 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8047 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8048 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8049 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
8050 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8051 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8052 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8053 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8054 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8055 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8056 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8057 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8058 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8059 /* SIB */
8060 case 4:
8061 case 12:
8062 {
8063 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8064
8065 /* Get the index and scale it. */
8066 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
8067 {
8068 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8069 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8070 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8071 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8072 case 4: u64EffAddr = 0; /*none */ break;
8073 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
8074 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8075 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8076 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8077 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8078 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8079 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8080 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
8081 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8082 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8083 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8084 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8085 }
8086 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8087
8088 /* add base */
8089 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
8090 {
8091 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
8092 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
8093 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
8094 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
8095 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8096 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
8097 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
8098 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
8099 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
8100 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
8101 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
8102 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
8103 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
8104 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
8105 /* complicated encodings */
8106 case 5:
8107 case 13:
8108 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8109 {
8110 if (!pVCpu->iem.s.uRexB)
8111 {
8112 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
8113 SET_SS_DEF();
8114 }
8115 else
8116 u64EffAddr += pVCpu->cpum.GstCtx.r13;
8117 }
8118 else
8119 {
8120 uint32_t u32Disp;
8121 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8122 u64EffAddr += (int32_t)u32Disp;
8123 }
8124 break;
8125 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8126 }
8127 break;
8128 }
8129 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8130 }
8131
8132 /* Get and add the displacement. */
8133 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8134 {
8135 case 0:
8136 break;
8137 case 1:
8138 {
8139 int8_t i8Disp;
8140 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8141 u64EffAddr += i8Disp;
8142 break;
8143 }
8144 case 2:
8145 {
8146 uint32_t u32Disp;
8147 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8148 u64EffAddr += (int32_t)u32Disp;
8149 break;
8150 }
8151 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
8152 }
8153
8154 }
8155
8156 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
8157 *pGCPtrEff = u64EffAddr;
8158 else
8159 {
8160 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8161 *pGCPtrEff = u64EffAddr & UINT32_MAX;
8162 }
8163 }
8164
8165 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
8166 return VINF_SUCCESS;
8167}
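/*
 * Worked decode example for the 32-bit path above (illustration only, with
 * made-up register values): "mov eax, [ebx + esi*4 + 0x10]" encodes as
 * ModRM=0x44 (mod=01, reg=000, rm=100 -> SIB), SIB=0xB3 (scale=2 i.e. x4,
 * index=110=ESI, base=011=EBX) and disp8=0x10, so the code above computes
 * EffAddr = ebx + (esi << 2) + 0x10 with the default DS segment.
 */
#if 0 /* example only */
{
    uint8_t  const bSib     = 0xb3;
    int8_t   const i8Disp   = 0x10;
    uint32_t const uEbx     = UINT32_C(0x1000);   /* made-up register values */
    uint32_t const uEsi     = UINT32_C(0x20);
    uint32_t const uEffAddr = uEbx
                            + (uEsi << ((bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK))
                            + (uint32_t)(int32_t)i8Disp;
    Assert(uEffAddr == UINT32_C(0x1090));         /* 0x1000 + 0x20*4 + 0x10 */
}
#endif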
8168
8169
8170#ifdef IEM_WITH_SETJMP
8171/**
8172 * Calculates the effective address of a ModR/M memory operand.
8173 *
8174 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8175 *
8176 * May longjmp on internal error.
8177 *
8178 * @return The effective address.
8179 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8180 * @param bRm The ModRM byte.
8181 * @param cbImmAndRspOffset - First byte: The size of any immediate
8182 * following the effective address opcode bytes
8183 * (only for RIP relative addressing).
8184 * - Second byte: RSP displacement (for POP [ESP]).
8185 */
8186RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset) IEM_NOEXCEPT_MAY_LONGJMP
8187{
8188 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
8189# define SET_SS_DEF() \
8190 do \
8191 { \
8192 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
8193 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
8194 } while (0)
8195
8196 if (!IEM_IS_64BIT_CODE(pVCpu))
8197 {
8198/** @todo Check the effective address size crap! */
8199 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
8200 {
8201 uint16_t u16EffAddr;
8202
8203 /* Handle the disp16 form with no registers first. */
8204 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
8205 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
8206 else
8207 {
8208                /* Get the displacement. */
8209 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8210 {
8211 case 0: u16EffAddr = 0; break;
8212 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
8213 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
8214 default: AssertFailedStmt(IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_1)); /* (caller checked for these) */
8215 }
8216
8217 /* Add the base and index registers to the disp. */
8218 switch (bRm & X86_MODRM_RM_MASK)
8219 {
8220 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
8221 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
8222 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
8223 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
8224 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
8225 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
8226 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
8227 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
8228 }
8229 }
8230
8231 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
8232 return u16EffAddr;
8233 }
8234
8235 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8236 uint32_t u32EffAddr;
8237
8238 /* Handle the disp32 form with no registers first. */
8239 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8240 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
8241 else
8242 {
8243 /* Get the register (or SIB) value. */
8244 switch ((bRm & X86_MODRM_RM_MASK))
8245 {
8246 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8247 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8248 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8249 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8250 case 4: /* SIB */
8251 {
8252 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8253
8254 /* Get the index and scale it. */
8255 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
8256 {
8257 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8258 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8259 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8260 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8261 case 4: u32EffAddr = 0; /*none */ break;
8262 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
8263 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8264 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8265 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8266 }
8267 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8268
8269 /* add base */
8270 switch (bSib & X86_SIB_BASE_MASK)
8271 {
8272 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
8273 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
8274 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
8275 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
8276 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8277 case 5:
8278 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8279 {
8280 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
8281 SET_SS_DEF();
8282 }
8283 else
8284 {
8285 uint32_t u32Disp;
8286 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8287 u32EffAddr += u32Disp;
8288 }
8289 break;
8290 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
8291 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
8292 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8293 }
8294 break;
8295 }
8296 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
8297 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8298 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8299 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8300 }
8301
8302 /* Get and add the displacement. */
8303 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8304 {
8305 case 0:
8306 break;
8307 case 1:
8308 {
8309 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8310 u32EffAddr += i8Disp;
8311 break;
8312 }
8313 case 2:
8314 {
8315 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8316 u32EffAddr += u32Disp;
8317 break;
8318 }
8319 default:
8320 AssertFailedStmt(IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_2)); /* (caller checked for these) */
8321 }
8322 }
8323
8324 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8325 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
8326 return u32EffAddr;
8327 }
8328
8329 uint64_t u64EffAddr;
8330
8331 /* Handle the rip+disp32 form with no registers first. */
8332 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8333 {
8334 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
8335 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
8336 }
8337 else
8338 {
8339 /* Get the register (or SIB) value. */
8340 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
8341 {
8342 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8343 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8344 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8345 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8346 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
8347 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8348 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8349 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8350 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8351 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8352 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8353 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8354 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8355 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8356 /* SIB */
8357 case 4:
8358 case 12:
8359 {
8360 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8361
8362 /* Get the index and scale it. */
8363 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
8364 {
8365 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8366 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8367 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8368 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8369 case 4: u64EffAddr = 0; /*none */ break;
8370 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
8371 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8372 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8373 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8374 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8375 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8376 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8377 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
8378 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8379 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8380 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8381 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8382 }
8383 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8384
8385 /* add base */
8386 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
8387 {
8388 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
8389 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
8390 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
8391 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
8392 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8393 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
8394 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
8395 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
8396 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
8397 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
8398 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
8399 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
8400 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
8401 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
8402 /* complicated encodings */
8403 case 5:
8404 case 13:
8405 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8406 {
8407 if (!pVCpu->iem.s.uRexB)
8408 {
8409 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
8410 SET_SS_DEF();
8411 }
8412 else
8413 u64EffAddr += pVCpu->cpum.GstCtx.r13;
8414 }
8415 else
8416 {
8417 uint32_t u32Disp;
8418 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8419 u64EffAddr += (int32_t)u32Disp;
8420 }
8421 break;
8422 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8423 }
8424 break;
8425 }
8426 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8427 }
8428
8429 /* Get and add the displacement. */
8430 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8431 {
8432 case 0:
8433 break;
8434 case 1:
8435 {
8436 int8_t i8Disp;
8437 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8438 u64EffAddr += i8Disp;
8439 break;
8440 }
8441 case 2:
8442 {
8443 uint32_t u32Disp;
8444 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8445 u64EffAddr += (int32_t)u32Disp;
8446 break;
8447 }
8448 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
8449 }
8450
8451 }
8452
8453 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
8454 {
8455 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
8456 return u64EffAddr;
8457 }
8458 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8459 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
8460 return u64EffAddr & UINT32_MAX;
8461}
8462#endif /* IEM_WITH_SETJMP */
8463
8464
8465/**
8466 * Calculates the effective address of a ModR/M memory operand, extended version
8467 * for use in the recompilers.
8468 *
8469 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8470 *
8471 * @return Strict VBox status code.
8472 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8473 * @param bRm The ModRM byte.
8474 * @param cbImmAndRspOffset - First byte: The size of any immediate
8475 * following the effective address opcode bytes
8476 * (only for RIP relative addressing).
8477 * - Second byte: RSP displacement (for POP [ESP]).
8478 * @param pGCPtrEff Where to return the effective address.
8479 * @param puInfo Extra info: 32-bit displacement (bits 31:0) and
8480 * SIB byte (bits 39:32).
8481 */
8482VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset, PRTGCPTR pGCPtrEff, uint64_t *puInfo) RT_NOEXCEPT
8483{
8484 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
8485# define SET_SS_DEF() \
8486 do \
8487 { \
8488 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
8489 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
8490 } while (0)
8491
8492 uint64_t uInfo;
8493 if (!IEM_IS_64BIT_CODE(pVCpu))
8494 {
8495/** @todo Check the effective address size crap! */
8496 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
8497 {
8498 uint16_t u16EffAddr;
8499
8500 /* Handle the disp16 form with no registers first. */
8501 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
8502 {
8503 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
8504 uInfo = u16EffAddr;
8505 }
8506 else
8507 {
8508                /* Get the displacement. */
8509 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8510 {
8511 case 0: u16EffAddr = 0; break;
8512 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
8513 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
8514 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
8515 }
8516 uInfo = u16EffAddr;
8517
8518 /* Add the base and index registers to the disp. */
8519 switch (bRm & X86_MODRM_RM_MASK)
8520 {
8521 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
8522 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
8523 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
8524 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
8525 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
8526 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
8527 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
8528 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
8529 }
8530 }
8531
8532 *pGCPtrEff = u16EffAddr;
8533 }
8534 else
8535 {
8536 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8537 uint32_t u32EffAddr;
8538
8539 /* Handle the disp32 form with no registers first. */
8540 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8541 {
8542 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
8543 uInfo = u32EffAddr;
8544 }
8545 else
8546 {
8547 /* Get the register (or SIB) value. */
8548 uInfo = 0;
8549 switch ((bRm & X86_MODRM_RM_MASK))
8550 {
8551 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8552 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8553 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8554 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8555 case 4: /* SIB */
8556 {
8557 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8558 uInfo = (uint64_t)bSib << 32;
8559
8560 /* Get the index and scale it. */
8561 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
8562 {
8563 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8564 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8565 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8566 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8567 case 4: u32EffAddr = 0; /*none */ break;
8568 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
8569 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8570 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8571 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8572 }
8573 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8574
8575 /* add base */
8576 switch (bSib & X86_SIB_BASE_MASK)
8577 {
8578 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
8579 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
8580 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
8581 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
8582 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8583 case 5:
8584 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8585 {
8586 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
8587 SET_SS_DEF();
8588 }
8589 else
8590 {
8591 uint32_t u32Disp;
8592 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8593 u32EffAddr += u32Disp;
8594 uInfo |= u32Disp;
8595 }
8596 break;
8597 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
8598 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
8599 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8600 }
8601 break;
8602 }
8603 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
8604 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8605 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8606 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8607 }
8608
8609 /* Get and add the displacement. */
8610 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8611 {
8612 case 0:
8613 break;
8614 case 1:
8615 {
8616 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8617 u32EffAddr += i8Disp;
8618 uInfo |= (uint32_t)(int32_t)i8Disp;
8619 break;
8620 }
8621 case 2:
8622 {
8623 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8624 u32EffAddr += u32Disp;
8625 uInfo |= (uint32_t)u32Disp;
8626 break;
8627 }
8628 default:
8629 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
8630 }
8631
8632 }
8633 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8634 *pGCPtrEff = u32EffAddr;
8635 }
8636 }
8637 else
8638 {
8639 uint64_t u64EffAddr;
8640
8641 /* Handle the rip+disp32 form with no registers first. */
8642 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8643 {
8644 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
8645 uInfo = (uint32_t)u64EffAddr;
8646 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
8647 }
8648 else
8649 {
8650 /* Get the register (or SIB) value. */
8651 uInfo = 0;
8652 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
8653 {
8654 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8655 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8656 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8657 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8658 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
8659 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8660 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8661 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8662 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8663 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8664 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8665 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8666 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8667 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8668 /* SIB */
8669 case 4:
8670 case 12:
8671 {
8672 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8673 uInfo = (uint64_t)bSib << 32;
8674
8675 /* Get the index and scale it. */
8676 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
8677 {
8678 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8679 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8680 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8681 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8682 case 4: u64EffAddr = 0; /*none */ break;
8683 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
8684 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8685 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8686 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8687 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8688 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8689 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8690 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
8691 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8692 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8693 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8694 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8695 }
8696 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8697
8698 /* add base */
8699 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
8700 {
8701 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
8702 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
8703 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
8704 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
8705 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8706 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
8707 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
8708 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
8709 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
8710 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
8711 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
8712 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
8713 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
8714 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
8715 /* complicated encodings */
8716 case 5:
8717 case 13:
8718 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8719 {
8720 if (!pVCpu->iem.s.uRexB)
8721 {
8722 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
8723 SET_SS_DEF();
8724 }
8725 else
8726 u64EffAddr += pVCpu->cpum.GstCtx.r13;
8727 }
8728 else
8729 {
8730 uint32_t u32Disp;
8731 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8732 u64EffAddr += (int32_t)u32Disp;
8733 uInfo |= u32Disp;
8734 }
8735 break;
8736 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8737 }
8738 break;
8739 }
8740 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8741 }
8742
8743 /* Get and add the displacement. */
8744 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8745 {
8746 case 0:
8747 break;
8748 case 1:
8749 {
8750 int8_t i8Disp;
8751 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8752 u64EffAddr += i8Disp;
8753 uInfo |= (uint32_t)(int32_t)i8Disp;
8754 break;
8755 }
8756 case 2:
8757 {
8758 uint32_t u32Disp;
8759 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8760 u64EffAddr += (int32_t)u32Disp;
8761 uInfo |= u32Disp;
8762 break;
8763 }
8764 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
8765 }
8766
8767 }
8768
8769 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
8770 *pGCPtrEff = u64EffAddr;
8771 else
8772 {
8773 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8774 *pGCPtrEff = u64EffAddr & UINT32_MAX;
8775 }
8776 }
8777 *puInfo = uInfo;
8778
8779 Log5(("iemOpHlpCalcRmEffAddrEx: EffAddr=%#010RGv uInfo=%RX64\n", *pGCPtrEff, uInfo));
8780 return VINF_SUCCESS;
8781}
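/*
 * A small sketch of how a recompiler-side caller might unpack the uInfo value
 * returned above (only the documented packing is assumed: displacement in
 * bits 31:0, SIB byte in bits 39:32).
 */
#if 0 /* example only */
{
    uint64_t uInfo = 0; /* as returned via puInfo */
    uint32_t const u32Disp = (uint32_t)uInfo;        /* bits 31:0  */
    uint8_t  const bSib    = (uint8_t)(uInfo >> 32); /* bits 39:32 */
    RT_NOREF(u32Disp, bSib);
}
#endif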
8782
8783/** @} */
8784
8785
8786#ifdef LOG_ENABLED
8787/**
8788 * Logs the current instruction.
8789 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
8790 * @param fSameCtx Set if we have the same context information as the VMM,
8791 * clear if we may have already executed an instruction in
8792 * our debug context. When clear, we assume IEMCPU holds
8793 * valid CPU mode info.
8794 *
8795 * The @a fSameCtx parameter is now misleading and obsolete.
8796 * @param pszFunction The IEM function doing the execution.
8797 */
8798static void iemLogCurInstr(PVMCPUCC pVCpu, bool fSameCtx, const char *pszFunction) RT_NOEXCEPT
8799{
8800# ifdef IN_RING3
8801 if (LogIs2Enabled())
8802 {
8803 char szInstr[256];
8804 uint32_t cbInstr = 0;
8805 if (fSameCtx)
8806 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
8807 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
8808 szInstr, sizeof(szInstr), &cbInstr);
8809 else
8810 {
8811 uint32_t fFlags = 0;
8812 switch (IEM_GET_CPU_MODE(pVCpu))
8813 {
8814 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
8815 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
8816 case IEMMODE_16BIT:
8817 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) || pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
8818 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
8819 else
8820 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
8821 break;
8822 }
8823 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fFlags,
8824 szInstr, sizeof(szInstr), &cbInstr);
8825 }
8826
8827 PCX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
8828 Log2(("**** %s fExec=%x\n"
8829 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
8830 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
8831 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
8832 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
8833 " %s\n"
8834 , pszFunction, pVCpu->iem.s.fExec,
8835 pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ebx, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.esi, pVCpu->cpum.GstCtx.edi,
8836 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL, pVCpu->cpum.GstCtx.tr.Sel,
8837 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ds.Sel, pVCpu->cpum.GstCtx.es.Sel,
8838 pVCpu->cpum.GstCtx.fs.Sel, pVCpu->cpum.GstCtx.gs.Sel, pVCpu->cpum.GstCtx.eflags.u,
8839 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
8840 szInstr));
8841
8842 /* This stuff sucks atm. as it fills the log with MSRs. */
8843 //if (LogIs3Enabled())
8844 // DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
8845 }
8846 else
8847# endif
8848 LogFlow(("%s: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n", pszFunction, pVCpu->cpum.GstCtx.cs.Sel,
8849 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u));
8850 RT_NOREF_PV(pVCpu); RT_NOREF_PV(fSameCtx);
8851}
8852#endif /* LOG_ENABLED */
8853
8854
8855#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
8856/**
8857 * Deals with VMCPU_FF_VMX_APIC_WRITE, VMCPU_FF_VMX_MTF, VMCPU_FF_VMX_NMI_WINDOW,
8858 * VMCPU_FF_VMX_PREEMPT_TIMER and VMCPU_FF_VMX_INT_WINDOW.
8859 *
8860 * @returns Modified rcStrict.
8861 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8862 * @param rcStrict The instruction execution status.
8863 */
8864static VBOXSTRICTRC iemHandleNestedInstructionBoundaryFFs(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict) RT_NOEXCEPT
8865{
8866 Assert(CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)));
8867 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF))
8868 {
8869 /* VMX preemption timer takes priority over NMI-window exits. */
8870 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
8871 {
8872 rcStrict = iemVmxVmexitPreemptTimer(pVCpu);
8873 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER));
8874 }
8875 /*
8876 * Check remaining intercepts.
8877 *
8878 * NMI-window and Interrupt-window VM-exits.
8879 * Interrupt shadow (block-by-STI and Mov SS) inhibits interrupts and may also block NMIs.
8880 * Event injection during VM-entry takes priority over NMI-window and interrupt-window VM-exits.
8881 *
8882 * See Intel spec. 26.7.6 "NMI-Window Exiting".
8883 * See Intel spec. 26.7.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
8884 */
8885 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW | VMCPU_FF_VMX_INT_WINDOW)
8886 && !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
8887 && !TRPMHasTrap(pVCpu))
8888 {
8889 Assert(CPUMIsGuestVmxInterceptEvents(&pVCpu->cpum.GstCtx));
8890 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW)
8891 && CPUMIsGuestVmxVirtNmiBlocking(&pVCpu->cpum.GstCtx))
8892 {
8893 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_NMI_WINDOW, 0 /* u64ExitQual */);
8894 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW));
8895 }
8896 else if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW)
8897 && CPUMIsGuestVmxVirtIntrEnabled(&pVCpu->cpum.GstCtx))
8898 {
8899 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_INT_WINDOW, 0 /* u64ExitQual */);
8900 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW));
8901 }
8902 }
8903 }
8904 /* TPR-below threshold/APIC write has the highest priority. */
8905 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
8906 {
8907 rcStrict = iemVmxApicWriteEmulation(pVCpu);
8908 Assert(!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx));
8909 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE));
8910 }
8911 /* MTF takes priority over VMX-preemption timer. */
8912 else
8913 {
8914 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* u64ExitQual */);
8915 Assert(!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx));
8916 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF));
8917 }
8918 return rcStrict;
8919}
8920#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
8921
8922
8923/**
8924 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
8925 * IEMExecOneWithPrefetchedByPC.
8926 *
8927 * Similar code is found in IEMExecLots.
8928 *
8929 * @return Strict VBox status code.
8930 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
8931 * @param fExecuteInhibit If set, execute the instruction following CLI,
8932 * POP SS and MOV SS,GR.
8933 * @param pszFunction The calling function name.
8934 */
8935DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPUCC pVCpu, bool fExecuteInhibit, const char *pszFunction)
8936{
8937 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
8938 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
8939 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
8940 RT_NOREF_PV(pszFunction);
8941
8942#ifdef IEM_WITH_SETJMP
8943 VBOXSTRICTRC rcStrict;
8944 IEM_TRY_SETJMP(pVCpu, rcStrict)
8945 {
8946 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
8947 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
8948 }
8949 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
8950 {
8951 pVCpu->iem.s.cLongJumps++;
8952 }
8953 IEM_CATCH_LONGJMP_END(pVCpu);
8954#else
8955 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
8956 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
8957#endif
8958 if (rcStrict == VINF_SUCCESS)
8959 pVCpu->iem.s.cInstructions++;
8960 if (pVCpu->iem.s.cActiveMappings > 0)
8961 {
8962 Assert(rcStrict != VINF_SUCCESS);
8963 iemMemRollback(pVCpu);
8964 }
8965 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
8966 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
8967 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
8968
8969//#ifdef DEBUG
8970// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
8971//#endif
8972
8973#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
8974 /*
8975 * Perform any VMX nested-guest instruction boundary actions.
8976 *
8977 * If any of these causes a VM-exit, we must skip executing the next
8978 * instruction (would run into stale page tables). A VM-exit makes sure
8979 * there is no interrupt-inhibition, so that should ensure we do not go on
8980 * to execute the next instruction. Clearing fExecuteInhibit is
8981 * problematic because of the setjmp/longjmp clobbering above.
8982 */
8983 if ( !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
8984 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)
8985 || rcStrict != VINF_SUCCESS)
8986 { /* likely */ }
8987 else
8988 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
8989#endif
8990
8991 /* Execute the next instruction as well if a cli, pop ss or
8992 mov ss, Gr has just completed successfully. */
8993 if ( fExecuteInhibit
8994 && rcStrict == VINF_SUCCESS
8995 && CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx))
8996 {
8997 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fExec & (IEM_F_BYPASS_HANDLERS | IEM_F_X86_DISREGARD_LOCK));
8998 if (rcStrict == VINF_SUCCESS)
8999 {
9000#ifdef LOG_ENABLED
9001 iemLogCurInstr(pVCpu, false, pszFunction);
9002#endif
9003#ifdef IEM_WITH_SETJMP
9004 IEM_TRY_SETJMP_AGAIN(pVCpu, rcStrict)
9005 {
9006 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9007 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9008 }
9009 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
9010 {
9011 pVCpu->iem.s.cLongJumps++;
9012 }
9013 IEM_CATCH_LONGJMP_END(pVCpu);
9014#else
9015 IEM_OPCODE_GET_FIRST_U8(&b);
9016 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9017#endif
9018 if (rcStrict == VINF_SUCCESS)
9019 {
9020 pVCpu->iem.s.cInstructions++;
9021#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9022 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9023 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW))
9024 { /* likely */ }
9025 else
9026 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9027#endif
9028 }
9029 if (pVCpu->iem.s.cActiveMappings > 0)
9030 {
9031 Assert(rcStrict != VINF_SUCCESS);
9032 iemMemRollback(pVCpu);
9033 }
9034 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9035 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9036 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9037 }
9038 else if (pVCpu->iem.s.cActiveMappings > 0)
9039 iemMemRollback(pVCpu);
9040 /** @todo drop this after we bake this change into RIP advancing. */
9041 CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx); /* hope this is correct for all exceptional cases... */
9042 }
9043
9044 /*
9045 * Return value fiddling, statistics and sanity assertions.
9046 */
9047 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9048
9049 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
9050 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
9051 return rcStrict;
9052}
9053
9054
9055/**
9056 * Execute one instruction.
9057 *
9058 * @return Strict VBox status code.
9059 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9060 */
9061VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPUCC pVCpu)
9062{
9063 AssertCompile(sizeof(pVCpu->iem.s) <= sizeof(pVCpu->iem.padding)); /* (tstVMStruct can't do its job w/o instruction stats) */
9064#ifdef LOG_ENABLED
9065 iemLogCurInstr(pVCpu, true, "IEMExecOne");
9066#endif
9067
9068 /*
9069 * Do the decoding and emulation.
9070 */
9071 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9072 if (rcStrict == VINF_SUCCESS)
9073 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOne");
9074 else if (pVCpu->iem.s.cActiveMappings > 0)
9075 iemMemRollback(pVCpu);
9076
9077 if (rcStrict != VINF_SUCCESS)
9078 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9079 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9080 return rcStrict;
9081}
9082
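/*
 * Illustrative usage sketch (an addition for exposition, not part of the
 * original source): how a caller on the EMT might step the guest a few
 * instructions at a time via the API above.  The loop and the step count are
 * hypothetical; only the IEMExecOne() call and the status handling follow the
 * function as defined here.
 *
 *      for (unsigned iStep = 0; iStep < 8; iStep++)
 *      {
 *          VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
 *          if (rcStrict != VINF_SUCCESS)
 *              return rcStrict;    // hand informational statuses and errors back to EM
 *      }
 *      return VINF_SUCCESS;
 */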
9083
9084VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPUCC pVCpu, uint32_t *pcbWritten)
9085{
9086 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
9087 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9088 if (rcStrict == VINF_SUCCESS)
9089 {
9090 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneEx");
9091 if (pcbWritten)
9092 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
9093 }
9094 else if (pVCpu->iem.s.cActiveMappings > 0)
9095 iemMemRollback(pVCpu);
9096
9097 return rcStrict;
9098}
9099
9100
9101VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPUCC pVCpu, uint64_t OpcodeBytesPC,
9102 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
9103{
9104 VBOXSTRICTRC rcStrict;
9105 if ( cbOpcodeBytes
9106 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
9107 {
9108 iemInitDecoder(pVCpu, 0 /*fExecOpts*/);
9109#ifdef IEM_WITH_CODE_TLB
9110 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
9111 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
9112 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
9113 pVCpu->iem.s.offCurInstrStart = 0;
9114 pVCpu->iem.s.offInstrNextByte = 0;
9115 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
9116#else
9117 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
9118 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
9119#endif
9120 rcStrict = VINF_SUCCESS;
9121 }
9122 else
9123 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9124 if (rcStrict == VINF_SUCCESS)
9125 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneWithPrefetchedByPC");
9126 else if (pVCpu->iem.s.cActiveMappings > 0)
9127 iemMemRollback(pVCpu);
9128
9129 return rcStrict;
9130}
9131
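/*
 * Illustrative usage sketch (an addition for exposition, not part of the
 * original source): a caller that has already fetched the bytes at the current
 * RIP (e.g. from exit information) can hand them to IEM so the opcode prefetch
 * is skipped; if RIP no longer matches OpcodeBytesPC the bytes are ignored and
 * a normal prefetch is done.  The abOpcode buffer and cbOpcode count are
 * hypothetical.
 *
 *      uint8_t abOpcode[15];   // filled in by the (hypothetical) caller
 *      size_t  cbOpcode = sizeof(abOpcode);
 *      VBOXSTRICTRC rcStrict = IEMExecOneWithPrefetchedByPC(pVCpu, pVCpu->cpum.GstCtx.rip,
 *                                                           abOpcode, cbOpcode);
 */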
9132
9133VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPUCC pVCpu, uint32_t *pcbWritten)
9134{
9135 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
9136 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_BYPASS_HANDLERS);
9137 if (rcStrict == VINF_SUCCESS)
9138 {
9139 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassEx");
9140 if (pcbWritten)
9141 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
9142 }
9143 else if (pVCpu->iem.s.cActiveMappings > 0)
9144 iemMemRollback(pVCpu);
9145
9146 return rcStrict;
9147}
9148
9149
9150VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPUCC pVCpu, uint64_t OpcodeBytesPC,
9151 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
9152{
9153 VBOXSTRICTRC rcStrict;
9154 if ( cbOpcodeBytes
9155 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
9156 {
9157 iemInitDecoder(pVCpu, IEM_F_BYPASS_HANDLERS);
9158#ifdef IEM_WITH_CODE_TLB
9159 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
9160 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
9161 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
9162 pVCpu->iem.s.offCurInstrStart = 0;
9163 pVCpu->iem.s.offInstrNextByte = 0;
9164 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
9165#else
9166 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
9167 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
9168#endif
9169 rcStrict = VINF_SUCCESS;
9170 }
9171 else
9172 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_BYPASS_HANDLERS);
9173 if (rcStrict == VINF_SUCCESS)
9174 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPC");
9175 else if (pVCpu->iem.s.cActiveMappings > 0)
9176 iemMemRollback(pVCpu);
9177
9178 return rcStrict;
9179}
9180
9181
9182/**
9183 * For handling split cacheline lock operations when the host has split-lock
9184 * detection enabled.
9185 *
9186 * This will cause the interpreter to disregard the lock prefix and implicit
9187 * locking (xchg).
9188 *
9189 * @returns Strict VBox status code.
9190 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9191 */
9192VMMDECL(VBOXSTRICTRC) IEMExecOneIgnoreLock(PVMCPUCC pVCpu)
9193{
9194 /*
9195 * Do the decoding and emulation.
9196 */
9197 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_X86_DISREGARD_LOCK);
9198 if (rcStrict == VINF_SUCCESS)
9199 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneIgnoreLock");
9200 else if (pVCpu->iem.s.cActiveMappings > 0)
9201 iemMemRollback(pVCpu);
9202
9203 if (rcStrict != VINF_SUCCESS)
9204 LogFlow(("IEMExecOneIgnoreLock: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9205 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9206 return rcStrict;
9207}
9208
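/*
 * Illustrative usage sketch (an addition for exposition, not part of the
 * original source): when the host signals a split-lock violation for a LOCKed
 * guest access, the instruction can be replayed with locking disregarded as
 * described above.  The fHostSplitLockDetected flag is hypothetical.
 *
 *      VBOXSTRICTRC rcStrict = fHostSplitLockDetected
 *                            ? IEMExecOneIgnoreLock(pVCpu)
 *                            : IEMExecOne(pVCpu);
 */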
9209
9210/**
9211 * Code common to IEMExecLots and IEMExecRecompilerThreaded that attempts to
9212 * inject a pending TRPM trap.
9213 */
9214VBOXSTRICTRC iemExecInjectPendingTrap(PVMCPUCC pVCpu)
9215{
9216 Assert(TRPMHasTrap(pVCpu));
9217
9218 if ( !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
9219 && !CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx))
9220 {
9221 /** @todo Can we centralize this under CPUMCanInjectInterrupt()? */
9222#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9223 bool fIntrEnabled = CPUMGetGuestGif(&pVCpu->cpum.GstCtx);
9224 if (fIntrEnabled)
9225 {
9226 if (!CPUMIsGuestInNestedHwvirtMode(IEM_GET_CTX(pVCpu)))
9227 fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
9228 else if (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
9229 fIntrEnabled = CPUMIsGuestVmxPhysIntrEnabled(IEM_GET_CTX(pVCpu));
9230 else
9231 {
9232 Assert(CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)));
9233 fIntrEnabled = CPUMIsGuestSvmPhysIntrEnabled(pVCpu, IEM_GET_CTX(pVCpu));
9234 }
9235 }
9236#else
9237 bool fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
9238#endif
9239 if (fIntrEnabled)
9240 {
9241 uint8_t u8TrapNo;
9242 TRPMEVENT enmType;
9243 uint32_t uErrCode;
9244 RTGCPTR uCr2;
9245 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /*pu8InstLen*/, NULL /*fIcebp*/);
9246 AssertRC(rc2);
9247 Assert(enmType == TRPM_HARDWARE_INT);
9248 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /*cbInstr*/);
9249
9250 TRPMResetTrap(pVCpu);
9251
9252#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9253 /* Injecting an event may cause a VM-exit. */
9254 if ( rcStrict != VINF_SUCCESS
9255 && rcStrict != VINF_IEM_RAISED_XCPT)
9256 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
9257#else
9258 NOREF(rcStrict);
9259#endif
9260 }
9261 }
9262
9263 return VINF_SUCCESS;
9264}
9265
9266
9267VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPUCC pVCpu, uint32_t cMaxInstructions, uint32_t cPollRate, uint32_t *pcInstructions)
9268{
9269 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
9270 AssertMsg(RT_IS_POWER_OF_TWO(cPollRate + 1), ("%#x\n", cPollRate));
9271 Assert(cMaxInstructions > 0);
9272
9273 /*
9274 * See if there is an interrupt pending in TRPM, inject it if we can.
9275 */
9276 /** @todo What if we are injecting an exception and not an interrupt? Is that
9277 * possible here? For now we assert it is indeed only an interrupt. */
9278 if (!TRPMHasTrap(pVCpu))
9279 { /* likely */ }
9280 else
9281 {
9282 VBOXSTRICTRC rcStrict = iemExecInjectPendingTrap(pVCpu);
9283 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9284 { /*likely */ }
9285 else
9286 return rcStrict;
9287 }
9288
9289 /*
9290 * Initial decoder init w/ prefetch, then setup setjmp.
9291 */
9292 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9293 if (rcStrict == VINF_SUCCESS)
9294 {
9295#ifdef IEM_WITH_SETJMP
9296 pVCpu->iem.s.cActiveMappings = 0; /** @todo wtf? */
9297 IEM_TRY_SETJMP(pVCpu, rcStrict)
9298#endif
9299 {
9300 /*
9301 * The run loop. We limit ourselves to the caller-specified maximum number of instructions.
9302 */
9303 uint32_t cMaxInstructionsGccStupidity = cMaxInstructions;
9304 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
9305 for (;;)
9306 {
9307 /*
9308 * Log the state.
9309 */
9310#ifdef LOG_ENABLED
9311 iemLogCurInstr(pVCpu, true, "IEMExecLots");
9312#endif
9313
9314 /*
9315 * Do the decoding and emulation.
9316 */
9317 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9318 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9319#ifdef VBOX_STRICT
9320 CPUMAssertGuestRFlagsCookie(pVM, pVCpu);
9321#endif
9322 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9323 {
9324 Assert(pVCpu->iem.s.cActiveMappings == 0);
9325 pVCpu->iem.s.cInstructions++;
9326
9327#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9328 /* Perform any VMX nested-guest instruction boundary actions. */
9329 uint64_t fCpu = pVCpu->fLocalForcedActions;
9330 if (!(fCpu & ( VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9331 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)))
9332 { /* likely */ }
9333 else
9334 {
9335 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9336 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9337 fCpu = pVCpu->fLocalForcedActions;
9338 else
9339 {
9340 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9341 break;
9342 }
9343 }
9344#endif
9345 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
9346 {
9347#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
9348 uint64_t fCpu = pVCpu->fLocalForcedActions;
9349#endif
9350 fCpu &= VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
9351 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
9352 | VMCPU_FF_TLB_FLUSH
9353 | VMCPU_FF_UNHALT );
9354
9355 if (RT_LIKELY( ( !fCpu
9356 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
9357 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF) )
9358 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) ))
9359 {
9360 if (--cMaxInstructionsGccStupidity > 0)
9361 {
9362 /* Poll timers every now and then according to the caller's specs. */
9363 if ( (cMaxInstructionsGccStupidity & cPollRate) != 0
9364 || !TMTimerPollBool(pVM, pVCpu))
9365 {
9366 Assert(pVCpu->iem.s.cActiveMappings == 0);
9367 iemReInitDecoder(pVCpu);
9368 continue;
9369 }
9370 }
9371 }
9372 }
9373 Assert(pVCpu->iem.s.cActiveMappings == 0);
9374 }
9375 else if (pVCpu->iem.s.cActiveMappings > 0)
9376 iemMemRollback(pVCpu);
9377 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9378 break;
9379 }
9380 }
9381#ifdef IEM_WITH_SETJMP
9382 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
9383 {
9384 if (pVCpu->iem.s.cActiveMappings > 0)
9385 iemMemRollback(pVCpu);
9386# if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9387 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9388# endif
9389 pVCpu->iem.s.cLongJumps++;
9390 }
9391 IEM_CATCH_LONGJMP_END(pVCpu);
9392#endif
9393
9394 /*
9395 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
9396 */
9397 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
9398 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
9399 }
9400 else
9401 {
9402 if (pVCpu->iem.s.cActiveMappings > 0)
9403 iemMemRollback(pVCpu);
9404
9405#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9406 /*
9407 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
9408 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
9409 */
9410 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9411#endif
9412 }
9413
9414 /*
9415 * Maybe re-enter raw-mode and log.
9416 */
9417 if (rcStrict != VINF_SUCCESS)
9418 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9419 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9420 if (pcInstructions)
9421 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
9422 return rcStrict;
9423}
9424
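/*
 * Illustrative usage sketch (an addition for exposition, not part of the
 * original source): running a batch of instructions and polling timers every
 * 32nd instruction.  Note the assertion above: cPollRate + 1 must be a power
 * of two, so the value is effectively used as a mask.  The cInstrsExecuted
 * variable is hypothetical.
 *
 *      uint32_t cInstrsExecuted = 0;
 *      VBOXSTRICTRC rcStrict = IEMExecLots(pVCpu,
 *                                          4096,             // cMaxInstructions
 *                                          31,               // cPollRate: 31 + 1 is a power of two
 *                                          &cInstrsExecuted);
 */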
9425
9426/**
9427 * Interface used by EMExecuteExec, does exit statistics and limits.
9428 *
9429 * @returns Strict VBox status code.
9430 * @param pVCpu The cross context virtual CPU structure.
9431 * @param fWillExit To be defined.
9432 * @param cMinInstructions Minimum number of instructions to execute before checking for FFs.
9433 * @param cMaxInstructions Maximum number of instructions to execute.
9434 * @param cMaxInstructionsWithoutExits
9435 * The max number of instructions without exits.
9436 * @param pStats Where to return statistics.
9437 */
9438VMMDECL(VBOXSTRICTRC) IEMExecForExits(PVMCPUCC pVCpu, uint32_t fWillExit, uint32_t cMinInstructions, uint32_t cMaxInstructions,
9439 uint32_t cMaxInstructionsWithoutExits, PIEMEXECFOREXITSTATS pStats)
9440{
9441 NOREF(fWillExit); /** @todo define flexible exit crits */
9442
9443 /*
9444 * Initialize return stats.
9445 */
9446 pStats->cInstructions = 0;
9447 pStats->cExits = 0;
9448 pStats->cMaxExitDistance = 0;
9449 pStats->cReserved = 0;
9450
9451 /*
9452 * Initial decoder init w/ prefetch, then setup setjmp.
9453 */
9454 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9455 if (rcStrict == VINF_SUCCESS)
9456 {
9457#ifdef IEM_WITH_SETJMP
9458 pVCpu->iem.s.cActiveMappings = 0; /** @todo wtf?!? */
9459 IEM_TRY_SETJMP(pVCpu, rcStrict)
9460#endif
9461 {
9462#ifdef IN_RING0
9463 bool const fCheckPreemptionPending = !RTThreadPreemptIsPossible() || !RTThreadPreemptIsEnabled(NIL_RTTHREAD);
9464#endif
9465 uint32_t cInstructionSinceLastExit = 0;
9466
9467 /*
9468 * The run loop. We limit ourselves to the caller-specified maximum number of instructions.
9469 */
9470 PVM pVM = pVCpu->CTX_SUFF(pVM);
9471 for (;;)
9472 {
9473 /*
9474 * Log the state.
9475 */
9476#ifdef LOG_ENABLED
9477 iemLogCurInstr(pVCpu, true, "IEMExecForExits");
9478#endif
9479
9480 /*
9481 * Do the decoding and emulation.
9482 */
9483 uint32_t const cPotentialExits = pVCpu->iem.s.cPotentialExits;
9484
9485 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9486 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9487
9488 if ( cPotentialExits != pVCpu->iem.s.cPotentialExits
9489 && cInstructionSinceLastExit > 0 /* don't count the first */ )
9490 {
9491 pStats->cExits += 1;
9492 if (cInstructionSinceLastExit > pStats->cMaxExitDistance)
9493 pStats->cMaxExitDistance = cInstructionSinceLastExit;
9494 cInstructionSinceLastExit = 0;
9495 }
9496
9497 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9498 {
9499 Assert(pVCpu->iem.s.cActiveMappings == 0);
9500 pVCpu->iem.s.cInstructions++;
9501 pStats->cInstructions++;
9502 cInstructionSinceLastExit++;
9503
9504#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9505 /* Perform any VMX nested-guest instruction boundary actions. */
9506 uint64_t fCpu = pVCpu->fLocalForcedActions;
9507 if (!(fCpu & ( VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9508 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)))
9509 { /* likely */ }
9510 else
9511 {
9512 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9513 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9514 fCpu = pVCpu->fLocalForcedActions;
9515 else
9516 {
9517 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9518 break;
9519 }
9520 }
9521#endif
9522 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
9523 {
9524#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
9525 uint64_t fCpu = pVCpu->fLocalForcedActions;
9526#endif
9527 fCpu &= VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
9528 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
9529 | VMCPU_FF_TLB_FLUSH
9530 | VMCPU_FF_UNHALT );
9531 if (RT_LIKELY( ( ( !fCpu
9532 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
9533 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF))
9534 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) )
9535 || pStats->cInstructions < cMinInstructions))
9536 {
9537 if (pStats->cInstructions < cMaxInstructions)
9538 {
9539 if (cInstructionSinceLastExit <= cMaxInstructionsWithoutExits)
9540 {
9541#ifdef IN_RING0
9542 if ( !fCheckPreemptionPending
9543 || !RTThreadPreemptIsPending(NIL_RTTHREAD))
9544#endif
9545 {
9546 Assert(pVCpu->iem.s.cActiveMappings == 0);
9547 iemReInitDecoder(pVCpu);
9548 continue;
9549 }
9550#ifdef IN_RING0
9551 rcStrict = VINF_EM_RAW_INTERRUPT;
9552 break;
9553#endif
9554 }
9555 }
9556 }
9557 Assert(!(fCpu & VMCPU_FF_IEM));
9558 }
9559 Assert(pVCpu->iem.s.cActiveMappings == 0);
9560 }
9561 else if (pVCpu->iem.s.cActiveMappings > 0)
9562 iemMemRollback(pVCpu);
9563 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9564 break;
9565 }
9566 }
9567#ifdef IEM_WITH_SETJMP
9568 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
9569 {
9570 if (pVCpu->iem.s.cActiveMappings > 0)
9571 iemMemRollback(pVCpu);
9572 pVCpu->iem.s.cLongJumps++;
9573 }
9574 IEM_CATCH_LONGJMP_END(pVCpu);
9575#endif
9576
9577 /*
9578 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
9579 */
9580 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
9581 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
9582 }
9583 else
9584 {
9585 if (pVCpu->iem.s.cActiveMappings > 0)
9586 iemMemRollback(pVCpu);
9587
9588#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9589 /*
9590 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
9591 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
9592 */
9593 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9594#endif
9595 }
9596
9597 /*
9598 * Maybe re-enter raw-mode and log.
9599 */
9600 if (rcStrict != VINF_SUCCESS)
9601 LogFlow(("IEMExecForExits: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc; ins=%u exits=%u maxdist=%u\n",
9602 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp,
9603 pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict), pStats->cInstructions, pStats->cExits, pStats->cMaxExitDistance));
9604 return rcStrict;
9605}
9606
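/*
 * Illustrative usage sketch (an addition for exposition, not part of the
 * original source): letting IEM run until it either reaches the instruction
 * budget or keeps exiting, then inspecting the returned statistics.  The
 * budget values are hypothetical; the IEMEXECFOREXITSTATS members are the
 * ones initialized above.
 *
 *      IEMEXECFOREXITSTATS Stats;
 *      VBOXSTRICTRC rcStrict = IEMExecForExits(pVCpu,
 *                                              0,        // fWillExit (not yet defined, see above)
 *                                              64,       // cMinInstructions
 *                                              4096,     // cMaxInstructions
 *                                              512,      // cMaxInstructionsWithoutExits
 *                                              &Stats);
 *      LogFlow(("IEMExecForExits: %u instructions, %u exits, max distance %u\n",
 *               Stats.cInstructions, Stats.cExits, Stats.cMaxExitDistance));
 */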
9607
9608/**
9609 * Injects a trap, fault, abort, software interrupt or external interrupt.
9610 *
9611 * The parameter list matches TRPMQueryTrapAll pretty closely.
9612 *
9613 * @returns Strict VBox status code.
9614 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9615 * @param u8TrapNo The trap number.
9616 * @param enmType What type is it (trap/fault/abort), software
9617 * interrupt or hardware interrupt.
9618 * @param uErrCode The error code if applicable.
9619 * @param uCr2 The CR2 value if applicable.
9620 * @param cbInstr The instruction length (only relevant for
9621 * software interrupts).
9622 */
9623VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPUCC pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
9624 uint8_t cbInstr)
9625{
9626 iemInitDecoder(pVCpu, 0 /*fExecOpts*/); /** @todo wrong init function! */
9627#ifdef DBGFTRACE_ENABLED
9628 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
9629 u8TrapNo, enmType, uErrCode, uCr2);
9630#endif
9631
9632 uint32_t fFlags;
9633 switch (enmType)
9634 {
9635 case TRPM_HARDWARE_INT:
9636 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
9637 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
9638 uErrCode = uCr2 = 0;
9639 break;
9640
9641 case TRPM_SOFTWARE_INT:
9642 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
9643 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
9644 uErrCode = uCr2 = 0;
9645 break;
9646
9647 case TRPM_TRAP:
9648 case TRPM_NMI: /** @todo Distinguish NMI from exception 2. */
9649 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
9650 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
9651 if (u8TrapNo == X86_XCPT_PF)
9652 fFlags |= IEM_XCPT_FLAGS_CR2;
9653 switch (u8TrapNo)
9654 {
9655 case X86_XCPT_DF:
9656 case X86_XCPT_TS:
9657 case X86_XCPT_NP:
9658 case X86_XCPT_SS:
9659 case X86_XCPT_PF:
9660 case X86_XCPT_AC:
9661 case X86_XCPT_GP:
9662 fFlags |= IEM_XCPT_FLAGS_ERR;
9663 break;
9664 }
9665 break;
9666
9667 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9668 }
9669
9670 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
9671
9672 if (pVCpu->iem.s.cActiveMappings > 0)
9673 iemMemRollback(pVCpu);
9674
9675 return rcStrict;
9676}
9677
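/*
 * Illustrative usage sketches (an addition for exposition, not part of the
 * original source): injecting a page fault with error code and fault address,
 * and injecting an external interrupt vector.  uErrCode, GCPtrFault and
 * uVector are hypothetical.
 *
 *      // #PF: the error code and CR2 value are delivered to the guest; cbInstr is 0.
 *      rcStrict = IEMInjectTrap(pVCpu, X86_XCPT_PF, TRPM_TRAP, uErrCode, GCPtrFault, 0);
 *
 *      // External hardware interrupt: uErrCode, uCr2 and cbInstr are ignored.
 *      rcStrict = IEMInjectTrap(pVCpu, uVector, TRPM_HARDWARE_INT, 0, 0, 0);
 */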
9678
9679/**
9680 * Injects the active TRPM event.
9681 *
9682 * @returns Strict VBox status code.
9683 * @param pVCpu The cross context virtual CPU structure.
9684 */
9685VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPUCC pVCpu)
9686{
9687#ifndef IEM_IMPLEMENTS_TASKSWITCH
9688 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
9689#else
9690 uint8_t u8TrapNo;
9691 TRPMEVENT enmType;
9692 uint32_t uErrCode;
9693 RTGCUINTPTR uCr2;
9694 uint8_t cbInstr;
9695 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr, NULL /* fIcebp */);
9696 if (RT_FAILURE(rc))
9697 return rc;
9698
9699 /** @todo r=ramshankar: Pass ICEBP info. to IEMInjectTrap() below and handle
9700 * ICEBP \#DB injection as a special case. */
9701 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
9702#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
9703 if (rcStrict == VINF_SVM_VMEXIT)
9704 rcStrict = VINF_SUCCESS;
9705#endif
9706#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9707 if (rcStrict == VINF_VMX_VMEXIT)
9708 rcStrict = VINF_SUCCESS;
9709#endif
9710 /** @todo Are there any other codes that imply the event was successfully
9711 * delivered to the guest? See @bugref{6607}. */
9712 if ( rcStrict == VINF_SUCCESS
9713 || rcStrict == VINF_IEM_RAISED_XCPT)
9714 TRPMResetTrap(pVCpu);
9715
9716 return rcStrict;
9717#endif
9718}
9719
9720
9721VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
9722{
9723 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
9724 return VERR_NOT_IMPLEMENTED;
9725}
9726
9727
9728VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
9729{
9730 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
9731 return VERR_NOT_IMPLEMENTED;
9732}
9733
9734
9735/**
9736 * Interface for HM and EM for executing string I/O OUT (write) instructions.
9737 *
9738 * This API ASSUMES that the caller has already verified that the guest code is
9739 * allowed to access the I/O port. (The I/O port is in the DX register in the
9740 * guest state.)
9741 *
9742 * @returns Strict VBox status code.
9743 * @param pVCpu The cross context virtual CPU structure.
9744 * @param cbValue The size of the I/O port access (1, 2, or 4).
9745 * @param enmAddrMode The addressing mode.
9746 * @param fRepPrefix Indicates whether a repeat prefix is used
9747 * (doesn't matter which for this instruction).
9748 * @param cbInstr The instruction length in bytes.
9749 * @param iEffSeg The effective segment address.
9750 * @param fIoChecked Whether the access to the I/O port has been
9751 * checked or not. It's typically checked in the
9752 * HM scenario.
9753 */
9754VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
9755 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
9756{
9757 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
9758 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
9759
9760 /*
9761 * State init.
9762 */
9763 iemInitExec(pVCpu, 0 /*fExecOpts*/);
9764
9765 /*
9766 * Switch orgy for getting to the right handler.
9767 */
9768 VBOXSTRICTRC rcStrict;
9769 if (fRepPrefix)
9770 {
9771 switch (enmAddrMode)
9772 {
9773 case IEMMODE_16BIT:
9774 switch (cbValue)
9775 {
9776 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9777 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9778 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9779 default:
9780 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
9781 }
9782 break;
9783
9784 case IEMMODE_32BIT:
9785 switch (cbValue)
9786 {
9787 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9788 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9789 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9790 default:
9791 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
9792 }
9793 break;
9794
9795 case IEMMODE_64BIT:
9796 switch (cbValue)
9797 {
9798 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9799 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9800 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9801 default:
9802 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
9803 }
9804 break;
9805
9806 default:
9807 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
9808 }
9809 }
9810 else
9811 {
9812 switch (enmAddrMode)
9813 {
9814 case IEMMODE_16BIT:
9815 switch (cbValue)
9816 {
9817 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9818 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9819 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9820 default:
9821 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
9822 }
9823 break;
9824
9825 case IEMMODE_32BIT:
9826 switch (cbValue)
9827 {
9828 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9829 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9830 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9831 default:
9832 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
9833 }
9834 break;
9835
9836 case IEMMODE_64BIT:
9837 switch (cbValue)
9838 {
9839 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9840 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9841 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9842 default:
9843 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
9844 }
9845 break;
9846
9847 default:
9848 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
9849 }
9850 }
9851
9852 if (pVCpu->iem.s.cActiveMappings)
9853 iemMemRollback(pVCpu);
9854
9855 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
9856}
9857
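/*
 * Illustrative usage sketch (an addition for exposition, not part of the
 * original source): emulating a REP OUTSB with 32-bit addressing after the
 * caller has verified access to the I/O port in DX.  The cbInstr value would
 * come from the decoded exit information and is hypothetical here.
 *
 *      VBOXSTRICTRC rcStrict = IEMExecStringIoWrite(pVCpu,
 *                                                   1,               // cbValue: byte sized
 *                                                   IEMMODE_32BIT,   // enmAddrMode
 *                                                   true,            // fRepPrefix
 *                                                   cbInstr,
 *                                                   X86_SREG_DS,     // iEffSeg
 *                                                   true);           // fIoChecked
 */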
9858
9859/**
9860 * Interface for HM and EM for executing string I/O IN (read) instructions.
9861 *
9862 * This API ASSUMES that the caller has already verified that the guest code is
9863 * allowed to access the I/O port. (The I/O port is in the DX register in the
9864 * guest state.)
9865 *
9866 * @returns Strict VBox status code.
9867 * @param pVCpu The cross context virtual CPU structure.
9868 * @param cbValue The size of the I/O port access (1, 2, or 4).
9869 * @param enmAddrMode The addressing mode.
9870 * @param fRepPrefix Indicates whether a repeat prefix is used
9871 * (doesn't matter which for this instruction).
9872 * @param cbInstr The instruction length in bytes.
9873 * @param fIoChecked Whether the access to the I/O port has been
9874 * checked or not. It's typically checked in the
9875 * HM scenario.
9876 */
9877VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
9878 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
9879{
9880 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
9881
9882 /*
9883 * State init.
9884 */
9885 iemInitExec(pVCpu, 0 /*fExecOpts*/);
9886
9887 /*
9888 * Switch orgy for getting to the right handler.
9889 */
9890 VBOXSTRICTRC rcStrict;
9891 if (fRepPrefix)
9892 {
9893 switch (enmAddrMode)
9894 {
9895 case IEMMODE_16BIT:
9896 switch (cbValue)
9897 {
9898 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
9899 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
9900 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
9901 default:
9902 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
9903 }
9904 break;
9905
9906 case IEMMODE_32BIT:
9907 switch (cbValue)
9908 {
9909 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
9910 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
9911 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
9912 default:
9913 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
9914 }
9915 break;
9916
9917 case IEMMODE_64BIT:
9918 switch (cbValue)
9919 {
9920 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
9921 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
9922 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
9923 default:
9924 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
9925 }
9926 break;
9927
9928 default:
9929 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
9930 }
9931 }
9932 else
9933 {
9934 switch (enmAddrMode)
9935 {
9936 case IEMMODE_16BIT:
9937 switch (cbValue)
9938 {
9939 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
9940 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
9941 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
9942 default:
9943 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
9944 }
9945 break;
9946
9947 case IEMMODE_32BIT:
9948 switch (cbValue)
9949 {
9950 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
9951 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
9952 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
9953 default:
9954 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
9955 }
9956 break;
9957
9958 case IEMMODE_64BIT:
9959 switch (cbValue)
9960 {
9961 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
9962 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
9963 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
9964 default:
9965 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
9966 }
9967 break;
9968
9969 default:
9970 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
9971 }
9972 }
9973
9974 if ( pVCpu->iem.s.cActiveMappings == 0
9975 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
9976 { /* likely */ }
9977 else
9978 {
9979 AssertMsg(!IOM_SUCCESS(rcStrict), ("%#x\n", VBOXSTRICTRC_VAL(rcStrict)));
9980 iemMemRollback(pVCpu);
9981 }
9982 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
9983}
9984
9985
9986/**
9987 * Interface for rawmode to execute an OUT (write) instruction.
9988 *
9989 * @returns Strict VBox status code.
9990 * @param pVCpu The cross context virtual CPU structure.
9991 * @param cbInstr The instruction length in bytes.
9992 * @param u16Port The port to write to.
9993 * @param fImm Whether the port is specified using an immediate operand or
9994 * using the implicit DX register.
9995 * @param cbReg The register size.
9996 *
9997 * @remarks In ring-0 not all of the state needs to be synced in.
9998 */
9999VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
10000{
10001 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10002 Assert(cbReg <= 4 && cbReg != 3);
10003
10004 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10005 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_out, u16Port, cbReg,
10006 ((uint8_t)fImm << 7) | 0xf /** @todo never worked with intercepts */);
10007 Assert(!pVCpu->iem.s.cActiveMappings);
10008 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10009}
10010
10011
10012/**
10013 * Interface for rawmode to execute an IN (read) instruction.
10014 *
10015 * @returns Strict VBox status code.
10016 * @param pVCpu The cross context virtual CPU structure.
10017 * @param cbInstr The instruction length in bytes.
10018 * @param u16Port The port to read.
10019 * @param fImm Whether the port is specified using an immediate operand or
10020 * using the implicit DX.
10021 * @param cbReg The register size.
10022 */
10023VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
10024{
10025 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10026 Assert(cbReg <= 4 && cbReg != 3);
10027
10028 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10029 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_in, u16Port, cbReg,
10030 ((uint8_t)fImm << 7) | 0xf /** @todo never worked with intercepts */);
10031 Assert(!pVCpu->iem.s.cActiveMappings);
10032 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10033}
10034
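/*
 * Illustrative usage sketches (an addition for exposition, not part of the
 * original source): emulating "out dx, al" and "in eax, dx" from decoded I/O
 * exits.  fImm is false because the port comes from DX rather than an
 * immediate operand; u16Port and cbInstr would come from the exit information
 * and are hypothetical here.
 *
 *      // out dx, al  -- fImm is false, cbReg is 1
 *      rcStrict = IEMExecDecodedOut(pVCpu, cbInstr, u16Port, false, 1);
 *
 *      // in eax, dx  -- fImm is false, cbReg is 4
 *      rcStrict = IEMExecDecodedIn(pVCpu, cbInstr, u16Port, false, 4);
 */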
10035
10036/**
10037 * Interface for HM and EM to write to a CRx register.
10038 *
10039 * @returns Strict VBox status code.
10040 * @param pVCpu The cross context virtual CPU structure.
10041 * @param cbInstr The instruction length in bytes.
10042 * @param iCrReg The control register number (destination).
10043 * @param iGReg The general purpose register number (source).
10044 *
10045 * @remarks In ring-0 not all of the state needs to be synced in.
10046 */
10047VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
10048{
10049 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10050 Assert(iCrReg < 16);
10051 Assert(iGReg < 16);
10052
10053 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10054 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
10055 Assert(!pVCpu->iem.s.cActiveMappings);
10056 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10057}
10058
10059
10060/**
10061 * Interface for HM and EM to read from a CRx register.
10062 *
10063 * @returns Strict VBox status code.
10064 * @param pVCpu The cross context virtual CPU structure.
10065 * @param cbInstr The instruction length in bytes.
10066 * @param iGReg The general purpose register number (destination).
10067 * @param iCrReg The control register number (source).
10068 *
10069 * @remarks In ring-0 not all of the state needs to be synced in.
10070 */
10071VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
10072{
10073 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10074 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4
10075 | CPUMCTX_EXTRN_APIC_TPR);
10076 Assert(iCrReg < 16);
10077 Assert(iGReg < 16);
10078
10079 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10080 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
10081 Assert(!pVCpu->iem.s.cActiveMappings);
10082 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10083}
10084
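/*
 * Illustrative usage sketches (an addition for exposition, not part of the
 * original source): note that for both helpers above the destination operand
 * comes first.  Emulating "mov cr3, rax" followed by "mov rdx, cr3"; the
 * cbInstr value is hypothetical.
 *
 *      // mov cr3, rax  -- iCrReg=3 (destination), iGReg=0 (rax, source)
 *      rcStrict = IEMExecDecodedMovCRxWrite(pVCpu, cbInstr, 3, 0);
 *
 *      // mov rdx, cr3  -- iGReg=2 (rdx, destination), iCrReg=3 (source)
 *      rcStrict = IEMExecDecodedMovCRxRead(pVCpu, cbInstr, 2, 3);
 */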
10085
10086/**
10087 * Interface for HM and EM to write to a DRx register.
10088 *
10089 * @returns Strict VBox status code.
10090 * @param pVCpu The cross context virtual CPU structure.
10091 * @param cbInstr The instruction length in bytes.
10092 * @param iDrReg The debug register number (destination).
10093 * @param iGReg The general purpose register number (source).
10094 *
10095 * @remarks In ring-0 not all of the state needs to be synced in.
10096 */
10097VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovDRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iDrReg, uint8_t iGReg)
10098{
10099 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10100 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_DR7);
10101 Assert(iDrReg < 8);
10102 Assert(iGReg < 16);
10103
10104 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10105 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Dd_Rd, iDrReg, iGReg);
10106 Assert(!pVCpu->iem.s.cActiveMappings);
10107 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10108}
10109
10110
10111/**
10112 * Interface for HM and EM to read from a DRx register.
10113 *
10114 * @returns Strict VBox status code.
10115 * @param pVCpu The cross context virtual CPU structure.
10116 * @param cbInstr The instruction length in bytes.
10117 * @param iGReg The general purpose register number (destination).
10118 * @param iDrReg The debug register number (source).
10119 *
10120 * @remarks In ring-0 not all of the state needs to be synced in.
10121 */
10122VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovDRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iDrReg)
10123{
10124 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10125 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_DR7);
10126 Assert(iDrReg < 8);
10127 Assert(iGReg < 16);
10128
10129 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10130 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Dd, iGReg, iDrReg);
10131 Assert(!pVCpu->iem.s.cActiveMappings);
10132 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10133}
10134
10135
10136/**
10137 * Interface for HM and EM to clear the CR0[TS] bit.
10138 *
10139 * @returns Strict VBox status code.
10140 * @param pVCpu The cross context virtual CPU structure.
10141 * @param cbInstr The instruction length in bytes.
10142 *
10143 * @remarks In ring-0 not all of the state needs to be synced in.
10144 */
10145VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPUCC pVCpu, uint8_t cbInstr)
10146{
10147 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10148
10149 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10150 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
10151 Assert(!pVCpu->iem.s.cActiveMappings);
10152 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10153}
10154
10155
10156/**
10157 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
10158 *
10159 * @returns Strict VBox status code.
10160 * @param pVCpu The cross context virtual CPU structure.
10161 * @param cbInstr The instruction length in bytes.
10162 * @param uValue The value to load into CR0.
10163 * @param GCPtrEffDst The guest-linear address if the LMSW instruction has a
10164 * memory operand. Otherwise pass NIL_RTGCPTR.
10165 *
10166 * @remarks In ring-0 not all of the state needs to be synced in.
10167 */
10168VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t uValue, RTGCPTR GCPtrEffDst)
10169{
10170 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10171
10172 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10173 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_lmsw, uValue, GCPtrEffDst);
10174 Assert(!pVCpu->iem.s.cActiveMappings);
10175 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10176}
10177
10178
10179/**
10180 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
10181 *
10182 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
10183 *
10184 * @returns Strict VBox status code.
10185 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10186 * @param cbInstr The instruction length in bytes.
10187 * @remarks In ring-0 not all of the state needs to be synced in.
10188 * @thread EMT(pVCpu)
10189 */
10190VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPUCC pVCpu, uint8_t cbInstr)
10191{
10192 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10193
10194 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10195 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
10196 Assert(!pVCpu->iem.s.cActiveMappings);
10197 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10198}
10199
10200
10201/**
10202 * Interface for HM and EM to emulate the WBINVD instruction.
10203 *
10204 * @returns Strict VBox status code.
10205 * @param pVCpu The cross context virtual CPU structure.
10206 * @param cbInstr The instruction length in bytes.
10207 *
10208 * @remarks In ring-0 not all of the state needs to be synced in.
10209 */
10210VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWbinvd(PVMCPUCC pVCpu, uint8_t cbInstr)
10211{
10212 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10213
10214 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10215 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wbinvd);
10216 Assert(!pVCpu->iem.s.cActiveMappings);
10217 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10218}
10219
10220
10221/**
10222 * Interface for HM and EM to emulate the INVD instruction.
10223 *
10224 * @returns Strict VBox status code.
10225 * @param pVCpu The cross context virtual CPU structure.
10226 * @param cbInstr The instruction length in bytes.
10227 *
10228 * @remarks In ring-0 not all of the state needs to be synced in.
10229 */
10230VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvd(PVMCPUCC pVCpu, uint8_t cbInstr)
10231{
10232 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10233
10234 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10235 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invd);
10236 Assert(!pVCpu->iem.s.cActiveMappings);
10237 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10238}
10239
10240
10241/**
10242 * Interface for HM and EM to emulate the INVLPG instruction.
10243 *
10244 * @returns Strict VBox status code.
10245 * @retval VINF_PGM_SYNC_CR3
10246 *
10247 * @param pVCpu The cross context virtual CPU structure.
10248 * @param cbInstr The instruction length in bytes.
10249 * @param GCPtrPage The effective address of the page to invalidate.
10250 *
10251 * @remarks In ring-0 not all of the state needs to be synced in.
10252 */
10253VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpg(PVMCPUCC pVCpu, uint8_t cbInstr, RTGCPTR GCPtrPage)
10254{
10255 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10256
10257 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10258 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_invlpg, GCPtrPage);
10259 Assert(!pVCpu->iem.s.cActiveMappings);
10260 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10261}
10262
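/*
 * Illustrative usage sketch (an addition for exposition, not part of the
 * original source): invalidating a guest page from a decoded INVLPG exit.
 * GCPtrPage and cbInstr are hypothetical; note that the informational
 * VINF_PGM_SYNC_CR3 status must be passed on so the page tables get resynced
 * before the guest resumes.
 *
 *      VBOXSTRICTRC rcStrict = IEMExecDecodedInvlpg(pVCpu, cbInstr, GCPtrPage);
 */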
10263
10264/**
10265 * Interface for HM and EM to emulate the INVPCID instruction.
10266 *
10267 * @returns Strict VBox status code.
10268 * @retval VINF_PGM_SYNC_CR3
10269 *
10270 * @param pVCpu The cross context virtual CPU structure.
10271 * @param cbInstr The instruction length in bytes.
10272 * @param iEffSeg The effective segment register.
10273 * @param GCPtrDesc The effective address of the INVPCID descriptor.
10274 * @param uType The invalidation type.
10275 *
10276 * @remarks In ring-0 not all of the state needs to be synced in.
10277 */
10278VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvpcid(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPTR GCPtrDesc,
10279 uint64_t uType)
10280{
10281 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 4);
10282
10283 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10284 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_invpcid, iEffSeg, GCPtrDesc, uType);
10285 Assert(!pVCpu->iem.s.cActiveMappings);
10286 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10287}
10288
10289
10290/**
10291 * Interface for HM and EM to emulate the CPUID instruction.
10292 *
10293 * @returns Strict VBox status code.
10294 *
10295 * @param pVCpu The cross context virtual CPU structure.
10296 * @param cbInstr The instruction length in bytes.
10297 *
10298 * @remarks Not all of the state needs to be synced in; the usual plus RAX and RCX.
10299 */
10300VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedCpuid(PVMCPUCC pVCpu, uint8_t cbInstr)
10301{
10302 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10303 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX);
10304
10305 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10306 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_cpuid);
10307 Assert(!pVCpu->iem.s.cActiveMappings);
10308 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10309}
10310
10311
10312/**
10313 * Interface for HM and EM to emulate the RDPMC instruction.
10314 *
10315 * @returns Strict VBox status code.
10316 *
10317 * @param pVCpu The cross context virtual CPU structure.
10318 * @param cbInstr The instruction length in bytes.
10319 *
10320 * @remarks Not all of the state needs to be synced in.
10321 */
10322VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdpmc(PVMCPUCC pVCpu, uint8_t cbInstr)
10323{
10324 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10325 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
10326
10327 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10328 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdpmc);
10329 Assert(!pVCpu->iem.s.cActiveMappings);
10330 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10331}
10332
10333
10334/**
10335 * Interface for HM and EM to emulate the RDTSC instruction.
10336 *
10337 * @returns Strict VBox status code.
10338 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
10339 *
10340 * @param pVCpu The cross context virtual CPU structure.
10341 * @param cbInstr The instruction length in bytes.
10342 *
10343 * @remarks Not all of the state needs to be synced in.
10344 */
10345VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtsc(PVMCPUCC pVCpu, uint8_t cbInstr)
10346{
10347 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10348 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
10349
10350 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10351 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtsc);
10352 Assert(!pVCpu->iem.s.cActiveMappings);
10353 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10354}
10355
10356
10357/**
10358 * Interface for HM and EM to emulate the RDTSCP instruction.
10359 *
10360 * @returns Strict VBox status code.
10361 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
10362 *
10363 * @param pVCpu The cross context virtual CPU structure.
10364 * @param cbInstr The instruction length in bytes.
10365 *
10366 * @remarks Not all of the state needs to be synced in. Recommended
10367 * to include CPUMCTX_EXTRN_TSC_AUX to avoid an extra fetch call.
10368 */
10369VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtscp(PVMCPUCC pVCpu, uint8_t cbInstr)
10370{
10371 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10372 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_TSC_AUX);
10373
10374 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10375 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtscp);
10376 Assert(!pVCpu->iem.s.cActiveMappings);
10377 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10378}
10379
10380
10381/**
10382 * Interface for HM and EM to emulate the RDMSR instruction.
10383 *
10384 * @returns Strict VBox status code.
10385 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10386 *
10387 * @param pVCpu The cross context virtual CPU structure.
10388 * @param cbInstr The instruction length in bytes.
10389 *
10390 * @remarks Not all of the state needs to be synced in. Requires RCX and
10391 * (currently) all MSRs.
10392 */
10393VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
10394{
10395 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10396 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_ALL_MSRS);
10397
10398 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10399 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdmsr);
10400 Assert(!pVCpu->iem.s.cActiveMappings);
10401 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10402}
10403
10404
10405/**
10406 * Interface for HM and EM to emulate the WRMSR instruction.
10407 *
10408 * @returns Strict VBox status code.
10409 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10410 *
10411 * @param pVCpu The cross context virtual CPU structure.
10412 * @param cbInstr The instruction length in bytes.
10413 *
10414 * @remarks Not all of the state needs to be synced in. Requires RCX, RAX, RDX,
10415 * and (currently) all MSRs.
10416 */
10417VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWrmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
10418{
10419 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10420 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK
10421 | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_ALL_MSRS);
10422
10423 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10424 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wrmsr);
10425 Assert(!pVCpu->iem.s.cActiveMappings);
10426 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10427}
10428
10429
10430/**
10431 * Interface for HM and EM to emulate the MONITOR instruction.
10432 *
10433 * @returns Strict VBox status code.
10434 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10435 *
10436 * @param pVCpu The cross context virtual CPU structure.
10437 * @param cbInstr The instruction length in bytes.
10438 *
10439 * @remarks Not all of the state needs to be synced in.
10440 * @remarks ASSUMES the default DS segment and that no segment override
10441 * prefixes are used.
10442 */
10443VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMonitor(PVMCPUCC pVCpu, uint8_t cbInstr)
10444{
10445 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10446 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);
10447
10448 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10449 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_monitor, X86_SREG_DS);
10450 Assert(!pVCpu->iem.s.cActiveMappings);
10451 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10452}
10453
10454
10455/**
10456 * Interface for HM and EM to emulate the MWAIT instruction.
10457 *
10458 * @returns Strict VBox status code.
10459 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10460 *
10461 * @param pVCpu The cross context virtual CPU structure.
10462 * @param cbInstr The instruction length in bytes.
10463 *
10464 * @remarks Not all of the state needs to be synced in.
10465 */
10466VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMwait(PVMCPUCC pVCpu, uint8_t cbInstr)
10467{
10468 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10469 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX);
10470
10471 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10472 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_mwait);
10473 Assert(!pVCpu->iem.s.cActiveMappings);
10474 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10475}
10476
10477
10478/**
10479 * Interface for HM and EM to emulate the HLT instruction.
10480 *
10481 * @returns Strict VBox status code.
10482 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10483 *
10484 * @param pVCpu The cross context virtual CPU structure.
10485 * @param cbInstr The instruction length in bytes.
10486 *
10487 * @remarks Not all of the state needs to be synced in.
10488 */
10489VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedHlt(PVMCPUCC pVCpu, uint8_t cbInstr)
10490{
10491 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10492
10493 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10494 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_hlt);
10495 Assert(!pVCpu->iem.s.cActiveMappings);
10496 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10497}
10498
10499
10500/**
10501 * Checks if IEM is in the process of delivering an event (interrupt or
10502 * exception).
10503 *
10504 * @returns true if we're in the process of raising an interrupt or exception,
10505 * false otherwise.
10506 * @param pVCpu The cross context virtual CPU structure.
10507 * @param puVector Where to store the vector associated with the
10508 * currently delivered event, optional.
10509 * @param pfFlags Where to store the event delivery flags (see
10510 * IEM_XCPT_FLAGS_XXX), optional.
10511 * @param puErr Where to store the error code associated with the
10512 * event, optional.
10513 * @param puCr2 Where to store the CR2 associated with the event,
10514 * optional.
10515 * @remarks The caller should check the flags to determine if the error code and
10516 * CR2 are valid for the event.
10517 */
10518VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPUCC pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr, uint64_t *puCr2)
10519{
10520 bool const fRaisingXcpt = pVCpu->iem.s.cXcptRecursions > 0;
10521 if (fRaisingXcpt)
10522 {
10523 if (puVector)
10524 *puVector = pVCpu->iem.s.uCurXcpt;
10525 if (pfFlags)
10526 *pfFlags = pVCpu->iem.s.fCurXcpt;
10527 if (puErr)
10528 *puErr = pVCpu->iem.s.uCurXcptErr;
10529 if (puCr2)
10530 *puCr2 = pVCpu->iem.s.uCurXcptCr2;
10531 }
10532 return fRaisingXcpt;
10533}
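
/*
 * Editor's note: hypothetical usage sketch, not part of the original file.
 * It illustrates the remark above: only trust the error code and CR2 when
 * the corresponding IEM_XCPT_FLAGS_XXX bits are set.  The function name is
 * made up; IEMGetCurrentXcpt() and the flag names come from the IEM sources.
 */
#if 0 /* illustrative sketch only */
static void exampleLogPendingXcpt(PVMCPUCC pVCpu)
{
    uint8_t  uVector = 0;
    uint32_t fFlags  = 0;
    uint32_t uErr    = 0;
    uint64_t uCr2    = 0;
    if (IEMGetCurrentXcpt(pVCpu, &uVector, &fFlags, &uErr, &uCr2))
    {
        Log(("Delivering event: uVector=%#x fFlags=%#x\n", uVector, fFlags));
        if (fFlags & IEM_XCPT_FLAGS_ERR)  /* error code is only valid when flagged */
            Log(("  uErr=%#x\n", uErr));
        if (fFlags & IEM_XCPT_FLAGS_CR2)  /* CR2 is only valid when flagged (#PF) */
            Log(("  uCr2=%RX64\n", uCr2));
    }
}
#endif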
10534
10535#ifdef IN_RING3
10536
10537/**
10538 * Handles the unlikely and probably fatal merge cases.
10539 *
10540 * @returns Merged status code.
10541 * @param rcStrict Current EM status code.
10542 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
10543 * with @a rcStrict.
10544 * @param iMemMap The memory mapping index. For error reporting only.
10545 * @param pVCpu The cross context virtual CPU structure of the calling
10546 * thread, for error reporting only.
10547 */
10548DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
10549 unsigned iMemMap, PVMCPUCC pVCpu)
10550{
10551 if (RT_FAILURE_NP(rcStrict))
10552 return rcStrict;
10553
10554 if (RT_FAILURE_NP(rcStrictCommit))
10555 return rcStrictCommit;
10556
10557 if (rcStrict == rcStrictCommit)
10558 return rcStrictCommit;
10559
10560 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
10561 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
10562 pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
10563 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
10564 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
10565 return VERR_IOM_FF_STATUS_IPE;
10566}
10567
10568
10569/**
10570 * Helper for IEMR3ProcessForceFlag.
10571 *
10572 * @returns Merged status code.
10573 * @param rcStrict Current EM status code.
10574 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
10575 * with @a rcStrict.
10576 * @param iMemMap The memory mapping index. For error reporting only.
10577 * @param pVCpu The cross context virtual CPU structure of the calling
10578 * thread, for error reporting only.
10579 */
10580DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPUCC pVCpu)
10581{
10582 /* Simple. */
10583 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
10584 return rcStrictCommit;
10585
10586 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
10587 return rcStrict;
10588
10589 /* EM scheduling status codes. */
10590 if (RT_LIKELY( rcStrict >= VINF_EM_FIRST
10591 && rcStrict <= VINF_EM_LAST))
10592 {
10593 if (RT_LIKELY( rcStrictCommit >= VINF_EM_FIRST
10594 && rcStrictCommit <= VINF_EM_LAST))
10595 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
10596 }
10597
10598 /* Unlikely */
10599 return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
10600}
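
/*
 * Editor's note: hypothetical example of the merge rule above, not part of
 * the original file.  A plain VINF_SUCCESS (or VINF_EM_RAW_TO_R3) is simply
 * replaced by the commit status, while two EM scheduling codes are merged by
 * keeping the numerically lower (higher priority) one.
 */
#if 0 /* illustrative sketch only */
static void exampleMergeStatus(PVMCPUCC pVCpu)
{
    /* An informational commit status replaces a plain VINF_SUCCESS. */
    VBOXSTRICTRC rcMerged = iemR3MergeStatus(VINF_SUCCESS, VINF_EM_RESCHEDULE, 0 /*iMemMap*/, pVCpu);
    Assert(rcMerged == VINF_EM_RESCHEDULE);

    /* Two EM scheduling codes are merged by keeping the numerically lower
       (higher priority) one. */
    rcMerged = iemR3MergeStatus(VINF_EM_HALT, VINF_EM_RESCHEDULE, 0 /*iMemMap*/, pVCpu);
    RT_NOREF(rcMerged);
}
#endif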
10601
10602
10603/**
10604 * Called by force-flag handling code when VMCPU_FF_IEM is set.
10605 *
10606 * @returns Merge between @a rcStrict and what the commit operation returned.
10607 * @param pVM The cross context VM structure.
10608 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10609 * @param rcStrict The status code returned by ring-0 or raw-mode.
10610 */
10611VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
10612{
10613 /*
10614 * Reset the pending commit.
10615 */
10616 AssertMsg( (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
10617 & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
10618 ("%#x %#x %#x\n",
10619 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
10620 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
10621
10622 /*
10623 * Commit the pending bounce buffers (usually just one).
10624 */
10625 unsigned cBufs = 0;
10626 unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
10627 while (iMemMap-- > 0)
10628 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
10629 {
10630 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
10631 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
10632 Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
10633
10634 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
10635 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
10636 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
10637
10638 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
10639 {
10640 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
10641 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
10642 pbBuf,
10643 cbFirst,
10644 PGMACCESSORIGIN_IEM);
10645 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
10646 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
10647 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
10648 VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
10649 }
10650
10651 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
10652 {
10653 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
10654 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
10655 pbBuf + cbFirst,
10656 cbSecond,
10657 PGMACCESSORIGIN_IEM);
10658 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
10659 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
10660 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
10661 VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
10662 }
10663 cBufs++;
10664 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
10665 }
10666
10667 AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
10668 ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
10669 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
10670 pVCpu->iem.s.cActiveMappings = 0;
10671 return rcStrict;
10672}
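
/*
 * Editor's note: hypothetical sketch of the calling side, not part of the
 * original file.  Ring-3 force-flag processing is expected to invoke
 * IEMR3ProcessForceFlag() when VMCPU_FF_IEM is set so that pending bounce
 * buffer writes are committed and merged into the current status code.  The
 * function name is made up; IEMR3ProcessForceFlag() and VMCPU_FF_IEM come
 * from the sources.
 */
#if 0 /* illustrative sketch only */
static VBOXSTRICTRC exampleProcessForcedActions(PVM pVM, PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
{
    /* Commit pending IEM bounce-buffer writes and merge the resulting status. */
    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
        rcStrict = IEMR3ProcessForceFlag(pVM, pVCpu, rcStrict);
    return rcStrict;
}
#endif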
10673
10674#endif /* IN_RING3 */
10675