VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@104933

Last change on this file: r104933, checked in by vboxsync, 5 months ago

VMM/PGM,IEM: Refactored+copied PGMGstGetPage into PGMGstQueryPage that takes care of table walking, setting A & D bits and validating the access. Use new function in IEM. [fixes] bugref:10687

1/* $Id: IEMAll.cpp 104933 2024-06-15 00:44:02Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/** @page pg_iem IEM - Interpreted Execution Manager
30 *
31 * The interpreted execution manager (IEM) is for executing short guest code
32 * sequences that are causing too many exits / virtualization traps. It will
33 * also be used to interpret single instructions, thus replacing the selective
34 * interpreters in EM and IOM.
35 *
36 * Design goals:
37 * - Relatively small footprint, although we favour speed and correctness
38 * over size.
39 * - Reasonably fast.
40 * - Correctly handle lock prefixed instructions.
41 * - Complete instruction set - eventually.
42 * - Refactorable into a recompiler, maybe.
43 * - Replace EMInterpret*.
44 *
45 * Using the existing disassembler has been considered; however, this is thought
46 * to conflict with speed as the disassembler chews things a bit too much while
47 * leaving us with a somewhat complicated state to interpret afterwards.
48 *
49 *
50 * The current code is very much work in progress. You've been warned!
51 *
52 *
53 * @section sec_iem_fpu_instr FPU Instructions
54 *
55 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
56 * same or equivalent instructions on the host FPU. To make life easy, we also
57 * let the FPU prioritize the unmasked exceptions for us. This, however, only
58 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
59 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
60 * can trigger spurious FPU exceptions.
61 *
62 * The guest FPU state is not loaded into the host CPU and kept there till we
63 * leave IEM because the calling conventions have declared an all year open
64 * season on much of the FPU state. For instance an innocent looking call to
65 * memcpy might end up using a whole bunch of XMM or MM registers if the
66 * particular implementation finds it worthwhile.
67 *
68 *
69 * @section sec_iem_logging Logging
70 *
71 * The IEM code uses the "IEM" log group for the main logging. The different
72 * logging levels/flags are generally used for the following purposes:
73 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
74 * - Flow (LogFlow) : Basic enter/exit IEM state info.
75 * - Level 2 (Log2) : ?
76 * - Level 3 (Log3) : More detailed enter/exit IEM state info.
77 * - Level 4 (Log4) : Decoding mnemonics w/ EIP.
78 * - Level 5 (Log5) : Decoding details.
79 * - Level 6 (Log6) : Enables/disables the lockstep comparison with REM.
80 * - Level 7 (Log7) : iret++ execution logging.
81 * - Level 8 (Log8) :
82 * - Level 9 (Log9) :
83 * - Level 10 (Log10): TLBs.
84 * - Level 11 (Log11): Unmasked FPU exceptions.
85 *
86 * The "IEM_MEM" log group covers most of the memory-related detail logging,
87 * except for errors and exceptions:
88 * - Level 1 (Log) : Reads.
89 * - Level 2 (Log2) : Read fallbacks.
90 * - Level 3 (Log3) : MemMap read.
91 * - Level 4 (Log4) : MemMap read fallbacks.
92 * - Level 5 (Log5) : Writes.
93 * - Level 6 (Log6) : Write fallbacks.
94 * - Level 7 (Log7) : MemMap writes and read-writes.
95 * - Level 8 (Log8) : MemMap write and read-write fallbacks.
96 * - Level 9 (Log9) : Stack reads.
97 * - Level 10 (Log10): Stack read fallbacks.
98 * - Level 11 (Log11): Stack writes.
99 * - Level 12 (Log12): Stack write fallbacks.
100 * - Flow (LogFlow) :
101 *
102 * The SVM (AMD-V) and VMX (VT-x) code has the following assignments:
103 * - Level 1 (Log) : Errors and other major events.
104 * - Flow (LogFlow) : Misc flow stuff (cleanup?)
105 * - Level 2 (Log2) : VM exits.
106 *
107 * The syscall logging level assignments:
108 * - Level 1: DOS and BIOS.
109 * - Level 2: Windows 3.x
110 * - Level 3: Linux.
111 */
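/* Example (not from this file; the exact log-string syntax is an assumption based on
 * the generic VBox logging facility): verbose IEM logging in a debug build can
 * typically be requested via the environment before starting the VM process, e.g.
 *     VBOX_LOG="+iem.e.l.f.l3" VBOX_LOG_DEST="file=iem.log"
 * where 'e' enables the group, 'l'/'l3' select the log levels listed above and 'f'
 * adds flow logging; the "iem_mem" group can be enabled the same way. */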
112
113/* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
114#ifdef _MSC_VER
115# pragma warning(disable:4505)
116#endif
117
118
119/*********************************************************************************************************************************
120* Header Files *
121*********************************************************************************************************************************/
122#define LOG_GROUP LOG_GROUP_IEM
123#define VMCPU_INCL_CPUM_GST_CTX
124#include <VBox/vmm/iem.h>
125#include <VBox/vmm/cpum.h>
126#include <VBox/vmm/apic.h>
127#include <VBox/vmm/pdm.h>
128#include <VBox/vmm/pgm.h>
129#include <VBox/vmm/iom.h>
130#include <VBox/vmm/em.h>
131#include <VBox/vmm/hm.h>
132#include <VBox/vmm/nem.h>
133#include <VBox/vmm/gcm.h>
134#include <VBox/vmm/gim.h>
135#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
136# include <VBox/vmm/em.h>
137# include <VBox/vmm/hm_svm.h>
138#endif
139#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
140# include <VBox/vmm/hmvmxinline.h>
141#endif
142#include <VBox/vmm/tm.h>
143#include <VBox/vmm/dbgf.h>
144#include <VBox/vmm/dbgftrace.h>
145#include "IEMInternal.h"
146#include <VBox/vmm/vmcc.h>
147#include <VBox/log.h>
148#include <VBox/err.h>
149#include <VBox/param.h>
150#include <VBox/dis.h>
151#include <iprt/asm-math.h>
152#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
153# include <iprt/asm-amd64-x86.h>
154#elif defined(RT_ARCH_ARM64) || defined(RT_ARCH_ARM32)
155# include <iprt/asm-arm.h>
156#endif
157#include <iprt/assert.h>
158#include <iprt/string.h>
159#include <iprt/x86.h>
160
161#include "IEMInline.h"
162
163
164/*********************************************************************************************************************************
165* Structures and Typedefs *
166*********************************************************************************************************************************/
167/**
168 * CPU exception classes.
169 */
170typedef enum IEMXCPTCLASS
171{
172 IEMXCPTCLASS_BENIGN,
173 IEMXCPTCLASS_CONTRIBUTORY,
174 IEMXCPTCLASS_PAGE_FAULT,
175 IEMXCPTCLASS_DOUBLE_FAULT
176} IEMXCPTCLASS;
177
178
179/*********************************************************************************************************************************
180* Global Variables *
181*********************************************************************************************************************************/
182#if defined(IEM_LOG_MEMORY_WRITES)
183/** What IEM just wrote. */
184uint8_t g_abIemWrote[256];
185/** How much IEM just wrote. */
186size_t g_cbIemWrote;
187#endif
188
189
190/*********************************************************************************************************************************
191* Internal Functions *
192*********************************************************************************************************************************/
193static VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,
194 uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT;
195
196
197/**
198 * Calculates IEM_F_BRK_PENDING_XXX (IEM_F_PENDING_BRK_MASK) flags, slow code
199 * path.
200 *
201 * @returns IEM_F_BRK_PENDING_XXX or zero.
202 * @param pVCpu The cross context virtual CPU structure of the
203 * calling thread.
204 *
205 * @note Don't call directly, use iemCalcExecDbgFlags instead.
206 */
207uint32_t iemCalcExecDbgFlagsSlow(PVMCPUCC pVCpu)
208{
209 uint32_t fExec = 0;
210
211 /*
212 * Process guest breakpoints.
213 */
214#define PROCESS_ONE_BP(a_fDr7, a_iBp) do { \
215 if (a_fDr7 & X86_DR7_L_G(a_iBp)) \
216 { \
217 switch (X86_DR7_GET_RW(a_fDr7, a_iBp)) \
218 { \
219 case X86_DR7_RW_EO: \
220 fExec |= IEM_F_PENDING_BRK_INSTR; \
221 break; \
222 case X86_DR7_RW_WO: \
223 case X86_DR7_RW_RW: \
224 fExec |= IEM_F_PENDING_BRK_DATA; \
225 break; \
226 case X86_DR7_RW_IO: \
227 fExec |= IEM_F_PENDING_BRK_X86_IO; \
228 break; \
229 } \
230 } \
231 } while (0)
232
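    /* Each of the four guest breakpoints is gated by its DR7 L<n>/G<n> enable bits; the
       RW field then maps instruction (EO), data (WO/RW) and I/O (IO) breakpoints onto
       the corresponding IEM_F_PENDING_BRK_* flags above. */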
233 uint32_t const fGstDr7 = (uint32_t)pVCpu->cpum.GstCtx.dr[7];
234 if (fGstDr7 & X86_DR7_ENABLED_MASK)
235 {
236 PROCESS_ONE_BP(fGstDr7, 0);
237 PROCESS_ONE_BP(fGstDr7, 1);
238 PROCESS_ONE_BP(fGstDr7, 2);
239 PROCESS_ONE_BP(fGstDr7, 3);
240 }
241
242 /*
243 * Process hypervisor breakpoints.
244 */
245 uint32_t const fHyperDr7 = DBGFBpGetDR7(pVCpu->CTX_SUFF(pVM));
246 if (fHyperDr7 & X86_DR7_ENABLED_MASK)
247 {
248 PROCESS_ONE_BP(fHyperDr7, 0);
249 PROCESS_ONE_BP(fHyperDr7, 1);
250 PROCESS_ONE_BP(fHyperDr7, 2);
251 PROCESS_ONE_BP(fHyperDr7, 3);
252 }
253
254 return fExec;
255}
256
257
258/**
259 * Initializes the decoder state.
260 *
261 * iemReInitDecoder is mostly a copy of this function.
262 *
263 * @param pVCpu The cross context virtual CPU structure of the
264 * calling thread.
265 * @param fExecOpts Optional execution flags:
266 * - IEM_F_BYPASS_HANDLERS
267 * - IEM_F_X86_DISREGARD_LOCK
268 */
269DECLINLINE(void) iemInitDecoder(PVMCPUCC pVCpu, uint32_t fExecOpts)
270{
271 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
272 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
273 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
274 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
275 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
276 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
277 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
278 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
279 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
280 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
281
282 /* Execution state: */
283 uint32_t fExec;
284 pVCpu->iem.s.fExec = fExec = iemCalcExecFlags(pVCpu) | fExecOpts;
285
286 /* Decoder state: */
287 pVCpu->iem.s.enmDefAddrMode = fExec & IEM_F_MODE_CPUMODE_MASK; /** @todo check if this is correct... */
288 pVCpu->iem.s.enmEffAddrMode = fExec & IEM_F_MODE_CPUMODE_MASK;
289 if ((fExec & IEM_F_MODE_CPUMODE_MASK) != IEMMODE_64BIT)
290 {
291 pVCpu->iem.s.enmDefOpSize = fExec & IEM_F_MODE_CPUMODE_MASK; /** @todo check if this is correct... */
292 pVCpu->iem.s.enmEffOpSize = fExec & IEM_F_MODE_CPUMODE_MASK;
293 }
294 else
295 {
296 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
297 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
298 }
299 pVCpu->iem.s.fPrefixes = 0;
300 pVCpu->iem.s.uRexReg = 0;
301 pVCpu->iem.s.uRexB = 0;
302 pVCpu->iem.s.uRexIndex = 0;
303 pVCpu->iem.s.idxPrefix = 0;
304 pVCpu->iem.s.uVex3rdReg = 0;
305 pVCpu->iem.s.uVexLength = 0;
306 pVCpu->iem.s.fEvexStuff = 0;
307 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
308#ifdef IEM_WITH_CODE_TLB
309 pVCpu->iem.s.pbInstrBuf = NULL;
310 pVCpu->iem.s.offInstrNextByte = 0;
311 pVCpu->iem.s.offCurInstrStart = 0;
312# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
313 pVCpu->iem.s.offOpcode = 0;
314# endif
315# ifdef VBOX_STRICT
316 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
317 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
318 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
319 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
320# endif
321#else
322 pVCpu->iem.s.offOpcode = 0;
323 pVCpu->iem.s.cbOpcode = 0;
324#endif
325 pVCpu->iem.s.offModRm = 0;
326 pVCpu->iem.s.cActiveMappings = 0;
327 pVCpu->iem.s.iNextMapping = 0;
328 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
329
330#ifdef DBGFTRACE_ENABLED
331 switch (IEM_GET_CPU_MODE(pVCpu))
332 {
333 case IEMMODE_64BIT:
334 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.rip);
335 break;
336 case IEMMODE_32BIT:
337 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
338 break;
339 case IEMMODE_16BIT:
340 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
341 break;
342 }
343#endif
344}
345
346
347/**
348 * Reinitializes the decoder state for the 2nd+ loop of IEMExecLots.
349 *
350 * This is mostly a copy of iemInitDecoder.
351 *
352 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
353 */
354DECLINLINE(void) iemReInitDecoder(PVMCPUCC pVCpu)
355{
356 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
357 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
358 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
359 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
360 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
361 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
362 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
363 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
364 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
365
366 /* ASSUMES: Anyone changing CPU state affecting the fExec bits will update them! */
367 AssertMsg((pVCpu->iem.s.fExec & ~IEM_F_USER_OPTS) == iemCalcExecFlags(pVCpu),
368 ("fExec=%#x iemCalcExecModeFlags=%#x\n", pVCpu->iem.s.fExec, iemCalcExecFlags(pVCpu)));
369
370 IEMMODE const enmMode = IEM_GET_CPU_MODE(pVCpu);
371 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
372 pVCpu->iem.s.enmEffAddrMode = enmMode;
373 if (enmMode != IEMMODE_64BIT)
374 {
375 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
376 pVCpu->iem.s.enmEffOpSize = enmMode;
377 }
378 else
379 {
380 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
381 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
382 }
383 pVCpu->iem.s.fPrefixes = 0;
384 pVCpu->iem.s.uRexReg = 0;
385 pVCpu->iem.s.uRexB = 0;
386 pVCpu->iem.s.uRexIndex = 0;
387 pVCpu->iem.s.idxPrefix = 0;
388 pVCpu->iem.s.uVex3rdReg = 0;
389 pVCpu->iem.s.uVexLength = 0;
390 pVCpu->iem.s.fEvexStuff = 0;
391 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
392#ifdef IEM_WITH_CODE_TLB
393 if (pVCpu->iem.s.pbInstrBuf)
394 {
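        /* If the new RIP still falls inside the current instruction buffer window we just
           reposition within it; otherwise the buffer is dropped so that the next opcode
           fetch goes through the code TLB again. */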
395 uint64_t off = (enmMode == IEMMODE_64BIT
396 ? pVCpu->cpum.GstCtx.rip
397 : pVCpu->cpum.GstCtx.eip + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base)
398 - pVCpu->iem.s.uInstrBufPc;
399 if (off < pVCpu->iem.s.cbInstrBufTotal)
400 {
401 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
402 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
403 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
404 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
405 else
406 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
407 }
408 else
409 {
410 pVCpu->iem.s.pbInstrBuf = NULL;
411 pVCpu->iem.s.offInstrNextByte = 0;
412 pVCpu->iem.s.offCurInstrStart = 0;
413 pVCpu->iem.s.cbInstrBuf = 0;
414 pVCpu->iem.s.cbInstrBufTotal = 0;
415 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
416 }
417 }
418 else
419 {
420 pVCpu->iem.s.offInstrNextByte = 0;
421 pVCpu->iem.s.offCurInstrStart = 0;
422 pVCpu->iem.s.cbInstrBuf = 0;
423 pVCpu->iem.s.cbInstrBufTotal = 0;
424# ifdef VBOX_STRICT
425 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
426# endif
427 }
428# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
429 pVCpu->iem.s.offOpcode = 0;
430# endif
431#else /* !IEM_WITH_CODE_TLB */
432 pVCpu->iem.s.cbOpcode = 0;
433 pVCpu->iem.s.offOpcode = 0;
434#endif /* !IEM_WITH_CODE_TLB */
435 pVCpu->iem.s.offModRm = 0;
436 Assert(pVCpu->iem.s.cActiveMappings == 0);
437 pVCpu->iem.s.iNextMapping = 0;
438 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
439 Assert(!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS));
440
441#ifdef DBGFTRACE_ENABLED
442 switch (enmMode)
443 {
444 case IEMMODE_64BIT:
445 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.rip);
446 break;
447 case IEMMODE_32BIT:
448 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
449 break;
450 case IEMMODE_16BIT:
451 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
452 break;
453 }
454#endif
455}
456
457
458
459/**
460 * Prefetches opcodes the first time, when starting execution.
461 *
462 * @returns Strict VBox status code.
463 * @param pVCpu The cross context virtual CPU structure of the
464 * calling thread.
465 * @param fExecOpts Optional execution flags:
466 * - IEM_F_BYPASS_HANDLERS
467 * - IEM_F_X86_DISREGARD_LOCK
468 */
469static VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPUCC pVCpu, uint32_t fExecOpts) RT_NOEXCEPT
470{
471 iemInitDecoder(pVCpu, fExecOpts);
472
473#ifndef IEM_WITH_CODE_TLB
474 /*
475 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
476 *
477 * First translate CS:rIP to a physical address.
478 *
479 * Note! The iemOpcodeFetchMoreBytes code depends on this here code to fetch
480 * all relevant bytes from the first page, as it ASSUMES it's only ever
481 * called for dealing with CS.LIM, page crossing and instructions that
482 * are too long.
483 */
484 uint32_t cbToTryRead;
485 RTGCPTR GCPtrPC;
486 if (IEM_IS_64BIT_CODE(pVCpu))
487 {
488 cbToTryRead = GUEST_PAGE_SIZE;
489 GCPtrPC = pVCpu->cpum.GstCtx.rip;
490 if (IEM_IS_CANONICAL(GCPtrPC))
491 cbToTryRead = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
492 else
493 return iemRaiseGeneralProtectionFault0(pVCpu);
494 }
495 else
496 {
497 uint32_t GCPtrPC32 = pVCpu->cpum.GstCtx.eip;
498 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu), ("%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
499 if (GCPtrPC32 <= pVCpu->cpum.GstCtx.cs.u32Limit)
500 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrPC32 + 1;
501 else
502 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
503 if (cbToTryRead) { /* likely */ }
504 else /* overflowed */
505 {
506 Assert(GCPtrPC32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
507 cbToTryRead = UINT32_MAX;
508 }
509 GCPtrPC = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrPC32;
510 Assert(GCPtrPC <= UINT32_MAX);
511 }
512
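    /* PGMGstQueryPageFast does the guest page table walk, sets the accessed bit as
       needed and validates the requested access (execute, plus user mode when CPL is 3)
       in a single call. */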
513 PGMPTWALKFAST WalkFast;
514 int rc = PGMGstQueryPageFast(pVCpu, GCPtrPC,
515 IEM_GET_CPL(pVCpu) == 3 ? PGMQPAGE_F_EXECUTE | PGMQPAGE_F_USER_MODE : PGMQPAGE_F_EXECUTE,
516 &WalkFast);
517 if (RT_SUCCESS(rc))
518 Assert(WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED);
519 else
520 {
521 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
522# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
523/** @todo This isn't quite right yet, as PGM_GST_SLAT_NAME_EPT(Walk) doesn't
524 * know about what kind of access we're making! See PGM_GST_NAME(WalkFast). */
525 if (WalkFast.fFailed & PGM_WALKFAIL_EPT)
526 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
527# endif
528 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, rc);
529 }
530#if 0
531 if ((WalkFast.fEffective & X86_PTE_US) || IEM_GET_CPL(pVCpu) != 3) { /* likely */ }
532 else
533 {
534 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
535# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
536/** @todo this is completely wrong for EPT. WalkFast.fFailed is always zero here!*/
537# error completely wrong
538 if (WalkFast.fFailed & PGM_WALKFAIL_EPT)
539 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
540# endif
541 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
542 }
543 if (!(WalkFast.fEffective & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
544 else
545 {
546 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
547# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
548/** @todo this is completely wrong for EPT. WalkFast.fFailed is always zero here!*/
549# error completely wrong.
550 if (WalkFast.fFailed & PGM_WALKFAIL_EPT)
551 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
552# endif
553 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
554 }
555#else
556 Assert((WalkFast.fEffective & X86_PTE_US) || IEM_GET_CPL(pVCpu) != 3);
557 Assert(!(WalkFast.fEffective & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE));
558#endif
559 RTGCPHYS const GCPhys = WalkFast.GCPhys;
560
561 /*
562 * Read the bytes at this address.
563 */
564 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
565 if (cbToTryRead > cbLeftOnPage)
566 cbToTryRead = cbLeftOnPage;
567 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
568 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
569
570 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
571 {
572 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
573 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
574 { /* likely */ }
575 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
576 {
577 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
578 GCPtrPC, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
579 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
580 }
581 else
582 {
583 Log((RT_SUCCESS(rcStrict)
584 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
585 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
586 GCPtrPC, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
587 return rcStrict;
588 }
589 }
590 else
591 {
592 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
593 if (RT_SUCCESS(rc))
594 { /* likely */ }
595 else
596 {
597 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
598 GCPtrPC, GCPhys, rc, cbToTryRead));
599 return rc;
600 }
601 }
602 pVCpu->iem.s.cbOpcode = cbToTryRead;
603#endif /* !IEM_WITH_CODE_TLB */
604 return VINF_SUCCESS;
605}
606
607
608/**
609 * Invalidates the IEM TLBs.
610 *
611 * This is called internally as well as by PGM when moving GC mappings.
612 *
613 * @param pVCpu The cross context virtual CPU structure of the calling
614 * thread.
615 */
616VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPUCC pVCpu)
617{
618#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
619 Log10(("IEMTlbInvalidateAll\n"));
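    /* Bumping the revision invalidates every entry in O(1): an entry only matches when
       its uTag equals (page tag | current revision), see IEMTlbInvalidatePage. Only on
       the rare revision wrap-around below do the tags actually need scrubbing. */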
620# ifdef IEM_WITH_CODE_TLB
621 pVCpu->iem.s.cbInstrBufTotal = 0;
622 pVCpu->iem.s.CodeTlb.uTlbRevision += IEMTLB_REVISION_INCR;
623 if (pVCpu->iem.s.CodeTlb.uTlbRevision != 0)
624 { /* very likely */ }
625 else
626 {
627 pVCpu->iem.s.CodeTlb.uTlbRevision = IEMTLB_REVISION_INCR;
628 unsigned i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
629 while (i-- > 0)
630 pVCpu->iem.s.CodeTlb.aEntries[i].uTag = 0;
631 }
632# endif
633
634# ifdef IEM_WITH_DATA_TLB
635 pVCpu->iem.s.DataTlb.uTlbRevision += IEMTLB_REVISION_INCR;
636 if (pVCpu->iem.s.DataTlb.uTlbRevision != 0)
637 { /* very likely */ }
638 else
639 {
640 pVCpu->iem.s.DataTlb.uTlbRevision = IEMTLB_REVISION_INCR;
641 unsigned i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
642 while (i-- > 0)
643 pVCpu->iem.s.DataTlb.aEntries[i].uTag = 0;
644 }
645# endif
646#else
647 RT_NOREF(pVCpu);
648#endif
649}
650
651
652/**
653 * Invalidates a page in the TLBs.
654 *
655 * @param pVCpu The cross context virtual CPU structure of the calling
656 * thread.
657 * @param GCPtr The address of the page to invalidate
658 * @thread EMT(pVCpu)
659 */
660VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPUCC pVCpu, RTGCPTR GCPtr)
661{
662#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
663 Log10(("IEMTlbInvalidatePage: GCPtr=%RGv\n", GCPtr));
664 GCPtr = IEMTLB_CALC_TAG_NO_REV(GCPtr);
665 Assert(!(GCPtr >> (48 - X86_PAGE_SHIFT)));
666 uintptr_t const idx = IEMTLB_TAG_TO_INDEX(GCPtr);
667
668# ifdef IEM_WITH_CODE_TLB
669 if (pVCpu->iem.s.CodeTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.CodeTlb.uTlbRevision))
670 {
671 pVCpu->iem.s.CodeTlb.aEntries[idx].uTag = 0;
672 if (GCPtr == IEMTLB_CALC_TAG_NO_REV(pVCpu->iem.s.uInstrBufPc))
673 pVCpu->iem.s.cbInstrBufTotal = 0;
674 }
675# endif
676
677# ifdef IEM_WITH_DATA_TLB
678 if (pVCpu->iem.s.DataTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.DataTlb.uTlbRevision))
679 pVCpu->iem.s.DataTlb.aEntries[idx].uTag = 0;
680# endif
681#else
682 NOREF(pVCpu); NOREF(GCPtr);
683#endif
684}
685
686
687#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
688/**
689 * Invalidates both TLBs the slow way following a revision rollover.
690 *
691 * Worker for IEMTlbInvalidateAllPhysical,
692 * IEMTlbInvalidateAllPhysicalAllCpus, iemOpcodeFetchBytesJmp, iemMemMap,
693 * iemMemMapJmp and others.
694 *
695 * @thread EMT(pVCpu)
696 */
697static void IEMTlbInvalidateAllPhysicalSlow(PVMCPUCC pVCpu)
698{
699 Log10(("IEMTlbInvalidateAllPhysicalSlow\n"));
700 ASMAtomicWriteU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
701 ASMAtomicWriteU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
702
703 unsigned i;
704# ifdef IEM_WITH_CODE_TLB
705 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
706 while (i-- > 0)
707 {
708 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
709 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~( IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
710 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
711 }
712# endif
713# ifdef IEM_WITH_DATA_TLB
714 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
715 while (i-- > 0)
716 {
717 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
718 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~( IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
719 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
720 }
721# endif
722
723}
724#endif
725
726
727/**
728 * Invalidates the host physical aspects of the IEM TLBs.
729 *
730 * This is called internally as well as by PGM when moving GC mappings.
731 *
732 * @param pVCpu The cross context virtual CPU structure of the calling
733 * thread.
734 * @note Currently not used.
735 */
736VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPUCC pVCpu)
737{
738#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
739 /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
740 Log10(("IEMTlbInvalidateAllPhysical\n"));
741
742# ifdef IEM_WITH_CODE_TLB
743 pVCpu->iem.s.cbInstrBufTotal = 0;
744# endif
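    /* Same revision trick as for the virtual tags: each entry carries the physical
       revision in fFlagsAndPhysRev, so bumping uTlbPhysRev lazily invalidates the cached
       physical info (mapping pointer and access flags) of all entries at once; the slow
       path scrubs the entries when the revision counter wraps. */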
745 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
746 if (RT_LIKELY(uTlbPhysRev > IEMTLB_PHYS_REV_INCR * 2))
747 {
748 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
749 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
750 }
751 else
752 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
753#else
754 NOREF(pVCpu);
755#endif
756}
757
758
759/**
760 * Invalidates the host physical aspects of the IEM TLBs.
761 *
762 * This is called internally as well as by PGM when moving GC mappings.
763 *
764 * @param pVM The cross context VM structure.
765 * @param idCpuCaller The ID of the calling EMT if available to the caller,
766 * otherwise NIL_VMCPUID.
767 * @param enmReason The reason we're called.
768 *
769 * @remarks Caller holds the PGM lock.
770 */
771VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVMCC pVM, VMCPUID idCpuCaller, IEMTLBPHYSFLUSHREASON enmReason)
772{
773#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
774 PVMCPUCC const pVCpuCaller = idCpuCaller >= pVM->cCpus ? VMMGetCpu(pVM) : VMMGetCpuById(pVM, idCpuCaller);
775 if (pVCpuCaller)
776 VMCPU_ASSERT_EMT(pVCpuCaller);
777 Log10(("IEMTlbInvalidateAllPhysicalAllCpus: %d\n", enmReason)); RT_NOREF(enmReason);
778
779 VMCC_FOR_EACH_VMCPU(pVM)
780 {
781# ifdef IEM_WITH_CODE_TLB
782 if (pVCpuCaller == pVCpu)
783 pVCpu->iem.s.cbInstrBufTotal = 0;
784# endif
785
786 uint64_t const uTlbPhysRevPrev = ASMAtomicUoReadU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev);
787 uint64_t uTlbPhysRevNew = uTlbPhysRevPrev + IEMTLB_PHYS_REV_INCR;
788 if (RT_LIKELY(uTlbPhysRevNew > IEMTLB_PHYS_REV_INCR * 2))
789 { /* likely */}
790 else if (pVCpuCaller != pVCpu)
791 uTlbPhysRevNew = IEMTLB_PHYS_REV_INCR;
792 else
793 {
794 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
795 continue;
796 }
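        /* Publish the new revision only if nobody raced us; a racing bump by the target
           EMT is just as good for invalidation purposes. */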
797 ASMAtomicCmpXchgU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev);
798 ASMAtomicCmpXchgU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev);
799 }
800 VMCC_FOR_EACH_VMCPU_END(pVM);
801
802#else
803 RT_NOREF(pVM, idCpuCaller, enmReason);
804#endif
805}
806
807
808/**
809 * Flushes the prefetch buffer, light version.
810 */
811void iemOpcodeFlushLight(PVMCPUCC pVCpu, uint8_t cbInstr)
812{
813#ifndef IEM_WITH_CODE_TLB
814 pVCpu->iem.s.cbOpcode = cbInstr;
815#else
816 RT_NOREF(pVCpu, cbInstr);
817#endif
818}
819
820
821/**
822 * Flushes the prefetch buffer, heavy version.
823 */
824void iemOpcodeFlushHeavy(PVMCPUCC pVCpu, uint8_t cbInstr)
825{
826#ifndef IEM_WITH_CODE_TLB
827 pVCpu->iem.s.cbOpcode = cbInstr; /* Note! SVM and VT-x may set this to zero on exit, rather than the instruction length. */
828#elif 1
829 pVCpu->iem.s.cbInstrBufTotal = 0;
830 RT_NOREF(cbInstr);
831#else
832 RT_NOREF(pVCpu, cbInstr);
833#endif
834}
835
836
837
838#ifdef IEM_WITH_CODE_TLB
839
840/**
841 * Tries to fetch @a cbDst opcode bytes, raises the appropriate exception on
842 * failure and jumps.
843 *
844 * We end up here for a number of reasons:
845 * - pbInstrBuf isn't yet initialized.
846 * - Advancing beyond the buffer boundary (e.g. cross page).
847 * - Advancing beyond the CS segment limit.
848 * - Fetching from non-mappable page (e.g. MMIO).
849 * - TLB loading in the recompiler (@a pvDst = NULL, @a cbDst = 0).
850 *
851 * @param pVCpu The cross context virtual CPU structure of the
852 * calling thread.
853 * @param pvDst Where to return the bytes.
854 * @param cbDst Number of bytes to read. A value of zero is
855 * allowed for initializing pbInstrBuf (the
856 * recompiler does this). In this case it is best
857 * to set pbInstrBuf to NULL prior to the call.
858 */
859void iemOpcodeFetchBytesJmp(PVMCPUCC pVCpu, size_t cbDst, void *pvDst) IEM_NOEXCEPT_MAY_LONGJMP
860{
861# ifdef IN_RING3
862 for (;;)
863 {
864 Assert(cbDst <= 8);
865 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
866
867 /*
868 * We might have a partial buffer match, deal with that first to make the
869 * rest simpler. This is the first part of the cross page/buffer case.
870 */
871 uint8_t const * const pbInstrBuf = pVCpu->iem.s.pbInstrBuf;
872 if (pbInstrBuf != NULL)
873 {
874 Assert(cbDst != 0); /* pbInstrBuf shall be NULL in case of a TLB load */
875 uint32_t const cbInstrBuf = pVCpu->iem.s.cbInstrBuf;
876 if (offBuf < cbInstrBuf)
877 {
878 Assert(offBuf + cbDst > cbInstrBuf);
879 uint32_t const cbCopy = cbInstrBuf - offBuf;
880 memcpy(pvDst, &pbInstrBuf[offBuf], cbCopy);
881
882 cbDst -= cbCopy;
883 pvDst = (uint8_t *)pvDst + cbCopy;
884 offBuf += cbCopy;
885 }
886 }
887
888 /*
889 * Check segment limit, figuring how much we're allowed to access at this point.
890 *
891 * We will fault immediately if RIP is past the segment limit / in non-canonical
892 * territory. If we do continue, there are one or more bytes to read before we
893 * end up in trouble and we need to do that first before faulting.
894 */
895 RTGCPTR GCPtrFirst;
896 uint32_t cbMaxRead;
897 if (IEM_IS_64BIT_CODE(pVCpu))
898 {
899 GCPtrFirst = pVCpu->cpum.GstCtx.rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
900 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
901 { /* likely */ }
902 else
903 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
904 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
905 }
906 else
907 {
908 GCPtrFirst = pVCpu->cpum.GstCtx.eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
909 /* Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu)); - this is allowed */
910 if (RT_LIKELY((uint32_t)GCPtrFirst <= pVCpu->cpum.GstCtx.cs.u32Limit))
911 { /* likely */ }
912 else /** @todo For CPUs older than the 386, we should not necessarily generate \#GP here but wrap around! */
913 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
914 cbMaxRead = pVCpu->cpum.GstCtx.cs.u32Limit - (uint32_t)GCPtrFirst + 1;
915 if (cbMaxRead != 0)
916 { /* likely */ }
917 else
918 {
919 /* Overflowed because address is 0 and limit is max. */
920 Assert(GCPtrFirst == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
921 cbMaxRead = X86_PAGE_SIZE;
922 }
923 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base;
924 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
925 if (cbMaxRead2 < cbMaxRead)
926 cbMaxRead = cbMaxRead2;
927 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
928 }
929
930 /*
931 * Get the TLB entry for this piece of code.
932 */
933 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.CodeTlb, GCPtrFirst);
934 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.CodeTlb, uTag);
935 if (pTlbe->uTag == uTag)
936 {
937 /* likely when executing lots of code, otherwise unlikely */
938# ifdef VBOX_WITH_STATISTICS
939 pVCpu->iem.s.CodeTlb.cTlbHits++;
940# endif
941 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_ACCESSED));
942
943 /* Check TLB page table level access flags. */
944 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
945 {
946 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && IEM_GET_CPL(pVCpu) == 3)
947 {
948 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
949 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
950 }
951 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
952 {
953 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrFirst));
954 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
955 }
956 }
957
958 /* Look up the physical page info if necessary. */
959 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
960 { /* not necessary */ }
961 else
962 {
963 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
964 { /* likely */ }
965 else
966 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
967 pTlbe->fFlagsAndPhysRev &= ~IEMTLBE_GCPHYS2PTR_MASK;
968 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
969 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
970 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
971 }
972 }
973 else
974 {
975 pVCpu->iem.s.CodeTlb.cTlbMisses++;
976
977 /* This page table walking will set A bits as required by the access while performing the walk.
978 ASSUMES these are set when the address is translated rather than on commit... */
979 /** @todo testcase: check when A bits are actually set by the CPU for code. */
980 PGMPTWALKFAST WalkFast;
981 int rc = PGMGstQueryPageFast(pVCpu, GCPtrFirst,
982 IEM_GET_CPL(pVCpu) == 3 ? PGMQPAGE_F_EXECUTE | PGMQPAGE_F_USER_MODE : PGMQPAGE_F_EXECUTE,
983 &WalkFast);
984 if (RT_SUCCESS(rc))
985 Assert((WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED) && WalkFast.fFailed == PGM_WALKFAIL_SUCCESS);
986 else
987 {
988#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
989 /** @todo Nested VMX: Need to handle EPT violation/misconfig here? OF COURSE! */
990 Assert(!(WalkFast.fFailed & PGM_WALKFAIL_EPT));
991#endif
992 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
993 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, rc);
994 }
995
996 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
997 pTlbe->uTag = uTag;
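            /* Fold the page table info into the entry: the positive US/RW/D/A bits are
               inverted into the IEMTLBE_F_PT_NO_* flags, and the NX bit is shifted down
               into bit 0 (IEMTLBE_F_PT_NO_EXEC, see the AssertCompile above). */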
998 pTlbe->fFlagsAndPhysRev = (~WalkFast.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A))
999 | (WalkFast.fEffective >> X86_PTE_PAE_BIT_NX) /*IEMTLBE_F_PT_NO_EXEC*/;
1000 RTGCPHYS const GCPhysPg = WalkFast.GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
1001 pTlbe->GCPhys = GCPhysPg;
1002 pTlbe->pbMappingR3 = NULL;
1003 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE));
1004 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) || IEM_GET_CPL(pVCpu) != 3);
1005 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_ACCESSED));
1006
1007 /* Resolve the physical address. */
1008 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
1009 { /* likely */ }
1010 else
1011 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
1012 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_GCPHYS2PTR_MASK));
1013 rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, GCPhysPg, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
1014 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
1015 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
1016 }
1017
1018# if defined(IN_RING3) || defined(IN_RING0) /** @todo fixme */
1019 /*
1020 * Try to do a direct read using the pbMappingR3 pointer.
1021 * Note! Do not recheck the physical TLB revision number here as we have the
1022 * wrong response to changes in the else case. If someone is updating
1023 * pVCpu->iem.s.CodeTlb.uTlbPhysRev in parallel to us, we should be fine
1024 * pretending we always won the race.
1025 */
1026 if ( (pTlbe->fFlagsAndPhysRev & (/*IEMTLBE_F_PHYS_REV |*/ IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
1027 == /*pVCpu->iem.s.CodeTlb.uTlbPhysRev*/ 0U)
1028 {
1029 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1030 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
1031 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
1032 {
1033 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
1034 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
1035 }
1036 else
1037 {
1038 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1039 if (cbInstr + (uint32_t)cbDst <= 15)
1040 {
1041 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
1042 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1043 }
1044 else
1045 {
1046 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0)\n",
1047 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, cbDst));
1048 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1049 }
1050 }
1051 if (cbDst <= cbMaxRead)
1052 {
1053 pVCpu->iem.s.fTbCrossedPage |= offPg == 0 || pVCpu->iem.s.fTbBranched != 0; /** @todo Spurious load effect on branch handling? */
1054 pVCpu->iem.s.GCPhysInstrBufPrev = pVCpu->iem.s.GCPhysInstrBuf;
1055
1056 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
1057 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1058 pVCpu->iem.s.GCPhysInstrBuf = pTlbe->GCPhys;
1059 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
1060 if (cbDst > 0) /* To make ASAN happy in the TLB load case. */
1061 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
1062 else
1063 Assert(!pvDst);
1064 return;
1065 }
1066 pVCpu->iem.s.pbInstrBuf = NULL;
1067
1068 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
1069 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
1070 }
1071# else
1072# error "refactor as needed"
1073 /*
1074 * If there is no special read handling, we can read a bit more and
1075 * put it in the prefetch buffer.
1076 */
1077 if ( cbDst < cbMaxRead
1078 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1079 {
1080 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
1081 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
1082 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1083 { /* likely */ }
1084 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1085 {
1086 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1087 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1088 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1089 AssertStmt(rcStrict == VINF_SUCCESS, IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict)));
1090 }
1091 else
1092 {
1093 Log((RT_SUCCESS(rcStrict)
1094 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1095 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1096 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1097 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1098 }
1099 }
1100# endif
1101 /*
1102 * Special read handling, so only read exactly what's needed.
1103 * This is a highly unlikely scenario.
1104 */
1105 else
1106 {
1107 pVCpu->iem.s.CodeTlb.cTlbSlowReadPath++;
1108
1109 /* Check instruction length. */
1110 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1111 if (RT_LIKELY(cbInstr + cbDst <= 15))
1112 { /* likely */ }
1113 else
1114 {
1115 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0) [slow]\n",
1116 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, cbDst));
1117 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1118 }
1119
1120 /* Do the reading. */
1121 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
1122 if (cbToRead > 0)
1123 {
1124 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
1125 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
1126 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1127 { /* likely */ }
1128 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1129 {
1130 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1131 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), VBOXSTRICTRC_VAL(rcStrict), cbToRead));
1132 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1133 AssertStmt(rcStrict == VINF_SUCCESS, IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict)));
1134 }
1135 else
1136 {
1137 Log((RT_SUCCESS(rcStrict)
1138 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1139 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1140 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), VBOXSTRICTRC_VAL(rcStrict), cbToRead));
1141 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1142 }
1143 }
1144
1145 /* Update the state and probably return. */
1146 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1147 pVCpu->iem.s.fTbCrossedPage |= offPg == 0 || pVCpu->iem.s.fTbBranched != 0;
1148 pVCpu->iem.s.GCPhysInstrBufPrev = pVCpu->iem.s.GCPhysInstrBuf;
1149
1150 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1151 pVCpu->iem.s.offInstrNextByte = offPg + cbInstr + cbToRead;
1152 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead + cbInstr) - cbToRead - cbInstr;
1153 pVCpu->iem.s.cbInstrBufTotal = X86_PAGE_SIZE; /** @todo ??? */
1154 pVCpu->iem.s.GCPhysInstrBuf = pTlbe->GCPhys;
1155 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1156 pVCpu->iem.s.pbInstrBuf = NULL;
1157 if (cbToRead == cbDst)
1158 return;
1159 Assert(cbToRead == cbMaxRead);
1160 }
1161
1162 /*
1163 * More to read, loop.
1164 */
1165 cbDst -= cbMaxRead;
1166 pvDst = (uint8_t *)pvDst + cbMaxRead;
1167 }
1168# else /* !IN_RING3 */
1169 RT_NOREF(pvDst, cbDst);
1170 if (pvDst || cbDst)
1171 IEM_DO_LONGJMP(pVCpu, VERR_INTERNAL_ERROR);
1172# endif /* !IN_RING3 */
1173}
1174
1175#else /* !IEM_WITH_CODE_TLB */
1176
1177/**
1178 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
1179 * exception if it fails.
1180 *
1181 * @returns Strict VBox status code.
1182 * @param pVCpu The cross context virtual CPU structure of the
1183 * calling thread.
1184 * @param cbMin The minimum number of bytes, relative to offOpcode,
1185 * that must be read.
1186 */
1187VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPUCC pVCpu, size_t cbMin) RT_NOEXCEPT
1188{
1189 /*
1190 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1191 *
1192 * First translate CS:rIP to a physical address.
1193 */
1194 uint8_t const cbOpcode = pVCpu->iem.s.cbOpcode;
1195 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
1196 uint8_t const cbLeft = cbOpcode - offOpcode;
1197 Assert(cbLeft < cbMin);
1198 Assert(cbOpcode <= sizeof(pVCpu->iem.s.abOpcode));
1199
1200 uint32_t cbToTryRead;
1201 RTGCPTR GCPtrNext;
1202 if (IEM_IS_64BIT_CODE(pVCpu))
1203 {
1204 GCPtrNext = pVCpu->cpum.GstCtx.rip + cbOpcode;
1205 if (!IEM_IS_CANONICAL(GCPtrNext))
1206 return iemRaiseGeneralProtectionFault0(pVCpu);
1207 cbToTryRead = GUEST_PAGE_SIZE - (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1208 }
1209 else
1210 {
1211 uint32_t GCPtrNext32 = pVCpu->cpum.GstCtx.eip;
1212 /* Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu)); - this is allowed */
1213 GCPtrNext32 += cbOpcode;
1214 if (GCPtrNext32 > pVCpu->cpum.GstCtx.cs.u32Limit)
1215 /** @todo For CPUs older than the 386, we should not generate \#GP here but wrap around! */
1216 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1217 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrNext32 + 1;
1218 if (!cbToTryRead) /* overflowed */
1219 {
1220 Assert(GCPtrNext32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1221 cbToTryRead = UINT32_MAX;
1222 /** @todo check out wrapping around the code segment. */
1223 }
1224 if (cbToTryRead < cbMin - cbLeft)
1225 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1226 GCPtrNext = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrNext32;
1227
1228 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1229 if (cbToTryRead > cbLeftOnPage)
1230 cbToTryRead = cbLeftOnPage;
1231 }
1232
1233 /* Restrict to opcode buffer space.
1234
1235 We're making ASSUMPTIONS here based on work done previously in
1236 iemInitDecoderAndPrefetchOpcodes, where bytes from the first page will
1237 be fetched in case of an instruction crossing two pages. */
1238 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - cbOpcode)
1239 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - cbOpcode;
1240 if (RT_LIKELY(cbToTryRead + cbLeft >= cbMin))
1241 { /* likely */ }
1242 else
1243 {
1244 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0)\n",
1245 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, offOpcode, cbMin));
1246 return iemRaiseGeneralProtectionFault0(pVCpu);
1247 }
1248
1249 PGMPTWALKFAST WalkFast;
1250 int rc = PGMGstQueryPageFast(pVCpu, GCPtrNext,
1251 IEM_GET_CPL(pVCpu) == 3 ? PGMQPAGE_F_EXECUTE | PGMQPAGE_F_USER_MODE : PGMQPAGE_F_EXECUTE,
1252 &WalkFast);
1253 if (RT_SUCCESS(rc))
1254 Assert((WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED) && WalkFast.fFailed == PGM_WALKFAIL_SUCCESS);
1255 else
1256 {
1257 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
1258#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1259 if (WalkFast.fFailed & PGM_WALKFAIL_EPT)
1260 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
1261#endif
1262 return iemRaisePageFault(pVCpu, GCPtrNext, 1, IEM_ACCESS_INSTRUCTION, rc);
1263 }
1264 Assert((WalkFast.fEffective & X86_PTE_US) || IEM_GET_CPL(pVCpu) != 3);
1265 Assert(!(WalkFast.fEffective & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE));
1266
1267 RTGCPHYS const GCPhys = WalkFast.GCPhys;
1268 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, cbOpcode));
1269
1270 /*
1271 * Read the bytes at this address.
1272 *
1273 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
1274 * and since PATM should only patch the start of an instruction there
1275 * should be no need to check again here.
1276 */
1277 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
1278 {
1279 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[cbOpcode],
1280 cbToTryRead, PGMACCESSORIGIN_IEM);
1281 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1282 { /* likely */ }
1283 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1284 {
1285 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1286 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1287 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1288 }
1289 else
1290 {
1291 Log((RT_SUCCESS(rcStrict)
1292 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1293 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1294 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1295 return rcStrict;
1296 }
1297 }
1298 else
1299 {
1300 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[cbOpcode], GCPhys, cbToTryRead);
1301 if (RT_SUCCESS(rc))
1302 { /* likely */ }
1303 else
1304 {
1305 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
1306 return rc;
1307 }
1308 }
1309 pVCpu->iem.s.cbOpcode = cbOpcode + cbToTryRead;
1310 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
1311
1312 return VINF_SUCCESS;
1313}
1314
1315#endif /* !IEM_WITH_CODE_TLB */
1316#ifndef IEM_WITH_SETJMP
1317
1318/**
1319 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
1320 *
1321 * @returns Strict VBox status code.
1322 * @param pVCpu The cross context virtual CPU structure of the
1323 * calling thread.
1324 * @param pb Where to return the opcode byte.
1325 */
1326VBOXSTRICTRC iemOpcodeGetNextU8Slow(PVMCPUCC pVCpu, uint8_t *pb) RT_NOEXCEPT
1327{
1328 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1329 if (rcStrict == VINF_SUCCESS)
1330 {
1331 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1332 *pb = pVCpu->iem.s.abOpcode[offOpcode];
1333 pVCpu->iem.s.offOpcode = offOpcode + 1;
1334 }
1335 else
1336 *pb = 0;
1337 return rcStrict;
1338}
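/* For reference, the non-slow iemOpcodeGetNextU8 fast path (living in the inline
 * headers) is expected to look roughly like the sketch below; this illustrates the
 * fast/slow split and is not a copy of the actual inline code:
 *     if (RT_LIKELY(pVCpu->iem.s.offOpcode < pVCpu->iem.s.cbOpcode))
 *     {
 *         *pb = pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
 *         return VINF_SUCCESS;
 *     }
 *     return iemOpcodeGetNextU8Slow(pVCpu, pb);
 */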
1339
1340#else /* IEM_WITH_SETJMP */
1341
1342/**
1343 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
1344 *
1345 * @returns The opcode byte.
1346 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1347 */
1348uint8_t iemOpcodeGetNextU8SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1349{
1350# ifdef IEM_WITH_CODE_TLB
1351 uint8_t u8;
1352 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
1353 return u8;
1354# else
1355 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1356 if (rcStrict == VINF_SUCCESS)
1357 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
1358 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1359# endif
1360}
1361
1362#endif /* IEM_WITH_SETJMP */
1363
1364#ifndef IEM_WITH_SETJMP
1365
1366/**
1367 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
1368 *
1369 * @returns Strict VBox status code.
1370 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1371 * @param pu16 Where to return the opcode word.
1372 */
1373VBOXSTRICTRC iemOpcodeGetNextS8SxU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
1374{
1375 uint8_t u8;
1376 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1377 if (rcStrict == VINF_SUCCESS)
1378 *pu16 = (int8_t)u8;
1379 return rcStrict;
1380}
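/* Example of the sign extension performed here and by the U32/U64 variants below: an
 * opcode byte of 0xFE (-2) comes back as 0xFFFE, 0xFFFFFFFE and 0xFFFFFFFFFFFFFFFE
 * respectively. */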
1381
1382
1383/**
1384 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
1385 *
1386 * @returns Strict VBox status code.
1387 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1388 * @param pu32 Where to return the opcode dword.
1389 */
1390VBOXSTRICTRC iemOpcodeGetNextS8SxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1391{
1392 uint8_t u8;
1393 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1394 if (rcStrict == VINF_SUCCESS)
1395 *pu32 = (int8_t)u8;
1396 return rcStrict;
1397}
1398
1399
1400/**
1401 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
1402 *
1403 * @returns Strict VBox status code.
1404 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1405 * @param pu64 Where to return the opcode qword.
1406 */
1407VBOXSTRICTRC iemOpcodeGetNextS8SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1408{
1409 uint8_t u8;
1410 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1411 if (rcStrict == VINF_SUCCESS)
1412 *pu64 = (int8_t)u8;
1413 return rcStrict;
1414}
1415
1416#endif /* !IEM_WITH_SETJMP */
1417
1418
1419#ifndef IEM_WITH_SETJMP
1420
1421/**
1422 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
1423 *
1424 * @returns Strict VBox status code.
1425 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1426 * @param pu16 Where to return the opcode word.
1427 */
1428VBOXSTRICTRC iemOpcodeGetNextU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
1429{
1430 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1431 if (rcStrict == VINF_SUCCESS)
1432 {
1433 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1434# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1435 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1436# else
1437 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1438# endif
1439 pVCpu->iem.s.offOpcode = offOpcode + 2;
1440 }
1441 else
1442 *pu16 = 0;
1443 return rcStrict;
1444}
1445
1446#else /* IEM_WITH_SETJMP */
1447
1448/**
1449 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error.
1450 *
1451 * @returns The opcode word.
1452 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1453 */
1454uint16_t iemOpcodeGetNextU16SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1455{
1456# ifdef IEM_WITH_CODE_TLB
1457 uint16_t u16;
1458 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
1459 return u16;
1460# else
1461 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1462 if (rcStrict == VINF_SUCCESS)
1463 {
1464 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1465 pVCpu->iem.s.offOpcode += 2;
1466# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1467 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1468# else
1469 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1470# endif
1471 }
1472 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1473# endif
1474}
1475
1476#endif /* IEM_WITH_SETJMP */
1477
1478#ifndef IEM_WITH_SETJMP
1479
1480/**
1481 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
1482 *
1483 * @returns Strict VBox status code.
1484 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1485 * @param pu32 Where to return the opcode double word.
1486 */
1487VBOXSTRICTRC iemOpcodeGetNextU16ZxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1488{
1489 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1490 if (rcStrict == VINF_SUCCESS)
1491 {
1492 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1493 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1494 pVCpu->iem.s.offOpcode = offOpcode + 2;
1495 }
1496 else
1497 *pu32 = 0;
1498 return rcStrict;
1499}
1500
1501
1502/**
1503 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
1504 *
1505 * @returns Strict VBox status code.
1506 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1507 * @param pu64 Where to return the opcode quad word.
1508 */
1509VBOXSTRICTRC iemOpcodeGetNextU16ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1510{
1511 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1512 if (rcStrict == VINF_SUCCESS)
1513 {
1514 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1515 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1516 pVCpu->iem.s.offOpcode = offOpcode + 2;
1517 }
1518 else
1519 *pu64 = 0;
1520 return rcStrict;
1521}
1522
1523#endif /* !IEM_WITH_SETJMP */
1524
1525#ifndef IEM_WITH_SETJMP
1526
1527/**
1528 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
1529 *
1530 * @returns Strict VBox status code.
1531 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1532 * @param pu32 Where to return the opcode dword.
1533 */
1534VBOXSTRICTRC iemOpcodeGetNextU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1535{
1536 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1537 if (rcStrict == VINF_SUCCESS)
1538 {
1539 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1540# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1541 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1542# else
1543 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1544 pVCpu->iem.s.abOpcode[offOpcode + 1],
1545 pVCpu->iem.s.abOpcode[offOpcode + 2],
1546 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1547# endif
1548 pVCpu->iem.s.offOpcode = offOpcode + 4;
1549 }
1550 else
1551 *pu32 = 0;
1552 return rcStrict;
1553}
1554
1555#else /* IEM_WITH_SETJMP */
1556
1557/**
1558 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
1559 *
1560 * @returns The opcode dword.
1561 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1562 */
1563uint32_t iemOpcodeGetNextU32SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1564{
1565# ifdef IEM_WITH_CODE_TLB
1566 uint32_t u32;
1567 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
1568 return u32;
1569# else
1570 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1571 if (rcStrict == VINF_SUCCESS)
1572 {
1573 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1574 pVCpu->iem.s.offOpcode = offOpcode + 4;
1575# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1576 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1577# else
1578 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1579 pVCpu->iem.s.abOpcode[offOpcode + 1],
1580 pVCpu->iem.s.abOpcode[offOpcode + 2],
1581 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1582# endif
1583 }
1584 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1585# endif
1586}
1587
1588#endif /* IEM_WITH_SETJMP */
1589
1590#ifndef IEM_WITH_SETJMP
1591
1592/**
1593 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
1594 *
1595 * @returns Strict VBox status code.
1596 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1597 * @param pu64 Where to return the opcode dword, zero extended to a qword.
1598 */
1599VBOXSTRICTRC iemOpcodeGetNextU32ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1600{
1601 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1602 if (rcStrict == VINF_SUCCESS)
1603 {
1604 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1605 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1606 pVCpu->iem.s.abOpcode[offOpcode + 1],
1607 pVCpu->iem.s.abOpcode[offOpcode + 2],
1608 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1609 pVCpu->iem.s.offOpcode = offOpcode + 4;
1610 }
1611 else
1612 *pu64 = 0;
1613 return rcStrict;
1614}
1615
1616
1617/**
1618 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
1619 *
1620 * @returns Strict VBox status code.
1621 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1622 * @param pu64 Where to return the opcode dword, sign extended to a qword.
1623 */
1624VBOXSTRICTRC iemOpcodeGetNextS32SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1625{
1626 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1627 if (rcStrict == VINF_SUCCESS)
1628 {
1629 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1630 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1631 pVCpu->iem.s.abOpcode[offOpcode + 1],
1632 pVCpu->iem.s.abOpcode[offOpcode + 2],
1633 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1634 pVCpu->iem.s.offOpcode = offOpcode + 4;
1635 }
1636 else
1637 *pu64 = 0;
1638 return rcStrict;
1639}
1640
1641#endif /* !IEM_WITH_SETJMP */
1642
1643#ifndef IEM_WITH_SETJMP
1644
1645/**
1646 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
1647 *
1648 * @returns Strict VBox status code.
1649 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1650 * @param pu64 Where to return the opcode qword.
1651 */
1652VBOXSTRICTRC iemOpcodeGetNextU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1653{
1654 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
1655 if (rcStrict == VINF_SUCCESS)
1656 {
1657 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1658# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1659 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1660# else
1661 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1662 pVCpu->iem.s.abOpcode[offOpcode + 1],
1663 pVCpu->iem.s.abOpcode[offOpcode + 2],
1664 pVCpu->iem.s.abOpcode[offOpcode + 3],
1665 pVCpu->iem.s.abOpcode[offOpcode + 4],
1666 pVCpu->iem.s.abOpcode[offOpcode + 5],
1667 pVCpu->iem.s.abOpcode[offOpcode + 6],
1668 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1669# endif
1670 pVCpu->iem.s.offOpcode = offOpcode + 8;
1671 }
1672 else
1673 *pu64 = 0;
1674 return rcStrict;
1675}
1676
1677#else /* IEM_WITH_SETJMP */
1678
1679/**
1680 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
1681 *
1682 * @returns The opcode qword.
1683 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1684 */
1685uint64_t iemOpcodeGetNextU64SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1686{
1687# ifdef IEM_WITH_CODE_TLB
1688 uint64_t u64;
1689 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
1690 return u64;
1691# else
1692 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
1693 if (rcStrict == VINF_SUCCESS)
1694 {
1695 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1696 pVCpu->iem.s.offOpcode = offOpcode + 8;
1697# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1698 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1699# else
1700 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1701 pVCpu->iem.s.abOpcode[offOpcode + 1],
1702 pVCpu->iem.s.abOpcode[offOpcode + 2],
1703 pVCpu->iem.s.abOpcode[offOpcode + 3],
1704 pVCpu->iem.s.abOpcode[offOpcode + 4],
1705 pVCpu->iem.s.abOpcode[offOpcode + 5],
1706 pVCpu->iem.s.abOpcode[offOpcode + 6],
1707 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1708# endif
1709 }
1710 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1711# endif
1712}
1713
1714#endif /* IEM_WITH_SETJMP */
1715
1716
1717
1718/** @name Misc Worker Functions.
1719 * @{
1720 */
1721
1722/**
1723 * Gets the exception class for the specified exception vector.
1724 *
1725 * @returns The class of the specified exception.
1726 * @param uVector The exception vector.
1727 */
1728static IEMXCPTCLASS iemGetXcptClass(uint8_t uVector) RT_NOEXCEPT
1729{
1730 Assert(uVector <= X86_XCPT_LAST);
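    /* Note: the benign/contributory/page-fault grouping below mirrors the double
       fault condition tables in the Intel SDM and AMD APM; IEMEvaluateRecursiveXcpt
       combines the classes of the first and second event to decide between normal
       delivery, a #DF, or a triple fault. */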
1731 switch (uVector)
1732 {
1733 case X86_XCPT_DE:
1734 case X86_XCPT_TS:
1735 case X86_XCPT_NP:
1736 case X86_XCPT_SS:
1737 case X86_XCPT_GP:
1738 case X86_XCPT_SX: /* AMD only */
1739 return IEMXCPTCLASS_CONTRIBUTORY;
1740
1741 case X86_XCPT_PF:
1742 case X86_XCPT_VE: /* Intel only */
1743 return IEMXCPTCLASS_PAGE_FAULT;
1744
1745 case X86_XCPT_DF:
1746 return IEMXCPTCLASS_DOUBLE_FAULT;
1747 }
1748 return IEMXCPTCLASS_BENIGN;
1749}
1750
1751
1752/**
1753 * Evaluates how to handle an exception caused during delivery of another event
1754 * (exception / interrupt).
1755 *
1756 * @returns How to handle the recursive exception.
1757 * @param pVCpu The cross context virtual CPU structure of the
1758 * calling thread.
1759 * @param fPrevFlags The flags of the previous event.
1760 * @param uPrevVector The vector of the previous event.
1761 * @param fCurFlags The flags of the current exception.
1762 * @param uCurVector The vector of the current exception.
1763 * @param pfXcptRaiseInfo Where to store additional information about the
1764 * exception condition. Optional.
1765 */
1766VMM_INT_DECL(IEMXCPTRAISE) IEMEvaluateRecursiveXcpt(PVMCPUCC pVCpu, uint32_t fPrevFlags, uint8_t uPrevVector, uint32_t fCurFlags,
1767 uint8_t uCurVector, PIEMXCPTRAISEINFO pfXcptRaiseInfo)
1768{
1769 /*
1770 * Only CPU exceptions can be raised while delivering other events; software interrupt
1771 * (INTn/INT3/INTO/ICEBP) generated exceptions cannot occur as the current (second) exception.
1772 */
1773 AssertReturn(fCurFlags & IEM_XCPT_FLAGS_T_CPU_XCPT, IEMXCPTRAISE_INVALID);
1774 Assert(pVCpu); RT_NOREF(pVCpu);
1775 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x\n", uPrevVector, uCurVector));
1776
1777 IEMXCPTRAISE enmRaise = IEMXCPTRAISE_CURRENT_XCPT;
1778 IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
1779 if (fPrevFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
1780 {
1781 IEMXCPTCLASS enmPrevXcptClass = iemGetXcptClass(uPrevVector);
1782 if (enmPrevXcptClass != IEMXCPTCLASS_BENIGN)
1783 {
1784 IEMXCPTCLASS enmCurXcptClass = iemGetXcptClass(uCurVector);
1785 if ( enmPrevXcptClass == IEMXCPTCLASS_PAGE_FAULT
1786 && ( enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT
1787 || enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY))
1788 {
1789 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
1790 fRaiseInfo = enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT ? IEMXCPTRAISEINFO_PF_PF
1791 : IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT;
1792 Log2(("IEMEvaluateRecursiveXcpt: Vectoring page fault. uPrevVector=%#x uCurVector=%#x uCr2=%#RX64\n", uPrevVector,
1793 uCurVector, pVCpu->cpum.GstCtx.cr2));
1794 }
1795 else if ( enmPrevXcptClass == IEMXCPTCLASS_CONTRIBUTORY
1796 && enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY)
1797 {
1798 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
1799 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x -> #DF\n", uPrevVector, uCurVector));
1800 }
1801 else if ( enmPrevXcptClass == IEMXCPTCLASS_DOUBLE_FAULT
1802 && ( enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY
1803 || enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT))
1804 {
1805 enmRaise = IEMXCPTRAISE_TRIPLE_FAULT;
1806 Log2(("IEMEvaluateRecursiveXcpt: #DF handler raised a %#x exception -> triple fault\n", uCurVector));
1807 }
1808 }
1809 else
1810 {
1811 if (uPrevVector == X86_XCPT_NMI)
1812 {
1813 fRaiseInfo = IEMXCPTRAISEINFO_NMI_XCPT;
1814 if (uCurVector == X86_XCPT_PF)
1815 {
1816 fRaiseInfo |= IEMXCPTRAISEINFO_NMI_PF;
1817 Log2(("IEMEvaluateRecursiveXcpt: NMI delivery caused a page fault\n"));
1818 }
1819 }
1820 else if ( uPrevVector == X86_XCPT_AC
1821 && uCurVector == X86_XCPT_AC)
1822 {
1823 enmRaise = IEMXCPTRAISE_CPU_HANG;
1824 fRaiseInfo = IEMXCPTRAISEINFO_AC_AC;
1825 Log2(("IEMEvaluateRecursiveXcpt: Recursive #AC - Bad guest\n"));
1826 }
1827 }
1828 }
1829 else if (fPrevFlags & IEM_XCPT_FLAGS_T_EXT_INT)
1830 {
1831 fRaiseInfo = IEMXCPTRAISEINFO_EXT_INT_XCPT;
1832 if (uCurVector == X86_XCPT_PF)
1833 fRaiseInfo |= IEMXCPTRAISEINFO_EXT_INT_PF;
1834 }
1835 else
1836 {
1837 Assert(fPrevFlags & IEM_XCPT_FLAGS_T_SOFT_INT);
1838 fRaiseInfo = IEMXCPTRAISEINFO_SOFT_INT_XCPT;
1839 }
1840
1841 if (pfXcptRaiseInfo)
1842 *pfXcptRaiseInfo = fRaiseInfo;
1843 return enmRaise;
1844}
1845
1846
1847/**
1848 * Enters the CPU shutdown state initiated by a triple fault or other
1849 * unrecoverable conditions.
1850 *
1851 * @returns Strict VBox status code.
1852 * @param pVCpu The cross context virtual CPU structure of the
1853 * calling thread.
1854 */
1855static VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPUCC pVCpu) RT_NOEXCEPT
1856{
1857 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
1858 IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(pVCpu, VMX_EXIT_TRIPLE_FAULT, 0 /* u64ExitQual */);
1859
1860 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
1861 {
1862 Log2(("shutdown: Guest intercept -> #VMEXIT\n"));
1863 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
1864 }
1865
1866 RT_NOREF(pVCpu);
1867 return VINF_EM_TRIPLE_FAULT;
1868}
1869
1870
1871/**
1872 * Validates a new SS segment.
1873 *
1874 * @returns VBox strict status code.
1875 * @param pVCpu The cross context virtual CPU structure of the
1876 * calling thread.
1877 * @param NewSS The new SS selector.
1878 * @param uCpl The CPL to load the stack for.
1879 * @param pDesc Where to return the descriptor.
1880 */
1881static VBOXSTRICTRC iemMiscValidateNewSS(PVMCPUCC pVCpu, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc) RT_NOEXCEPT
1882{
1883 /* Null selectors are not allowed (we're not called for dispatching
1884 interrupts with SS=0 in long mode). */
1885 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
1886 {
1887 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
1888 return iemRaiseTaskSwitchFault0(pVCpu);
1889 }
1890
1891 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
1892 if ((NewSS & X86_SEL_RPL) != uCpl)
1893 {
1894 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl));
1895 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1896 }
1897
1898 /*
1899 * Read the descriptor.
1900 */
1901 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
1902 if (rcStrict != VINF_SUCCESS)
1903 return rcStrict;
1904
1905 /*
1906 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
1907 */
1908 if (!pDesc->Legacy.Gen.u1DescType)
1909 {
1910 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
1911 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1912 }
1913
1914 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
1915 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
1916 {
1917 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
1918 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1919 }
1920 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
1921 {
1922 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
1923 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1924 }
1925
1926 /* Is it there? */
1927 /** @todo testcase: Is this checked before the canonical / limit check below? */
1928 if (!pDesc->Legacy.Gen.u1Present)
1929 {
1930 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
1931 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
1932 }
1933
1934 return VINF_SUCCESS;
1935}
1936
1937/** @} */
1938
1939
1940/** @name Raising Exceptions.
1941 *
1942 * @{
1943 */
1944
1945
1946/**
1947 * Loads the specified stack far pointer from the TSS.
1948 *
1949 * @returns VBox strict status code.
1950 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1951 * @param uCpl The CPL to load the stack for.
1952 * @param pSelSS Where to return the new stack segment.
1953 * @param puEsp Where to return the new stack pointer.
1954 */
1955static VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPUCC pVCpu, uint8_t uCpl, PRTSEL pSelSS, uint32_t *puEsp) RT_NOEXCEPT
1956{
1957 VBOXSTRICTRC rcStrict;
1958 Assert(uCpl < 4);
1959
1960 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
1961 switch (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type)
1962 {
1963 /*
1964 * 16-bit TSS (X86TSS16).
1965 */
1966 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
1967 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
1968 {
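            /* A 16-bit TSS stores the ring stacks as three SP:SS word pairs right after
               the backlink: SP0 at offset 2, SS0 at 4, SP1 at 6, SS1 at 8, SP2 at 10 and
               SS2 at 12.  Hence uCpl * 4 + 2, and a single dword read below yields SP in
               the low word and SS in the high word. */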
1969 uint32_t off = uCpl * 4 + 2;
1970 if (off + 4 <= pVCpu->cpum.GstCtx.tr.u32Limit)
1971 {
1972 /** @todo check actual access pattern here. */
1973 uint32_t u32Tmp = 0; /* gcc maybe... */
1974 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
1975 if (rcStrict == VINF_SUCCESS)
1976 {
1977 *puEsp = RT_LOWORD(u32Tmp);
1978 *pSelSS = RT_HIWORD(u32Tmp);
1979 return VINF_SUCCESS;
1980 }
1981 }
1982 else
1983 {
1984 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
1985 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
1986 }
1987 break;
1988 }
1989
1990 /*
1991 * 32-bit TSS (X86TSS32).
1992 */
1993 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
1994 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
1995 {
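            /* A 32-bit TSS stores the ring stacks as ESP:SS pairs of 8 bytes each,
               starting at offset 4: ESP0 at 4, SS0 at 8, ESP1 at 12, SS1 at 16, ESP2 at
               20 and SS2 at 24.  Hence uCpl * 8 + 4, and a single qword read below yields
               ESP in the low dword and SS in the low word of the high dword. */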
1996 uint32_t off = uCpl * 8 + 4;
1997 if (off + 7 <= pVCpu->cpum.GstCtx.tr.u32Limit)
1998 {
1999/** @todo check actual access pattern here. */
2000 uint64_t u64Tmp;
2001 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
2002 if (rcStrict == VINF_SUCCESS)
2003 {
2004 *puEsp = u64Tmp & UINT32_MAX;
2005 *pSelSS = (RTSEL)(u64Tmp >> 32);
2006 return VINF_SUCCESS;
2007 }
2008 }
2009 else
2010 {
2011 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
2012 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
2013 }
2014 break;
2015 }
2016
2017 default:
2018 AssertFailed();
2019 rcStrict = VERR_IEM_IPE_4;
2020 break;
2021 }
2022
2023 *puEsp = 0; /* make gcc happy */
2024 *pSelSS = 0; /* make gcc happy */
2025 return rcStrict;
2026}
2027
2028
2029/**
2030 * Loads the specified stack pointer from the 64-bit TSS.
2031 *
2032 * @returns VBox strict status code.
2033 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2034 * @param uCpl The CPL to load the stack for.
2035 * @param uIst The interrupt stack table index, 0 to use the uCpl stack.
2036 * @param puRsp Where to return the new stack pointer.
2037 */
2038static VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPUCC pVCpu, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp) RT_NOEXCEPT
2039{
2040 Assert(uCpl < 4);
2041 Assert(uIst < 8);
2042 *puRsp = 0; /* make gcc happy */
2043
2044 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
2045 AssertReturn(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
2046
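    /* The 64-bit TSS keeps RSP0..RSP2 at offsets 4, 12 and 20 and IST1..IST7 at
       offsets 36 thru 84; a non-zero uIst selects an IST slot, otherwise the RSP
       entry for the requested CPL is used. */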
2047 uint32_t off;
2048 if (uIst)
2049 off = (uIst - 1) * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, ist1);
2050 else
2051 off = uCpl * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, rsp0);
2052 if (off + sizeof(uint64_t) > pVCpu->cpum.GstCtx.tr.u32Limit)
2053 {
2054 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pVCpu->cpum.GstCtx.tr.u32Limit));
2055 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
2056 }
2057
2058 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
2059}
2060
2061
2062/**
2063 * Adjust the CPU state according to the exception being raised.
2064 *
2065 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2066 * @param u8Vector The exception that has been raised.
2067 */
2068DECLINLINE(void) iemRaiseXcptAdjustState(PVMCPUCC pVCpu, uint8_t u8Vector)
2069{
2070 switch (u8Vector)
2071 {
2072 case X86_XCPT_DB:
2073 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);
2074 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
2075 break;
2076 /** @todo Read the AMD and Intel exception reference... */
2077 }
2078}
2079
2080
2081/**
2082 * Implements exceptions and interrupts for real mode.
2083 *
2084 * @returns VBox strict status code.
2085 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2086 * @param cbInstr The number of bytes to offset rIP by in the return
2087 * address.
2088 * @param u8Vector The interrupt / exception vector number.
2089 * @param fFlags The flags.
2090 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2091 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2092 */
2093static VBOXSTRICTRC
2094iemRaiseXcptOrIntInRealMode(PVMCPUCC pVCpu,
2095 uint8_t cbInstr,
2096 uint8_t u8Vector,
2097 uint32_t fFlags,
2098 uint16_t uErr,
2099 uint64_t uCr2) RT_NOEXCEPT
2100{
2101 NOREF(uErr); NOREF(uCr2);
2102 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2103
2104 /*
2105 * Read the IDT entry.
2106 */
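    /* In real mode the IDT is the classic interrupt vector table: 4 bytes per
       vector holding a far 16:16 pointer, offset in the low word and segment in
       the high word -- hence the times-4 scaling and the RTFAR16 fetch below. */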
2107 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
2108 {
2109 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
2110 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2111 }
2112 RTFAR16 Idte;
2113 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(4) * u8Vector);
2114 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2115 {
2116 Log(("iemRaiseXcptOrIntInRealMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
2117 return rcStrict;
2118 }
2119
2120#ifdef LOG_ENABLED
2121 /* If software interrupt, try to decode it if logging is enabled and such. */
2122 if ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
2123 && LogIsItEnabled(RTLOGGRPFLAGS_ENABLED, LOG_GROUP_IEM_SYSCALL))
2124 iemLogSyscallRealModeInt(pVCpu, u8Vector, cbInstr);
2125#endif
2126
2127 /*
2128 * Push the stack frame.
2129 */
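    /* The frame is 6 bytes: FLAGS, CS and the return IP, written below so that IP
       ends up at the lowest address, i.e. in the order IRET will pop them. */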
2130 uint8_t bUnmapInfo;
2131 uint16_t *pu16Frame;
2132 uint64_t uNewRsp;
2133 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, 3, (void **)&pu16Frame, &bUnmapInfo, &uNewRsp);
2134 if (rcStrict != VINF_SUCCESS)
2135 return rcStrict;
2136
2137 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
2138#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
2139 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
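    /* On 8086/8088 and 80186 class CPUs (incl. the NEC V20/V30 as modelled here)
       bits 12 thru 15 of FLAGS read as all ones, so reflect that in the pushed image. */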
2140 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
2141 fEfl |= UINT16_C(0xf000);
2142#endif
2143 pu16Frame[2] = (uint16_t)fEfl;
2144 pu16Frame[1] = (uint16_t)pVCpu->cpum.GstCtx.cs.Sel;
2145 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
2146 rcStrict = iemMemStackPushCommitSpecial(pVCpu, bUnmapInfo, uNewRsp);
2147 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2148 return rcStrict;
2149
2150 /*
2151 * Load the vector address into cs:ip and make exception specific state
2152 * adjustments.
2153 */
2154 pVCpu->cpum.GstCtx.cs.Sel = Idte.sel;
2155 pVCpu->cpum.GstCtx.cs.ValidSel = Idte.sel;
2156 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
2157 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)Idte.sel << 4;
2158 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
2159 pVCpu->cpum.GstCtx.rip = Idte.off;
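    /* Delivery clears IF, TF and AC in the live EFLAGS; the image pushed onto the
       stack above keeps the original values for IRET to restore. */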
2160 fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC);
2161 IEMMISC_SET_EFL(pVCpu, fEfl);
2162
2163 /** @todo do we actually do this in real mode? */
2164 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
2165 iemRaiseXcptAdjustState(pVCpu, u8Vector);
2166
2167 /* The IEM_F_MODE_XXX and IEM_F_X86_CPL_MASK don't really change here,
2168 so best leave them alone in case we're in a weird kind of real mode... */
2169
2170 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
2171}
2172
2173
2174/**
2175 * Loads a NULL data selector into a segment register when coming from V8086 mode.
2176 *
2177 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2178 * @param pSReg Pointer to the segment register.
2179 */
2180DECLINLINE(void) iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPUCC pVCpu, PCPUMSELREG pSReg)
2181{
2182 pSReg->Sel = 0;
2183 pSReg->ValidSel = 0;
2184 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2185 {
2186 /* VT-x (Intel 3960x) doesn't change the base and limit; it clears and sets the following attributes */
2187 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
2188 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
2189 }
2190 else
2191 {
2192 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2193 /** @todo check this on AMD-V */
2194 pSReg->u64Base = 0;
2195 pSReg->u32Limit = 0;
2196 }
2197}
2198
2199
2200/**
2201 * Loads a segment selector during a task switch in V8086 mode.
2202 *
2203 * @param pSReg Pointer to the segment register.
2204 * @param uSel The selector value to load.
2205 */
2206DECLINLINE(void) iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
2207{
2208 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
2209 pSReg->Sel = uSel;
2210 pSReg->ValidSel = uSel;
2211 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2212 pSReg->u64Base = uSel << 4;
2213 pSReg->u32Limit = 0xffff;
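    /* 0xf3 = present, DPL 3, S=1, type 3 (read/write data, accessed) -- the fixed
       attributes of a segment register in virtual-8086 mode. */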
2214 pSReg->Attr.u = 0xf3;
2215}
2216
2217
2218/**
2219 * Loads a segment selector during a task switch in protected mode.
2220 *
2221 * In this task switch scenario, we would throw \#TS exceptions rather than
2222 * \#GPs.
2223 *
2224 * @returns VBox strict status code.
2225 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2226 * @param pSReg Pointer to the segment register.
2227 * @param uSel The new selector value.
2228 *
2229 * @remarks This does _not_ handle CS or SS.
2230 * @remarks This expects IEM_GET_CPL(pVCpu) to return an up to date value.
2231 */
2232static VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPUCC pVCpu, PCPUMSELREG pSReg, uint16_t uSel) RT_NOEXCEPT
2233{
2234 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2235
2236 /* Null data selector. */
2237 if (!(uSel & X86_SEL_MASK_OFF_RPL))
2238 {
2239 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
2240 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
2241 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2242 return VINF_SUCCESS;
2243 }
2244
2245 /* Fetch the descriptor. */
2246 IEMSELDESC Desc;
2247 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
2248 if (rcStrict != VINF_SUCCESS)
2249 {
2250 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
2251 VBOXSTRICTRC_VAL(rcStrict)));
2252 return rcStrict;
2253 }
2254
2255 /* Must be a data segment or readable code segment. */
2256 if ( !Desc.Legacy.Gen.u1DescType
2257 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
2258 {
2259 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
2260 Desc.Legacy.Gen.u4Type));
2261 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2262 }
2263
2264 /* Check privileges for data segments and non-conforming code segments. */
2265 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2266 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2267 {
2268 /* The RPL and the new CPL must be less than or equal to the DPL. */
2269 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
2270 || (IEM_GET_CPL(pVCpu) > Desc.Legacy.Gen.u2Dpl))
2271 {
2272 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
2273 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
2274 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2275 }
2276 }
2277
2278 /* Is it there? */
2279 if (!Desc.Legacy.Gen.u1Present)
2280 {
2281 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
2282 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2283 }
2284
2285 /* The base and limit. */
2286 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
2287 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
2288
2289 /*
2290 * Ok, everything checked out fine. Now set the accessed bit before
2291 * committing the result into the registers.
2292 */
2293 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2294 {
2295 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
2296 if (rcStrict != VINF_SUCCESS)
2297 return rcStrict;
2298 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2299 }
2300
2301 /* Commit */
2302 pSReg->Sel = uSel;
2303 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
2304 pSReg->u32Limit = cbLimit;
2305 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
2306 pSReg->ValidSel = uSel;
2307 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2308 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2309 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
2310
2311 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
2312 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2313 return VINF_SUCCESS;
2314}
2315
2316
2317/**
2318 * Performs a task switch.
2319 *
2320 * If the task switch is the result of a JMP, CALL or IRET instruction, the
2321 * caller is responsible for performing the necessary checks (like DPL, TSS
2322 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
2323 * reference for JMP, CALL, IRET.
2324 *
2325 * If the task switch is due to a software interrupt or hardware exception,
2326 * the caller is responsible for validating the TSS selector and descriptor. See
2327 * Intel Instruction reference for INT n.
2328 *
2329 * @returns VBox strict status code.
2330 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2331 * @param enmTaskSwitch The cause of the task switch.
2332 * @param uNextEip The EIP effective after the task switch.
2333 * @param fFlags The flags, see IEM_XCPT_FLAGS_XXX.
2334 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2335 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2336 * @param SelTss The TSS selector of the new task.
2337 * @param pNewDescTss Pointer to the new TSS descriptor.
2338 */
2339VBOXSTRICTRC
2340iemTaskSwitch(PVMCPUCC pVCpu,
2341 IEMTASKSWITCH enmTaskSwitch,
2342 uint32_t uNextEip,
2343 uint32_t fFlags,
2344 uint16_t uErr,
2345 uint64_t uCr2,
2346 RTSEL SelTss,
2347 PIEMSELDESC pNewDescTss) RT_NOEXCEPT
2348{
2349 Assert(!IEM_IS_REAL_MODE(pVCpu));
2350 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2351 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2352
2353 uint32_t const uNewTssType = pNewDescTss->Legacy.Gate.u4Type;
2354 Assert( uNewTssType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2355 || uNewTssType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2356 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2357 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2358
2359 bool const fIsNewTss386 = ( uNewTssType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2360 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2361
2362 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTss=%#x fIsNewTss386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTss,
2363 fIsNewTss386, pVCpu->cpum.GstCtx.eip, uNextEip));
2364
2365 /* Update CR2 in case it's a page-fault. */
2366 /** @todo This should probably be done much earlier in IEM/PGM. See
2367 * @bugref{5653#c49}. */
2368 if (fFlags & IEM_XCPT_FLAGS_CR2)
2369 pVCpu->cpum.GstCtx.cr2 = uCr2;
2370
2371 /*
2372 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
2373 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
2374 */
2375 uint32_t const uNewTssLimit = pNewDescTss->Legacy.Gen.u16LimitLow | (pNewDescTss->Legacy.Gen.u4LimitHigh << 16);
2376 uint32_t const uNewTssLimitMin = fIsNewTss386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
2377 if (uNewTssLimit < uNewTssLimitMin)
2378 {
2379 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTssLimit=%#x uNewTssLimitMin=%#x -> #TS\n",
2380 enmTaskSwitch, uNewTssLimit, uNewTssLimitMin));
2381 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTss & X86_SEL_MASK_OFF_RPL);
2382 }
2383
2384 /*
2385 * Task switches in VMX non-root mode always cause task switches.
2386 * The new TSS must have been read and validated (DPL, limits etc.) before a
2387 * task-switch VM-exit commences.
2388 *
2389 * See Intel spec. 25.4.2 "Treatment of Task Switches".
2390 */
2391 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
2392 {
2393 Log(("iemTaskSwitch: Guest intercept (source=%u, sel=%#x) -> VM-exit.\n", enmTaskSwitch, SelTss));
2394 IEM_VMX_VMEXIT_TASK_SWITCH_RET(pVCpu, enmTaskSwitch, SelTss, uNextEip - pVCpu->cpum.GstCtx.eip);
2395 }
2396
2397 /*
2398 * The SVM nested-guest intercept for task-switch takes priority over all exceptions
2399 * after validating the incoming (new) TSS, see AMD spec. 15.14.1 "Task Switch Intercept".
2400 */
2401 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TASK_SWITCH))
2402 {
2403 uint64_t const uExitInfo1 = SelTss;
2404 uint64_t uExitInfo2 = uErr;
2405 switch (enmTaskSwitch)
2406 {
2407 case IEMTASKSWITCH_JUMP: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_JUMP; break;
2408 case IEMTASKSWITCH_IRET: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_IRET; break;
2409 default: break;
2410 }
2411 if (fFlags & IEM_XCPT_FLAGS_ERR)
2412 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_HAS_ERROR_CODE;
2413 if (pVCpu->cpum.GstCtx.eflags.Bits.u1RF)
2414 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_EFLAGS_RF;
2415
2416 Log(("iemTaskSwitch: Guest intercept -> #VMEXIT. uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uExitInfo1, uExitInfo2));
2417 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_TASK_SWITCH, uExitInfo1, uExitInfo2);
2418 RT_NOREF2(uExitInfo1, uExitInfo2);
2419 }
2420
2421 /*
2422 * Check the current TSS limit. The last written byte to the current TSS during the
2423 * task switch will be 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
2424 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2425 *
2426 * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
2427 * end up with smaller than "legal" TSS limits.
2428 */
2429 uint32_t const uCurTssLimit = pVCpu->cpum.GstCtx.tr.u32Limit;
2430 uint32_t const uCurTssLimitMin = fIsNewTss386 ? 0x5F : 0x29;
2431 if (uCurTssLimit < uCurTssLimitMin)
2432 {
2433 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTssLimit=%#x uCurTssLimitMin=%#x -> #TS\n",
2434 enmTaskSwitch, uCurTssLimit, uCurTssLimitMin));
2435 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTss & X86_SEL_MASK_OFF_RPL);
2436 }
2437
2438 /*
2439 * Verify that the new TSS can be accessed and map it. Map only the required contents
2440 * and not the entire TSS.
2441 */
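    /* cbNewTss below is 0x2C bytes for a 16-bit TSS and 0x68 bytes for a 32-bit
       one, i.e. the architecturally defined minimum limit plus one. */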
2442 uint8_t bUnmapInfoNewTss;
2443 void *pvNewTss;
2444 uint32_t const cbNewTss = uNewTssLimitMin + 1;
2445 RTGCPTR const GCPtrNewTss = X86DESC_BASE(&pNewDescTss->Legacy);
2446 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
2447 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
2448 * not perform correct translation if this happens. See Intel spec. 7.2.1
2449 * "Task-State Segment". */
2450 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTss, &bUnmapInfoNewTss, cbNewTss, UINT8_MAX, GCPtrNewTss, IEM_ACCESS_SYS_RW, 0);
2451/** @todo Not cleaning up bUnmapInfoNewTss mapping in any early exits here.
2452 * Consider wrapping the remainder into a function for simpler cleanup. */
2453 if (rcStrict != VINF_SUCCESS)
2454 {
2455 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTss=%u uNewTssLimit=%u rc=%Rrc\n", enmTaskSwitch,
2456 cbNewTss, uNewTssLimit, VBOXSTRICTRC_VAL(rcStrict)));
2457 return rcStrict;
2458 }
2459
2460 /*
2461 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
2462 */
2463 uint32_t fEFlags = pVCpu->cpum.GstCtx.eflags.u;
2464 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
2465 || enmTaskSwitch == IEMTASKSWITCH_IRET)
2466 {
2467 uint8_t bUnmapInfoDescCurTss;
2468 PX86DESC pDescCurTss;
2469 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTss, &bUnmapInfoDescCurTss, sizeof(*pDescCurTss), UINT8_MAX,
2470 pVCpu->cpum.GstCtx.gdtr.pGdt + (pVCpu->cpum.GstCtx.tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0);
2471 if (rcStrict != VINF_SUCCESS)
2472 {
2473 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2474 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2475 return rcStrict;
2476 }
2477
2478 pDescCurTss->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2479 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoDescCurTss);
2480 if (rcStrict != VINF_SUCCESS)
2481 {
2482 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2483 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2484 return rcStrict;
2485 }
2486
2487 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
2488 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
2489 {
2490 Assert( uNewTssType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2491 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2492 fEFlags &= ~X86_EFL_NT;
2493 }
2494 }
2495
2496 /*
2497 * Save the CPU state into the current TSS.
2498 */
2499 RTGCPTR const GCPtrCurTss = pVCpu->cpum.GstCtx.tr.u64Base;
2500 if (GCPtrNewTss == GCPtrCurTss)
2501 {
2502 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTss));
2503 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
2504 pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.u, pVCpu->cpum.GstCtx.eax,
2505 pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel,
2506 pVCpu->cpum.GstCtx.ldtr.Sel));
2507 }
2508 if (fIsNewTss386)
2509 {
2510 /*
2511 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
2512 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2513 */
2514 uint8_t bUnmapInfoCurTss32;
2515 void *pvCurTss32;
2516 uint32_t const offCurTss = RT_UOFFSETOF(X86TSS32, eip);
2517 uint32_t const cbCurTss = RT_UOFFSETOF(X86TSS32, selLdt) - RT_UOFFSETOF(X86TSS32, eip);
2518 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
2519 rcStrict = iemMemMap(pVCpu, &pvCurTss32, &bUnmapInfoCurTss32, cbCurTss, UINT8_MAX,
2520 GCPtrCurTss + offCurTss, IEM_ACCESS_SYS_RW, 0);
2521 if (rcStrict != VINF_SUCCESS)
2522 {
2523 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTss=%#RGv cb=%u rc=%Rrc\n",
2524 enmTaskSwitch, GCPtrCurTss, cbCurTss, VBOXSTRICTRC_VAL(rcStrict)));
2525 return rcStrict;
2526 }
2527
2528 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTss, offCurTss + cbCurTss). */
2529 PX86TSS32 pCurTss32 = (PX86TSS32)((uintptr_t)pvCurTss32 - offCurTss);
2530 pCurTss32->eip = uNextEip;
2531 pCurTss32->eflags = fEFlags;
2532 pCurTss32->eax = pVCpu->cpum.GstCtx.eax;
2533 pCurTss32->ecx = pVCpu->cpum.GstCtx.ecx;
2534 pCurTss32->edx = pVCpu->cpum.GstCtx.edx;
2535 pCurTss32->ebx = pVCpu->cpum.GstCtx.ebx;
2536 pCurTss32->esp = pVCpu->cpum.GstCtx.esp;
2537 pCurTss32->ebp = pVCpu->cpum.GstCtx.ebp;
2538 pCurTss32->esi = pVCpu->cpum.GstCtx.esi;
2539 pCurTss32->edi = pVCpu->cpum.GstCtx.edi;
2540 pCurTss32->es = pVCpu->cpum.GstCtx.es.Sel;
2541 pCurTss32->cs = pVCpu->cpum.GstCtx.cs.Sel;
2542 pCurTss32->ss = pVCpu->cpum.GstCtx.ss.Sel;
2543 pCurTss32->ds = pVCpu->cpum.GstCtx.ds.Sel;
2544 pCurTss32->fs = pVCpu->cpum.GstCtx.fs.Sel;
2545 pCurTss32->gs = pVCpu->cpum.GstCtx.gs.Sel;
2546
2547 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoCurTss32);
2548 if (rcStrict != VINF_SUCCESS)
2549 {
2550 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2551 VBOXSTRICTRC_VAL(rcStrict)));
2552 return rcStrict;
2553 }
2554 }
2555 else
2556 {
2557 /*
2558 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
2559 */
2560 uint8_t bUnmapInfoCurTss16;
2561 void *pvCurTss16;
2562 uint32_t const offCurTss = RT_UOFFSETOF(X86TSS16, ip);
2563 uint32_t const cbCurTss = RT_UOFFSETOF(X86TSS16, selLdt) - RT_UOFFSETOF(X86TSS16, ip);
2564 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
2565 rcStrict = iemMemMap(pVCpu, &pvCurTss16, &bUnmapInfoCurTss16, cbCurTss, UINT8_MAX,
2566 GCPtrCurTss + offCurTss, IEM_ACCESS_SYS_RW, 0);
2567 if (rcStrict != VINF_SUCCESS)
2568 {
2569 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTss=%#RGv cb=%u rc=%Rrc\n",
2570 enmTaskSwitch, GCPtrCurTss, cbCurTss, VBOXSTRICTRC_VAL(rcStrict)));
2571 return rcStrict;
2572 }
2573
2574 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTss, offCurTss + cbCurTss). */
2575 PX86TSS16 pCurTss16 = (PX86TSS16)((uintptr_t)pvCurTss16 - offCurTss);
2576 pCurTss16->ip = uNextEip;
2577 pCurTss16->flags = (uint16_t)fEFlags;
2578 pCurTss16->ax = pVCpu->cpum.GstCtx.ax;
2579 pCurTss16->cx = pVCpu->cpum.GstCtx.cx;
2580 pCurTss16->dx = pVCpu->cpum.GstCtx.dx;
2581 pCurTss16->bx = pVCpu->cpum.GstCtx.bx;
2582 pCurTss16->sp = pVCpu->cpum.GstCtx.sp;
2583 pCurTss16->bp = pVCpu->cpum.GstCtx.bp;
2584 pCurTss16->si = pVCpu->cpum.GstCtx.si;
2585 pCurTss16->di = pVCpu->cpum.GstCtx.di;
2586 pCurTss16->es = pVCpu->cpum.GstCtx.es.Sel;
2587 pCurTss16->cs = pVCpu->cpum.GstCtx.cs.Sel;
2588 pCurTss16->ss = pVCpu->cpum.GstCtx.ss.Sel;
2589 pCurTss16->ds = pVCpu->cpum.GstCtx.ds.Sel;
2590
2591 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoCurTss16);
2592 if (rcStrict != VINF_SUCCESS)
2593 {
2594 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2595 VBOXSTRICTRC_VAL(rcStrict)));
2596 return rcStrict;
2597 }
2598 }
2599
2600 /*
2601 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
2602 */
2603 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2604 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2605 {
2606 /* Whether it's a 16 or 32-bit TSS doesn't matter; we only access the first, common 16-bit field (selPrev) here. */
2607 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTss;
2608 pNewTSS->selPrev = pVCpu->cpum.GstCtx.tr.Sel;
2609 }
2610
2611 /*
2612 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky,
2613 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
2614 */
2615 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
2616 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
2617 bool fNewDebugTrap;
2618 if (fIsNewTss386)
2619 {
2620 PCX86TSS32 pNewTss32 = (PCX86TSS32)pvNewTss;
2621 uNewCr3 = (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG) ? pNewTss32->cr3 : 0;
2622 uNewEip = pNewTss32->eip;
2623 uNewEflags = pNewTss32->eflags;
2624 uNewEax = pNewTss32->eax;
2625 uNewEcx = pNewTss32->ecx;
2626 uNewEdx = pNewTss32->edx;
2627 uNewEbx = pNewTss32->ebx;
2628 uNewEsp = pNewTss32->esp;
2629 uNewEbp = pNewTss32->ebp;
2630 uNewEsi = pNewTss32->esi;
2631 uNewEdi = pNewTss32->edi;
2632 uNewES = pNewTss32->es;
2633 uNewCS = pNewTss32->cs;
2634 uNewSS = pNewTss32->ss;
2635 uNewDS = pNewTss32->ds;
2636 uNewFS = pNewTss32->fs;
2637 uNewGS = pNewTss32->gs;
2638 uNewLdt = pNewTss32->selLdt;
2639 fNewDebugTrap = RT_BOOL(pNewTss32->fDebugTrap);
2640 }
2641 else
2642 {
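        /* A 16-bit TSS only holds the low words of the general registers; the high
           halves are filled with all ones below (what real CPUs leave there after a
           switch from a 16-bit TSS does not appear to be architecturally specified). */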
2643 PCX86TSS16 pNewTss16 = (PCX86TSS16)pvNewTss;
2644 uNewCr3 = 0;
2645 uNewEip = pNewTss16->ip;
2646 uNewEflags = pNewTss16->flags;
2647 uNewEax = UINT32_C(0xffff0000) | pNewTss16->ax;
2648 uNewEcx = UINT32_C(0xffff0000) | pNewTss16->cx;
2649 uNewEdx = UINT32_C(0xffff0000) | pNewTss16->dx;
2650 uNewEbx = UINT32_C(0xffff0000) | pNewTss16->bx;
2651 uNewEsp = UINT32_C(0xffff0000) | pNewTss16->sp;
2652 uNewEbp = UINT32_C(0xffff0000) | pNewTss16->bp;
2653 uNewEsi = UINT32_C(0xffff0000) | pNewTss16->si;
2654 uNewEdi = UINT32_C(0xffff0000) | pNewTss16->di;
2655 uNewES = pNewTss16->es;
2656 uNewCS = pNewTss16->cs;
2657 uNewSS = pNewTss16->ss;
2658 uNewDS = pNewTss16->ds;
2659 uNewFS = 0;
2660 uNewGS = 0;
2661 uNewLdt = pNewTss16->selLdt;
2662 fNewDebugTrap = false;
2663 }
2664
2665 if (GCPtrNewTss == GCPtrCurTss)
2666 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
2667 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
2668
2669 /*
2670 * We're done accessing the new TSS.
2671 */
2672 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoNewTss);
2673 if (rcStrict != VINF_SUCCESS)
2674 {
2675 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
2676 return rcStrict;
2677 }
2678
2679 /*
2680 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
2681 */
2682 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
2683 {
2684 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTss, &bUnmapInfoNewTss, sizeof(*pNewDescTss), UINT8_MAX,
2685 pVCpu->cpum.GstCtx.gdtr.pGdt + (SelTss & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0);
2686 if (rcStrict != VINF_SUCCESS)
2687 {
2688 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2689 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2690 return rcStrict;
2691 }
2692
2693 /* Check that the descriptor indicates the new TSS is available (not busy). */
2694 AssertMsg( pNewDescTss->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2695 || pNewDescTss->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
2696 ("Invalid TSS descriptor type=%#x", pNewDescTss->Legacy.Gate.u4Type));
2697
2698 pNewDescTss->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2699 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoNewTss);
2700 if (rcStrict != VINF_SUCCESS)
2701 {
2702 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2703 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2704 return rcStrict;
2705 }
2706 }
2707
2708 /*
2709 * From this point on, we're technically in the new task. We will defer exceptions
2710 * until the completion of the task switch but before executing any instructions in the new task.
2711 */
2712 pVCpu->cpum.GstCtx.tr.Sel = SelTss;
2713 pVCpu->cpum.GstCtx.tr.ValidSel = SelTss;
2714 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
2715 pVCpu->cpum.GstCtx.tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTss->Legacy);
2716 pVCpu->cpum.GstCtx.tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTss->Legacy);
2717 pVCpu->cpum.GstCtx.tr.u64Base = X86DESC_BASE(&pNewDescTss->Legacy);
2718 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
2719
2720 /* Set the busy bit in TR. */
2721 pVCpu->cpum.GstCtx.tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2722
2723 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
2724 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2725 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2726 {
2727 uNewEflags |= X86_EFL_NT;
2728 }
2729
2730 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
2731 pVCpu->cpum.GstCtx.cr0 |= X86_CR0_TS;
2732 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
2733
2734 pVCpu->cpum.GstCtx.eip = uNewEip;
2735 pVCpu->cpum.GstCtx.eax = uNewEax;
2736 pVCpu->cpum.GstCtx.ecx = uNewEcx;
2737 pVCpu->cpum.GstCtx.edx = uNewEdx;
2738 pVCpu->cpum.GstCtx.ebx = uNewEbx;
2739 pVCpu->cpum.GstCtx.esp = uNewEsp;
2740 pVCpu->cpum.GstCtx.ebp = uNewEbp;
2741 pVCpu->cpum.GstCtx.esi = uNewEsi;
2742 pVCpu->cpum.GstCtx.edi = uNewEdi;
2743
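    /* Strip reserved/unsupported bits from the EFLAGS loaded from the TSS and force
       the always-one bit (bit 1). */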
2744 uNewEflags &= X86_EFL_LIVE_MASK;
2745 uNewEflags |= X86_EFL_RA1_MASK;
2746 IEMMISC_SET_EFL(pVCpu, uNewEflags);
2747
2748 /*
2749 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
2750 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
2751 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
2752 */
2753 pVCpu->cpum.GstCtx.es.Sel = uNewES;
2754 pVCpu->cpum.GstCtx.es.Attr.u &= ~X86DESCATTR_P;
2755
2756 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
2757 pVCpu->cpum.GstCtx.cs.Attr.u &= ~X86DESCATTR_P;
2758
2759 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
2760 pVCpu->cpum.GstCtx.ss.Attr.u &= ~X86DESCATTR_P;
2761
2762 pVCpu->cpum.GstCtx.ds.Sel = uNewDS;
2763 pVCpu->cpum.GstCtx.ds.Attr.u &= ~X86DESCATTR_P;
2764
2765 pVCpu->cpum.GstCtx.fs.Sel = uNewFS;
2766 pVCpu->cpum.GstCtx.fs.Attr.u &= ~X86DESCATTR_P;
2767
2768 pVCpu->cpum.GstCtx.gs.Sel = uNewGS;
2769 pVCpu->cpum.GstCtx.gs.Attr.u &= ~X86DESCATTR_P;
2770 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2771
2772 pVCpu->cpum.GstCtx.ldtr.Sel = uNewLdt;
2773 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
2774 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_P;
2775 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
2776
2777 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2778 {
2779 pVCpu->cpum.GstCtx.es.Attr.u |= X86DESCATTR_UNUSABLE;
2780 pVCpu->cpum.GstCtx.cs.Attr.u |= X86DESCATTR_UNUSABLE;
2781 pVCpu->cpum.GstCtx.ss.Attr.u |= X86DESCATTR_UNUSABLE;
2782 pVCpu->cpum.GstCtx.ds.Attr.u |= X86DESCATTR_UNUSABLE;
2783 pVCpu->cpum.GstCtx.fs.Attr.u |= X86DESCATTR_UNUSABLE;
2784 pVCpu->cpum.GstCtx.gs.Attr.u |= X86DESCATTR_UNUSABLE;
2785 pVCpu->cpum.GstCtx.ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
2786 }
2787
2788 /*
2789 * Switch CR3 for the new task.
2790 */
2791 if ( fIsNewTss386
2792 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG))
2793 {
2794 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
2795 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
2796 AssertRCSuccessReturn(rc, rc);
2797
2798 /* Inform PGM. */
2799 /** @todo Should we raise \#GP(0) here when PAE PDPEs are invalid? */
2800 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PGE));
2801 AssertRCReturn(rc, rc);
2802 /* ignore informational status codes */
2803
2804 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
2805 }
2806
2807 /*
2808 * Switch LDTR for the new task.
2809 */
2810 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
2811 iemHlpLoadNullDataSelectorProt(pVCpu, &pVCpu->cpum.GstCtx.ldtr, uNewLdt);
2812 else
2813 {
2814 Assert(!pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
2815
2816 IEMSELDESC DescNewLdt;
2817 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
2818 if (rcStrict != VINF_SUCCESS)
2819 {
2820 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
2821 uNewLdt, pVCpu->cpum.GstCtx.gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
2822 return rcStrict;
2823 }
2824 if ( !DescNewLdt.Legacy.Gen.u1Present
2825 || DescNewLdt.Legacy.Gen.u1DescType
2826 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
2827 {
2828 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
2829 uNewLdt, DescNewLdt.Legacy.u));
2830 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
2831 }
2832
2833 pVCpu->cpum.GstCtx.ldtr.ValidSel = uNewLdt;
2834 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
2835 pVCpu->cpum.GstCtx.ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
2836 pVCpu->cpum.GstCtx.ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
2837 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
2838 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2839 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
2840 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
2841 }
2842
2843 IEMSELDESC DescSS;
2844 if (IEM_IS_V86_MODE(pVCpu))
2845 {
2846 IEM_SET_CPL(pVCpu, 3);
2847 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.es, uNewES);
2848 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.cs, uNewCS);
2849 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ss, uNewSS);
2850 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ds, uNewDS);
2851 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.fs, uNewFS);
2852 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.gs, uNewGS);
2853
2854 /* Quick fix: fake DescSS. */ /** @todo fix the code further down? */
2855 DescSS.Legacy.u = 0;
2856 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u32Limit;
2857 DescSS.Legacy.Gen.u4LimitHigh = pVCpu->cpum.GstCtx.ss.u32Limit >> 16;
2858 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u64Base;
2859 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 16);
2860 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 24);
2861 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
2862 DescSS.Legacy.Gen.u2Dpl = 3;
2863 }
2864 else
2865 {
2866 uint8_t const uNewCpl = (uNewCS & X86_SEL_RPL);
2867
2868 /*
2869 * Load the stack segment for the new task.
2870 */
2871 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
2872 {
2873 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
2874 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2875 }
2876
2877 /* Fetch the descriptor. */
2878 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
2879 if (rcStrict != VINF_SUCCESS)
2880 {
2881 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
2882 VBOXSTRICTRC_VAL(rcStrict)));
2883 return rcStrict;
2884 }
2885
2886 /* SS must be a data segment and writable. */
2887 if ( !DescSS.Legacy.Gen.u1DescType
2888 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
2889 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
2890 {
2891 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
2892 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
2893 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2894 }
2895
2896 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
2897 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
2898 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
2899 {
2900 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
2901 uNewCpl));
2902 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2903 }
2904
2905 /* Is it there? */
2906 if (!DescSS.Legacy.Gen.u1Present)
2907 {
2908 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
2909 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2910 }
2911
2912 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
2913 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
2914
2915 /* Set the accessed bit before committing the result into SS. */
2916 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2917 {
2918 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
2919 if (rcStrict != VINF_SUCCESS)
2920 return rcStrict;
2921 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2922 }
2923
2924 /* Commit SS. */
2925 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
2926 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSS;
2927 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
2928 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimit;
2929 pVCpu->cpum.GstCtx.ss.u64Base = u64Base;
2930 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
2931 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
2932
2933 /* CPL has changed, update IEM before loading rest of segments. */
2934 IEM_SET_CPL(pVCpu, uNewCpl);
2935
2936 /*
2937 * Load the data segments for the new task.
2938 */
2939 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.es, uNewES);
2940 if (rcStrict != VINF_SUCCESS)
2941 return rcStrict;
2942 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.ds, uNewDS);
2943 if (rcStrict != VINF_SUCCESS)
2944 return rcStrict;
2945 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.fs, uNewFS);
2946 if (rcStrict != VINF_SUCCESS)
2947 return rcStrict;
2948 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.gs, uNewGS);
2949 if (rcStrict != VINF_SUCCESS)
2950 return rcStrict;
2951
2952 /*
2953 * Load the code segment for the new task.
2954 */
2955 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
2956 {
2957 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
2958 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2959 }
2960
2961 /* Fetch the descriptor. */
2962 IEMSELDESC DescCS;
2963 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
2964 if (rcStrict != VINF_SUCCESS)
2965 {
2966 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
2967 return rcStrict;
2968 }
2969
2970 /* CS must be a code segment. */
2971 if ( !DescCS.Legacy.Gen.u1DescType
2972 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
2973 {
2974 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
2975 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
2976 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2977 }
2978
2979 /* For conforming CS, DPL must be less than or equal to the RPL. */
2980 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2981 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
2982 {
2983 Log(("iemTaskSwitch: conforming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
2984 DescCS.Legacy.Gen.u2Dpl));
2985 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2986 }
2987
2988 /* For non-conforming CS, DPL must match RPL. */
2989 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2990 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
2991 {
2992        Log(("iemTaskSwitch: non-conforming CS DPL RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
2993 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
2994 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2995 }
2996
2997 /* Is it there? */
2998 if (!DescCS.Legacy.Gen.u1Present)
2999 {
3000 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
3001 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3002 }
3003
3004 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
3005 u64Base = X86DESC_BASE(&DescCS.Legacy);
3006
3007 /* Set the accessed bit before committing the result into CS. */
3008 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3009 {
3010 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
3011 if (rcStrict != VINF_SUCCESS)
3012 return rcStrict;
3013 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3014 }
3015
3016 /* Commit CS. */
3017 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
3018 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCS;
3019 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3020 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
3021 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
3022 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3023 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
3024 }
3025
3026 /* Make sure the CPU mode is correct. */
3027 uint32_t const fExecNew = iemCalcExecFlags(pVCpu) | (pVCpu->iem.s.fExec & IEM_F_USER_OPTS);
3028 if (fExecNew != pVCpu->iem.s.fExec)
3029 Log(("iemTaskSwitch: fExec %#x -> %#x (xor %#x)\n", pVCpu->iem.s.fExec, fExecNew, pVCpu->iem.s.fExec ^ fExecNew));
3030 pVCpu->iem.s.fExec = fExecNew;
3031
3032 /** @todo Debug trap. */
3033 if (fIsNewTss386 && fNewDebugTrap)
3034 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
3035
3036 /*
3037 * Construct the error code masks based on what caused this task switch.
3038 * See Intel Instruction reference for INT.
3039 */
3040 uint16_t uExt;
3041 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
3042 && ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3043 || (fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)))
3044 uExt = 1;
3045 else
3046 uExt = 0;
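        /* Note: uExt supplies the EXT bit of the error codes raised below; it is set for
           hardware delivered events and ICEBP (INT1), and clear for INT n/INT3/INTO and
           for JMP/CALL/IRET initiated task switches. */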
3047
3048 /*
3049 * Push any error code on to the new stack.
3050 */
3051 if (fFlags & IEM_XCPT_FLAGS_ERR)
3052 {
3053 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
3054 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
3055 uint8_t const cbStackFrame = fIsNewTss386 ? 4 : 2;
3056
3057 /* Check that there is sufficient space on the stack. */
3058 /** @todo Factor out segment limit checking for normal/expand down segments
3059 * into a separate function. */
3060 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3061 {
3062 if ( pVCpu->cpum.GstCtx.esp - 1 > cbLimitSS
3063 || pVCpu->cpum.GstCtx.esp < cbStackFrame)
3064 {
3065 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
3066 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
3067 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
3068 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
3069 }
3070 }
3071 else
3072 {
3073 if ( pVCpu->cpum.GstCtx.esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
3074 || pVCpu->cpum.GstCtx.esp - cbStackFrame < cbLimitSS + UINT32_C(1))
3075 {
3076 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
3077 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
3078 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
3079 }
3080 }
3081
3082
3083 if (fIsNewTss386)
3084 rcStrict = iemMemStackPushU32(pVCpu, uErr);
3085 else
3086 rcStrict = iemMemStackPushU16(pVCpu, uErr);
3087 if (rcStrict != VINF_SUCCESS)
3088 {
3089 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
3090 fIsNewTss386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
3091 return rcStrict;
3092 }
3093 }
3094
3095 /* Check the new EIP against the new CS limit. */
3096 if (pVCpu->cpum.GstCtx.eip > pVCpu->cpum.GstCtx.cs.u32Limit)
3097 {
3098        Log(("iemTaskSwitch: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
3099 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.cs.u32Limit));
3100 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
3101 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
3102 }
3103
3104 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip,
3105 pVCpu->cpum.GstCtx.ss.Sel));
3106 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3107}
3108
3109
3110/**
3111 * Implements exceptions and interrupts for protected mode.
3112 *
3113 * @returns VBox strict status code.
3114 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3115 * @param cbInstr The number of bytes to offset rIP by in the return
3116 * address.
3117 * @param u8Vector The interrupt / exception vector number.
3118 * @param fFlags The flags.
3119 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3120 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3121 */
3122static VBOXSTRICTRC
3123iemRaiseXcptOrIntInProtMode(PVMCPUCC pVCpu,
3124 uint8_t cbInstr,
3125 uint8_t u8Vector,
3126 uint32_t fFlags,
3127 uint16_t uErr,
3128 uint64_t uCr2) RT_NOEXCEPT
3129{
3130 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3131
3132 /*
3133 * Read the IDT entry.
3134 */
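        /* Protected-mode gate descriptors are 8 bytes each; the entry for this vector
           sits at IDTR.base + 8 * u8Vector and the IDT limit must cover all 8 bytes of it. */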
3135 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
3136 {
3137 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3138 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3139 }
3140 X86DESC Idte;
3141 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
3142 pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(8) * u8Vector);
3143 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3144 {
3145 Log(("iemRaiseXcptOrIntInProtMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3146 return rcStrict;
3147 }
3148 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x - from %04x:%08RX64 efl=%#x depth=%d\n",
3149 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3150 Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow,
3151 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.eflags.u, pVCpu->iem.s.cXcptRecursions));
3152
3153 /*
3154 * Check the descriptor type, DPL and such.
3155 * ASSUMES this is done in the same order as described for call-gate calls.
3156 */
3157 if (Idte.Gate.u1DescType)
3158 {
3159 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3160 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3161 }
3162 bool fTaskGate = false;
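        /* Note: f32BitGate doubles as a 0/1 shift count further down when sizing the
           16-bit vs. 32-bit gate stack frames. */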
3163 uint8_t f32BitGate = true;
3164 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
3165 switch (Idte.Gate.u4Type)
3166 {
3167 case X86_SEL_TYPE_SYS_UNDEFINED:
3168 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
3169 case X86_SEL_TYPE_SYS_LDT:
3170 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3171 case X86_SEL_TYPE_SYS_286_CALL_GATE:
3172 case X86_SEL_TYPE_SYS_UNDEFINED2:
3173 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
3174 case X86_SEL_TYPE_SYS_UNDEFINED3:
3175 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3176 case X86_SEL_TYPE_SYS_386_CALL_GATE:
3177 case X86_SEL_TYPE_SYS_UNDEFINED4:
3178 {
3179 /** @todo check what actually happens when the type is wrong...
3180 * esp. call gates. */
3181 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3182 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3183 }
3184
3185 case X86_SEL_TYPE_SYS_286_INT_GATE:
3186 f32BitGate = false;
3187 RT_FALL_THRU();
3188 case X86_SEL_TYPE_SYS_386_INT_GATE:
3189 fEflToClear |= X86_EFL_IF;
3190 break;
3191
3192 case X86_SEL_TYPE_SYS_TASK_GATE:
3193 fTaskGate = true;
3194#ifndef IEM_IMPLEMENTS_TASKSWITCH
3195 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
3196#endif
3197 break;
3198
3199 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
3200 f32BitGate = false;
3201 break;
3202 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
3203 break;
3204
3205 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3206 }
3207
3208 /* Check DPL against CPL if applicable. */
3209 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
3210 {
3211 if (IEM_GET_CPL(pVCpu) > Idte.Gate.u2Dpl)
3212 {
3213 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, IEM_GET_CPL(pVCpu), Idte.Gate.u2Dpl));
3214 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3215 }
3216 }
3217
3218 /* Is it there? */
3219 if (!Idte.Gate.u1Present)
3220 {
3221 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
3222 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3223 }
3224
3225 /* Is it a task-gate? */
3226 if (fTaskGate)
3227 {
3228 /*
3229 * Construct the error code masks based on what caused this task switch.
3230 * See Intel Instruction reference for INT.
3231 */
3232 uint16_t const uExt = ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3233 && !(fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)) ? 0 : 1;
3234 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
3235 RTSEL SelTss = Idte.Gate.u16Sel;
3236
3237 /*
3238 * Fetch the TSS descriptor in the GDT.
3239 */
3240 IEMSELDESC DescTSS;
3241 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTss, X86_XCPT_GP, (SelTss & uSelMask) | uExt);
3242 if (rcStrict != VINF_SUCCESS)
3243 {
3244 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTss,
3245 VBOXSTRICTRC_VAL(rcStrict)));
3246 return rcStrict;
3247 }
3248
3249 /* The TSS descriptor must be a system segment and be available (not busy). */
3250 if ( DescTSS.Legacy.Gen.u1DescType
3251 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
3252 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
3253 {
3254 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
3255 u8Vector, SelTss, DescTSS.Legacy.au64));
3256 return iemRaiseGeneralProtectionFault(pVCpu, (SelTss & uSelMask) | uExt);
3257 }
3258
3259 /* The TSS must be present. */
3260 if (!DescTSS.Legacy.Gen.u1Present)
3261 {
3262 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTss, DescTSS.Legacy.au64));
3263 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTss & uSelMask) | uExt);
3264 }
3265
3266 /* Do the actual task switch. */
3267 return iemTaskSwitch(pVCpu, IEMTASKSWITCH_INT_XCPT,
3268 (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip,
3269 fFlags, uErr, uCr2, SelTss, &DescTSS);
3270 }
3271
3272 /* A null CS is bad. */
3273 RTSEL NewCS = Idte.Gate.u16Sel;
3274 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3275 {
3276 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3277 return iemRaiseGeneralProtectionFault0(pVCpu);
3278 }
3279
3280 /* Fetch the descriptor for the new CS. */
3281 IEMSELDESC DescCS;
3282 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
3283 if (rcStrict != VINF_SUCCESS)
3284 {
3285 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3286 return rcStrict;
3287 }
3288
3289 /* Must be a code segment. */
3290 if (!DescCS.Legacy.Gen.u1DescType)
3291 {
3292 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3293 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3294 }
3295 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3296 {
3297 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3298 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3299 }
3300
3301 /* Don't allow lowering the privilege level. */
3302 /** @todo Does the lowering of privileges apply to software interrupts
3303 * only? This has bearings on the more-privileged or
3304 * same-privilege stack behavior further down. A testcase would
3305 * be nice. */
3306 if (DescCS.Legacy.Gen.u2Dpl > IEM_GET_CPL(pVCpu))
3307 {
3308 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3309 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
3310 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3311 }
3312
3313 /* Make sure the selector is present. */
3314 if (!DescCS.Legacy.Gen.u1Present)
3315 {
3316 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3317 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
3318 }
3319
3320#ifdef LOG_ENABLED
3321 /* If software interrupt, try decode it if logging is enabled and such. */
3322 if ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3323 && LogIsItEnabled(RTLOGGRPFLAGS_ENABLED, LOG_GROUP_IEM_SYSCALL))
3324 iemLogSyscallProtModeInt(pVCpu, u8Vector, cbInstr);
3325#endif
3326
3327 /* Check the new EIP against the new CS limit. */
3328 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
3329 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
3330 ? Idte.Gate.u16OffsetLow
3331 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
3332 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
3333 if (uNewEip > cbLimitCS)
3334 {
3335 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
3336 u8Vector, uNewEip, cbLimitCS, NewCS));
3337 return iemRaiseGeneralProtectionFault(pVCpu, 0);
3338 }
3339 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
3340
3341 /* Calc the flag image to push. */
3342 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3343 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3344 fEfl &= ~X86_EFL_RF;
3345 else
3346 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3347
3348 /* From V8086 mode only go to CPL 0. */
3349 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3350 ? IEM_GET_CPL(pVCpu) : DescCS.Legacy.Gen.u2Dpl;
3351 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
3352 {
3353 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
3354 return iemRaiseGeneralProtectionFault(pVCpu, 0);
3355 }
3356
3357 /*
3358 * If the privilege level changes, we need to get a new stack from the TSS.
3359 * This in turns means validating the new SS and ESP...
3360 */
3361 if (uNewCpl != IEM_GET_CPL(pVCpu))
3362 {
3363 RTSEL NewSS;
3364 uint32_t uNewEsp;
3365 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, uNewCpl, &NewSS, &uNewEsp);
3366 if (rcStrict != VINF_SUCCESS)
3367 return rcStrict;
3368
3369 IEMSELDESC DescSS;
3370 rcStrict = iemMiscValidateNewSS(pVCpu, NewSS, uNewCpl, &DescSS);
3371 if (rcStrict != VINF_SUCCESS)
3372 return rcStrict;
3373 /* If the new SS is 16-bit, we are only going to use SP, not ESP. */
3374 if (!DescSS.Legacy.Gen.u1DefBig)
3375 {
3376 Log(("iemRaiseXcptOrIntInProtMode: Forcing ESP=%#x to 16 bits\n", uNewEsp));
3377 uNewEsp = (uint16_t)uNewEsp;
3378 }
3379
3380 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
3381
3382 /* Check that there is sufficient space for the stack frame. */
3383 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
3384 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
3385 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
3386 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
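            /* Frame contents: EIP, CS, EFLAGS, ESP and SS (5 entries) plus an optional error
               code; an interruption from V8086 mode additionally pushes ES, DS, FS and GS.
               Each entry is 2 or 4 bytes wide depending on the gate size. */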
3387
3388 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3389 {
3390 if ( uNewEsp - 1 > cbLimitSS
3391 || uNewEsp < cbStackFrame)
3392 {
3393 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
3394 u8Vector, NewSS, uNewEsp, cbStackFrame));
3395 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
3396 }
3397 }
3398 else
3399 {
3400 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT16_MAX)
3401 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
3402 {
3403 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
3404 u8Vector, NewSS, uNewEsp, cbStackFrame));
3405 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
3406 }
3407 }
3408
3409 /*
3410 * Start making changes.
3411 */
3412
3413 /* Set the new CPL so that stack accesses use it. */
3414 uint8_t const uOldCpl = IEM_GET_CPL(pVCpu);
3415 IEM_SET_CPL(pVCpu, uNewCpl);
3416
3417 /* Create the stack frame. */
3418 uint8_t bUnmapInfoStackFrame;
3419 RTPTRUNION uStackFrame;
3420 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, &bUnmapInfoStackFrame, cbStackFrame, UINT8_MAX,
3421 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy),
3422 IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */
3423 if (rcStrict != VINF_SUCCESS)
3424 return rcStrict;
3425 if (f32BitGate)
3426 {
3427 if (fFlags & IEM_XCPT_FLAGS_ERR)
3428 *uStackFrame.pu32++ = uErr;
3429 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3430 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
3431 uStackFrame.pu32[2] = fEfl;
3432 uStackFrame.pu32[3] = pVCpu->cpum.GstCtx.esp;
3433 uStackFrame.pu32[4] = pVCpu->cpum.GstCtx.ss.Sel;
3434 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
3435 if (fEfl & X86_EFL_VM)
3436 {
3437 uStackFrame.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel;
3438 uStackFrame.pu32[5] = pVCpu->cpum.GstCtx.es.Sel;
3439 uStackFrame.pu32[6] = pVCpu->cpum.GstCtx.ds.Sel;
3440 uStackFrame.pu32[7] = pVCpu->cpum.GstCtx.fs.Sel;
3441 uStackFrame.pu32[8] = pVCpu->cpum.GstCtx.gs.Sel;
3442 }
3443 }
3444 else
3445 {
3446 if (fFlags & IEM_XCPT_FLAGS_ERR)
3447 *uStackFrame.pu16++ = uErr;
3448 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
3449 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
3450 uStackFrame.pu16[2] = fEfl;
3451 uStackFrame.pu16[3] = pVCpu->cpum.GstCtx.sp;
3452 uStackFrame.pu16[4] = pVCpu->cpum.GstCtx.ss.Sel;
3453 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.sp));
3454 if (fEfl & X86_EFL_VM)
3455 {
3456 uStackFrame.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
3457 uStackFrame.pu16[5] = pVCpu->cpum.GstCtx.es.Sel;
3458 uStackFrame.pu16[6] = pVCpu->cpum.GstCtx.ds.Sel;
3459 uStackFrame.pu16[7] = pVCpu->cpum.GstCtx.fs.Sel;
3460 uStackFrame.pu16[8] = pVCpu->cpum.GstCtx.gs.Sel;
3461 }
3462 }
3463 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoStackFrame);
3464 if (rcStrict != VINF_SUCCESS)
3465 return rcStrict;
3466
3467 /* Mark the selectors 'accessed' (hope this is the correct time). */
3468    /** @todo testcase: exactly _when_ are the accessed bits set - before or
3469 * after pushing the stack frame? (Write protect the gdt + stack to
3470 * find out.) */
3471 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3472 {
3473 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3474 if (rcStrict != VINF_SUCCESS)
3475 return rcStrict;
3476 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3477 }
3478
3479 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3480 {
3481 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
3482 if (rcStrict != VINF_SUCCESS)
3483 return rcStrict;
3484 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3485 }
3486
3487 /*
3488         * Start committing the register changes (joins with the DPL=CPL branch).
3489 */
3490 pVCpu->cpum.GstCtx.ss.Sel = NewSS;
3491 pVCpu->cpum.GstCtx.ss.ValidSel = NewSS;
3492 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3493 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimitSS;
3494 pVCpu->cpum.GstCtx.ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
3495 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3496 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
3497 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
3498 * SP is loaded).
3499 * Need to check the other combinations too:
3500 * - 16-bit TSS, 32-bit handler
3501 * - 32-bit TSS, 16-bit handler */
3502 if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
3503 pVCpu->cpum.GstCtx.sp = (uint16_t)(uNewEsp - cbStackFrame);
3504 else
3505 pVCpu->cpum.GstCtx.rsp = uNewEsp - cbStackFrame;
3506
3507 if (fEfl & X86_EFL_VM)
3508 {
3509 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.gs);
3510 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.fs);
3511 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.es);
3512 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.ds);
3513 }
3514 }
3515 /*
3516 * Same privilege, no stack change and smaller stack frame.
3517 */
3518 else
3519 {
3520 uint64_t uNewRsp;
3521 uint8_t bUnmapInfoStackFrame;
3522 RTPTRUNION uStackFrame;
3523 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
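            /* Same-privilege frame: just EIP, CS and EFLAGS plus an optional error code,
               again scaled by the gate size. */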
3524 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, f32BitGate ? 3 : 1,
3525 &uStackFrame.pv, &bUnmapInfoStackFrame, &uNewRsp);
3526 if (rcStrict != VINF_SUCCESS)
3527 return rcStrict;
3528
3529 if (f32BitGate)
3530 {
3531 if (fFlags & IEM_XCPT_FLAGS_ERR)
3532 *uStackFrame.pu32++ = uErr;
3533 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3534 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | IEM_GET_CPL(pVCpu);
3535 uStackFrame.pu32[2] = fEfl;
3536 }
3537 else
3538 {
3539 if (fFlags & IEM_XCPT_FLAGS_ERR)
3540 *uStackFrame.pu16++ = uErr;
3541 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3542 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | IEM_GET_CPL(pVCpu);
3543 uStackFrame.pu16[2] = fEfl;
3544 }
3545 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoStackFrame); /* don't use the commit here */
3546 if (rcStrict != VINF_SUCCESS)
3547 return rcStrict;
3548
3549 /* Mark the CS selector as 'accessed'. */
3550 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3551 {
3552 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3553 if (rcStrict != VINF_SUCCESS)
3554 return rcStrict;
3555 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3556 }
3557
3558 /*
3559 * Start committing the register changes (joins with the other branch).
3560 */
3561 pVCpu->cpum.GstCtx.rsp = uNewRsp;
3562 }
3563
3564 /* ... register committing continues. */
3565 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3566 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3567 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3568 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCS;
3569 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3570 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3571
3572 pVCpu->cpum.GstCtx.rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
3573 fEfl &= ~fEflToClear;
3574 IEMMISC_SET_EFL(pVCpu, fEfl);
3575
3576 if (fFlags & IEM_XCPT_FLAGS_CR2)
3577 pVCpu->cpum.GstCtx.cr2 = uCr2;
3578
3579 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3580 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3581
3582 /* Make sure the execution flags are correct. */
3583 uint32_t const fExecNew = iemCalcExecFlags(pVCpu) | (pVCpu->iem.s.fExec & IEM_F_USER_OPTS);
3584 if (fExecNew != pVCpu->iem.s.fExec)
3585 Log(("iemRaiseXcptOrIntInProtMode: fExec %#x -> %#x (xor %#x)\n",
3586 pVCpu->iem.s.fExec, fExecNew, pVCpu->iem.s.fExec ^ fExecNew));
3587 pVCpu->iem.s.fExec = fExecNew;
3588 Assert(IEM_GET_CPL(pVCpu) == uNewCpl);
3589
3590 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3591}
3592
3593
3594/**
3595 * Implements exceptions and interrupts for long mode.
3596 *
3597 * @returns VBox strict status code.
3598 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3599 * @param cbInstr The number of bytes to offset rIP by in the return
3600 * address.
3601 * @param u8Vector The interrupt / exception vector number.
3602 * @param fFlags The flags.
3603 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3604 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3605 */
3606static VBOXSTRICTRC
3607iemRaiseXcptOrIntInLongMode(PVMCPUCC pVCpu,
3608 uint8_t cbInstr,
3609 uint8_t u8Vector,
3610 uint32_t fFlags,
3611 uint16_t uErr,
3612 uint64_t uCr2) RT_NOEXCEPT
3613{
3614 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3615
3616 /*
3617 * Read the IDT entry.
3618 */
3619 uint16_t offIdt = (uint16_t)u8Vector << 4;
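        /* Long-mode IDT gate descriptors are 16 bytes each, hence the << 4. */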
3620 if (pVCpu->cpum.GstCtx.idtr.cbIdt < offIdt + 7)
3621 {
3622 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3623 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3624 }
3625 X86DESC64 Idte;
3626#ifdef _MSC_VER /* Shut up silly compiler warning. */
3627 Idte.au64[0] = 0;
3628 Idte.au64[1] = 0;
3629#endif
3630 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt);
3631 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3632 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt + 8);
3633 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3634 {
3635 Log(("iemRaiseXcptOrIntInLongMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3636 return rcStrict;
3637 }
3638 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
3639 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3640 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
3641
3642 /*
3643 * Check the descriptor type, DPL and such.
3644 * ASSUMES this is done in the same order as described for call-gate calls.
3645 */
3646 if (Idte.Gate.u1DescType)
3647 {
3648 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3649 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3650 }
3651 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
3652 switch (Idte.Gate.u4Type)
3653 {
3654 case AMD64_SEL_TYPE_SYS_INT_GATE:
3655 fEflToClear |= X86_EFL_IF;
3656 break;
3657 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
3658 break;
3659
3660 default:
3661 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3662 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3663 }
3664
3665 /* Check DPL against CPL if applicable. */
3666 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
3667 {
3668 if (IEM_GET_CPL(pVCpu) > Idte.Gate.u2Dpl)
3669 {
3670 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, IEM_GET_CPL(pVCpu), Idte.Gate.u2Dpl));
3671 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3672 }
3673 }
3674
3675 /* Is it there? */
3676 if (!Idte.Gate.u1Present)
3677 {
3678 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
3679 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3680 }
3681
3682 /* A null CS is bad. */
3683 RTSEL NewCS = Idte.Gate.u16Sel;
3684 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3685 {
3686 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3687 return iemRaiseGeneralProtectionFault0(pVCpu);
3688 }
3689
3690 /* Fetch the descriptor for the new CS. */
3691 IEMSELDESC DescCS;
3692 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
3693 if (rcStrict != VINF_SUCCESS)
3694 {
3695 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3696 return rcStrict;
3697 }
3698
3699 /* Must be a 64-bit code segment. */
3700 if (!DescCS.Long.Gen.u1DescType)
3701 {
3702 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3703 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3704 }
3705 if ( !DescCS.Long.Gen.u1Long
3706 || DescCS.Long.Gen.u1DefBig
3707 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
3708 {
3709 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
3710 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
3711 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3712 }
3713
3714 /* Don't allow lowering the privilege level. For non-conforming CS
3715 selectors, the CS.DPL sets the privilege level the trap/interrupt
3716 handler runs at. For conforming CS selectors, the CPL remains
3717 unchanged, but the CS.DPL must be <= CPL. */
3718 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
3719 * when CPU in Ring-0. Result \#GP? */
3720 if (DescCS.Legacy.Gen.u2Dpl > IEM_GET_CPL(pVCpu))
3721 {
3722 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3723 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
3724 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3725 }
3726
3727
3728 /* Make sure the selector is present. */
3729 if (!DescCS.Legacy.Gen.u1Present)
3730 {
3731 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3732 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
3733 }
3734
3735 /* Check that the new RIP is canonical. */
3736 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
3737 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
3738 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
3739 if (!IEM_IS_CANONICAL(uNewRip))
3740 {
3741 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
3742 return iemRaiseGeneralProtectionFault0(pVCpu);
3743 }
3744
3745 /*
3746 * If the privilege level changes or if the IST isn't zero, we need to get
3747 * a new stack from the TSS.
3748 */
3749 uint64_t uNewRsp;
3750 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3751 ? IEM_GET_CPL(pVCpu) : DescCS.Legacy.Gen.u2Dpl;
3752 if ( uNewCpl != IEM_GET_CPL(pVCpu)
3753 || Idte.Gate.u3IST != 0)
3754 {
3755 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
3756 if (rcStrict != VINF_SUCCESS)
3757 return rcStrict;
3758 }
3759 else
3760 uNewRsp = pVCpu->cpum.GstCtx.rsp;
3761 uNewRsp &= ~(uint64_t)0xf;
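        /* The CPU aligns the stack on a 16-byte boundary before pushing the 64-bit interrupt
           frame (see the Intel/AMD docs on 64-bit mode interrupt stack frames). */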
3762
3763 /*
3764 * Calc the flag image to push.
3765 */
3766 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3767 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3768 fEfl &= ~X86_EFL_RF;
3769 else
3770 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3771
3772 /*
3773 * Start making changes.
3774 */
3775 /* Set the new CPL so that stack accesses use it. */
3776 uint8_t const uOldCpl = IEM_GET_CPL(pVCpu);
3777 IEM_SET_CPL(pVCpu, uNewCpl);
3778/** @todo Setting CPL this early seems wrong as it would affect any errors we
3779 * raise accessing the stack and (?) GDT/LDT... */
3780
3781 /* Create the stack frame. */
3782 uint8_t bUnmapInfoStackFrame;
3783 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
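        /* The 64-bit frame always holds RIP, CS, RFLAGS, RSP and SS (5 qwords), plus an
           optional error code qword. */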
3784 RTPTRUNION uStackFrame;
3785 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, &bUnmapInfoStackFrame, cbStackFrame, UINT8_MAX,
3786 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */
3787 if (rcStrict != VINF_SUCCESS)
3788 return rcStrict;
3789
3790 if (fFlags & IEM_XCPT_FLAGS_ERR)
3791 *uStackFrame.pu64++ = uErr;
3792 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.rip + cbInstr : pVCpu->cpum.GstCtx.rip;
3793 uStackFrame.pu64[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
3794 uStackFrame.pu64[2] = fEfl;
3795 uStackFrame.pu64[3] = pVCpu->cpum.GstCtx.rsp;
3796 uStackFrame.pu64[4] = pVCpu->cpum.GstCtx.ss.Sel;
3797 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoStackFrame);
3798 if (rcStrict != VINF_SUCCESS)
3799 return rcStrict;
3800
3801    /* Mark the CS selector 'accessed' (hope this is the correct time). */
3802    /** @todo testcase: exactly _when_ are the accessed bits set - before or
3803 * after pushing the stack frame? (Write protect the gdt + stack to
3804 * find out.) */
3805 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3806 {
3807 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3808 if (rcStrict != VINF_SUCCESS)
3809 return rcStrict;
3810 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3811 }
3812
3813 /*
3814     * Start committing the register changes.
3815 */
3816    /** @todo research/testcase: Figure out what VT-x and AMD-V load into the
3817 * hidden registers when interrupting 32-bit or 16-bit code! */
3818 if (uNewCpl != uOldCpl)
3819 {
3820 pVCpu->cpum.GstCtx.ss.Sel = 0 | uNewCpl;
3821 pVCpu->cpum.GstCtx.ss.ValidSel = 0 | uNewCpl;
3822 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3823 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
3824 pVCpu->cpum.GstCtx.ss.u64Base = 0;
3825 pVCpu->cpum.GstCtx.ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
3826 }
3827 pVCpu->cpum.GstCtx.rsp = uNewRsp - cbStackFrame;
3828 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3829 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3830 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3831 pVCpu->cpum.GstCtx.cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
3832 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3833 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3834 pVCpu->cpum.GstCtx.rip = uNewRip;
3835
3836 fEfl &= ~fEflToClear;
3837 IEMMISC_SET_EFL(pVCpu, fEfl);
3838
3839 if (fFlags & IEM_XCPT_FLAGS_CR2)
3840 pVCpu->cpum.GstCtx.cr2 = uCr2;
3841
3842 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3843 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3844
3845 iemRecalcExecModeAndCplFlags(pVCpu);
3846
3847 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3848}
3849
3850
3851/**
3852 * Implements exceptions and interrupts.
3853 *
3854 * All exceptions and interrupts go through this function!
3855 *
3856 * @returns VBox strict status code.
3857 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3858 * @param cbInstr The number of bytes to offset rIP by in the return
3859 * address.
3860 * @param u8Vector The interrupt / exception vector number.
3861 * @param fFlags The flags.
3862 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3863 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3864 */
3865VBOXSTRICTRC
3866iemRaiseXcptOrInt(PVMCPUCC pVCpu,
3867 uint8_t cbInstr,
3868 uint8_t u8Vector,
3869 uint32_t fFlags,
3870 uint16_t uErr,
3871 uint64_t uCr2) RT_NOEXCEPT
3872{
3873 /*
3874 * Get all the state that we might need here.
3875 */
3876 IEM_CTX_IMPORT_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3877 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3878
3879#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
3880 /*
3881 * Flush prefetch buffer
3882 */
3883 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
3884#endif
3885
3886 /*
3887 * Perform the V8086 IOPL check and upgrade the fault without nesting.
3888 */
3889 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1VM
3890 && pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL != 3
3891 && (fFlags & ( IEM_XCPT_FLAGS_T_SOFT_INT
3892 | IEM_XCPT_FLAGS_BP_INSTR
3893 | IEM_XCPT_FLAGS_ICEBP_INSTR
3894 | IEM_XCPT_FLAGS_OF_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
3895 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) )
3896 {
3897 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
3898 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
3899 u8Vector = X86_XCPT_GP;
3900 uErr = 0;
3901 }
3902
3903 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
3904#ifdef DBGFTRACE_ENABLED
3905 RTTraceBufAddMsgF(pVM->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
3906 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
3907 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp);
3908#endif
3909
3910 /*
3911 * Check if DBGF wants to intercept the exception.
3912 */
3913 if ( (fFlags & (IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_T_SOFT_INT))
3914 || !DBGF_IS_EVENT_ENABLED(pVM, (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + u8Vector)) )
3915 { /* likely */ }
3916 else
3917 {
3918 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + u8Vector),
3919 DBGFEVENTCTX_INVALID, 1, (uint64_t)uErr);
3920 if (rcStrict != VINF_SUCCESS)
3921 return rcStrict;
3922 }
3923
3924 /*
3925 * Evaluate whether NMI blocking should be in effect.
3926 * Normally, NMI blocking is in effect whenever we inject an NMI.
3927 */
3928 bool fBlockNmi = u8Vector == X86_XCPT_NMI
3929 && (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT);
3930
3931#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3932 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3933 {
3934 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEvent(pVCpu, u8Vector, fFlags, uErr, uCr2, cbInstr);
3935 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
3936 return rcStrict0;
3937
3938 /* If virtual-NMI blocking is in effect for the nested-guest, guest NMIs are not blocked. */
3939 if (pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking)
3940 {
3941 Assert(CPUMIsGuestVmxPinCtlsSet(&pVCpu->cpum.GstCtx, VMX_PIN_CTLS_VIRT_NMI));
3942 fBlockNmi = false;
3943 }
3944 }
3945#endif
3946
3947#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
3948 if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
3949 {
3950 /*
3951 * If the event is being injected as part of VMRUN, it isn't subject to event
3952 * intercepts in the nested-guest. However, secondary exceptions that occur
3953 * during injection of any event -are- subject to exception intercepts.
3954 *
3955 * See AMD spec. 15.20 "Event Injection".
3956 */
3957 if (!pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents)
3958 pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents = true;
3959 else
3960 {
3961 /*
3962 * Check and handle if the event being raised is intercepted.
3963 */
3964 VBOXSTRICTRC rcStrict0 = iemHandleSvmEventIntercept(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
3965 if (rcStrict0 != VINF_SVM_INTERCEPT_NOT_ACTIVE)
3966 return rcStrict0;
3967 }
3968 }
3969#endif
3970
3971 /*
3972 * Set NMI blocking if necessary.
3973 */
3974 if (fBlockNmi)
3975 CPUMSetInterruptInhibitingByNmi(&pVCpu->cpum.GstCtx);
3976
3977 /*
3978 * Do recursion accounting.
3979 */
3980 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
3981 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
3982 if (pVCpu->iem.s.cXcptRecursions == 0)
3983 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
3984 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2));
3985 else
3986 {
3987 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
3988 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt,
3989 pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
3990
3991 if (pVCpu->iem.s.cXcptRecursions >= 4)
3992 {
3993#ifdef DEBUG_bird
3994 AssertFailed();
3995#endif
3996 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
3997 }
3998
3999 /*
4000 * Evaluate the sequence of recurring events.
4001 */
4002 IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevXcpt, uPrevXcpt, fFlags, u8Vector,
4003 NULL /* pXcptRaiseInfo */);
4004 if (enmRaise == IEMXCPTRAISE_CURRENT_XCPT)
4005 { /* likely */ }
4006 else if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
4007 {
4008 Log2(("iemRaiseXcptOrInt: Raising double fault. uPrevXcpt=%#x\n", uPrevXcpt));
4009 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
4010 u8Vector = X86_XCPT_DF;
4011 uErr = 0;
4012#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4013 /* VMX nested-guest #DF intercept needs to be checked here. */
4014 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
4015 {
4016 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEventDoubleFault(pVCpu);
4017 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
4018 return rcStrict0;
4019 }
4020#endif
4021 /* SVM nested-guest #DF intercepts need to be checked now. See AMD spec. 15.12 "Exception Intercepts". */
4022 if (IEM_SVM_IS_XCPT_INTERCEPT_SET(pVCpu, X86_XCPT_DF))
4023 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
4024 }
4025 else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)
4026 {
4027 Log2(("iemRaiseXcptOrInt: Raising triple fault. uPrevXcpt=%#x\n", uPrevXcpt));
4028 return iemInitiateCpuShutdown(pVCpu);
4029 }
4030 else if (enmRaise == IEMXCPTRAISE_CPU_HANG)
4031 {
4032 /* If a nested-guest enters an endless CPU loop condition, we'll emulate it; otherwise guru. */
4033 Log2(("iemRaiseXcptOrInt: CPU hang condition detected\n"));
4034 if ( !CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu))
4035 && !CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
4036 return VERR_EM_GUEST_CPU_HANG;
4037 }
4038 else
4039 {
4040 AssertMsgFailed(("Unexpected condition! enmRaise=%#x uPrevXcpt=%#x fPrevXcpt=%#x, u8Vector=%#x fFlags=%#x\n",
4041 enmRaise, uPrevXcpt, fPrevXcpt, u8Vector, fFlags));
4042 return VERR_IEM_IPE_9;
4043 }
4044
4045 /*
4046             * The 'EXT' bit is set when an exception occurs during delivery of an external
4047             * event (such as an interrupt or earlier exception)[1]. The privileged software
4048             * exception (INT1) also sets the EXT bit[2]. For exceptions generated by software
4049             * interrupts and the INTO and INT3 instructions, the 'EXT' bit will not be set[3].
4050 *
4051 * [1] - Intel spec. 6.13 "Error Code"
4052 * [2] - Intel spec. 26.5.1.1 "Details of Vectored-Event Injection".
4053 * [3] - Intel Instruction reference for INT n.
4054 */
4055 if ( (fPrevXcpt & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR))
4056 && (fFlags & IEM_XCPT_FLAGS_ERR)
4057 && u8Vector != X86_XCPT_PF
4058 && u8Vector != X86_XCPT_DF)
4059 {
4060 uErr |= X86_TRAP_ERR_EXTERNAL;
4061 }
4062 }
4063
4064 pVCpu->iem.s.cXcptRecursions++;
4065 pVCpu->iem.s.uCurXcpt = u8Vector;
4066 pVCpu->iem.s.fCurXcpt = fFlags;
4067 pVCpu->iem.s.uCurXcptErr = uErr;
4068 pVCpu->iem.s.uCurXcptCr2 = uCr2;
4069
4070 /*
4071 * Extensive logging.
4072 */
4073#if defined(LOG_ENABLED) && defined(IN_RING3)
4074 if (LogIs3Enabled())
4075 {
4076 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR_MASK);
4077 char szRegs[4096];
4078 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
4079 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
4080 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
4081 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
4082 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
4083 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
4084 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
4085 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
4086 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
4087 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
4088 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
4089 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4090 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4091 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4092 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4093 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4094 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4095 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4096 " efer=%016VR{efer}\n"
4097 " pat=%016VR{pat}\n"
4098 " sf_mask=%016VR{sf_mask}\n"
4099 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4100 " lstar=%016VR{lstar}\n"
4101 " star=%016VR{star} cstar=%016VR{cstar}\n"
4102 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4103 );
4104
4105 char szInstr[256];
4106 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4107 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4108 szInstr, sizeof(szInstr), NULL);
4109 Log3(("%s%s\n", szRegs, szInstr));
4110 }
4111#endif /* LOG_ENABLED */
4112
4113 /*
4114 * Stats.
4115 */
4116 uint64_t const uTimestamp = ASMReadTSC();
4117 if (!(fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT))
4118 {
4119 STAM_REL_STATS({ pVCpu->iem.s.aStatInts[u8Vector] += 1; });
4120 EMHistoryAddExit(pVCpu,
4121 fFlags & IEM_XCPT_FLAGS_T_EXT_INT
4122 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_IEM, u8Vector)
4123 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_IEM, u8Vector | 0x100),
4124 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base, uTimestamp);
4125 }
4126 else
4127 {
4128 if (u8Vector < RT_ELEMENTS(pVCpu->iem.s.aStatXcpts))
4129 STAM_REL_COUNTER_INC(&pVCpu->iem.s.aStatXcpts[u8Vector]);
4130 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_XCPT, u8Vector),
4131 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base, uTimestamp);
4132 if (fFlags & IEM_XCPT_FLAGS_ERR)
4133 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_XCPT, u8Vector | EMEXIT_F_XCPT_ERRCD), uErr, uTimestamp);
4134 if (fFlags & IEM_XCPT_FLAGS_CR2)
4135 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_XCPT, u8Vector | EMEXIT_F_XCPT_CR2), uCr2, uTimestamp);
4136 }
4137
4138 /*
4139     * A #PF implies an INVLPG for the CR2 value (see 4.10.1.1 in Intel SDM Vol 3)
4140 * to ensure that a stale TLB or paging cache entry will only cause one
4141 * spurious #PF.
4142 */
4143 if ( u8Vector == X86_XCPT_PF
4144 && (fFlags & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_CR2)) == (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_CR2))
4145 IEMTlbInvalidatePage(pVCpu, uCr2);
4146
4147 /*
4148 * Call the mode specific worker function.
4149 */
4150 VBOXSTRICTRC rcStrict;
4151 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
4152 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4153 else if (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LMA)
4154 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4155 else
4156 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4157
4158 /* Flush the prefetch buffer. */
4159 iemOpcodeFlushHeavy(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
4160
4161 /*
4162 * Unwind.
4163 */
4164 pVCpu->iem.s.cXcptRecursions--;
4165 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
4166 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
4167 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u depth=%d\n",
4168 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel,
4169 pVCpu->cpum.GstCtx.esp, IEM_GET_CPL(pVCpu), pVCpu->iem.s.cXcptRecursions + 1));
4170 return rcStrict;
4171}
4172
4173#ifdef IEM_WITH_SETJMP
4174/**
4175 * See iemRaiseXcptOrInt. Will not return.
4176 */
4177DECL_NO_RETURN(void)
4178iemRaiseXcptOrIntJmp(PVMCPUCC pVCpu,
4179 uint8_t cbInstr,
4180 uint8_t u8Vector,
4181 uint32_t fFlags,
4182 uint16_t uErr,
4183 uint64_t uCr2) IEM_NOEXCEPT_MAY_LONGJMP
4184{
4185 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4186 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
4187}
4188#endif
4189
4190
4191/** \#DE - 00. */
4192VBOXSTRICTRC iemRaiseDivideError(PVMCPUCC pVCpu) RT_NOEXCEPT
4193{
4194 if (GCMIsInterceptingXcptDE(pVCpu))
4195 {
4196 int rc = GCMXcptDE(pVCpu, &pVCpu->cpum.GstCtx);
4197 if (rc == VINF_SUCCESS)
4198 {
4199 Log(("iemRaiseDivideError: Restarting instruction because of GCMXcptDE\n"));
4200            return VINF_IEM_RAISED_XCPT; /* must return non-zero status here to cause an instruction restart */
4201 }
4202 }
4203 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4204}
4205
4206
4207#ifdef IEM_WITH_SETJMP
4208/** \#DE - 00. */
4209DECL_NO_RETURN(void) iemRaiseDivideErrorJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4210{
4211 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4212}
4213#endif
4214
4215
4216/** \#DB - 01.
4217 * @note This automatically clears DR7.GD. */
4218VBOXSTRICTRC iemRaiseDebugException(PVMCPUCC pVCpu) RT_NOEXCEPT
4219{
4220 /* This always clears RF (via IEM_XCPT_FLAGS_DRx_INSTR_BP). */
4221 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
4222 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_DRx_INSTR_BP, 0, 0);
4223}
4224
4225
4226/** \#BR - 05. */
4227VBOXSTRICTRC iemRaiseBoundRangeExceeded(PVMCPUCC pVCpu) RT_NOEXCEPT
4228{
4229 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_BR, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4230}
4231
4232
4233/** \#UD - 06. */
4234VBOXSTRICTRC iemRaiseUndefinedOpcode(PVMCPUCC pVCpu) RT_NOEXCEPT
4235{
4236 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4237}
4238
4239
4240#ifdef IEM_WITH_SETJMP
4241/** \#UD - 06. */
4242DECL_NO_RETURN(void) iemRaiseUndefinedOpcodeJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4243{
4244 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4245}
4246#endif
4247
4248
4249/** \#NM - 07. */
4250VBOXSTRICTRC iemRaiseDeviceNotAvailable(PVMCPUCC pVCpu) RT_NOEXCEPT
4251{
4252 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4253}
4254
4255
4256#ifdef IEM_WITH_SETJMP
4257/** \#NM - 07. */
4258DECL_NO_RETURN(void) iemRaiseDeviceNotAvailableJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4259{
4260 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4261}
4262#endif
4263
4264
4265/** \#TS(err) - 0a. */
4266VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4267{
4268 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4269}
4270
4271
4272/** \#TS(tr) - 0a. */
4273VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPUCC pVCpu) RT_NOEXCEPT
4274{
4275 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4276 pVCpu->cpum.GstCtx.tr.Sel, 0);
4277}
4278
4279
4280/** \#TS(0) - 0a. */
4281VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPUCC pVCpu) RT_NOEXCEPT
4282{
4283 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4284 0, 0);
4285}
4286
4287
4288/** \#TS(err) - 0a. */
4289VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4290{
4291 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4292 uSel & X86_SEL_MASK_OFF_RPL, 0);
4293}
4294
4295
4296/** \#NP(err) - 0b. */
4297VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4298{
4299 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4300}
4301
4302
4303/** \#NP(sel) - 0b. */
4304VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4305{
4306 Log(("iemRaiseSelectorNotPresentBySelector: cs:rip=%04x:%RX64 uSel=%#x\n",
4307 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uSel));
4308 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4309 uSel & ~X86_SEL_RPL, 0);
4310}
4311
4312
4313/** \#SS(seg) - 0c. */
4314VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4315{
4316 Log(("iemRaiseStackSelectorNotPresentBySelector: cs:rip=%04x:%RX64 uSel=%#x\n",
4317 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uSel));
4318 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4319 uSel & ~X86_SEL_RPL, 0);
4320}
4321
4322
4323/** \#SS(err) - 0c. */
4324VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4325{
4326 Log(("iemRaiseStackSelectorNotPresentWithErr: cs:rip=%04x:%RX64 uErr=%#x\n",
4327 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uErr));
4328 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4329}
4330
4331
4332/** \#GP(n) - 0d. */
4333VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4334{
4335 Log(("iemRaiseGeneralProtectionFault: cs:rip=%04x:%RX64 uErr=%#x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uErr));
4336 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4337}
4338
4339
4340/** \#GP(0) - 0d. */
4341VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPUCC pVCpu) RT_NOEXCEPT
4342{
4343 Log(("iemRaiseGeneralProtectionFault0: cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
4344 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4345}
4346
4347#ifdef IEM_WITH_SETJMP
4348/** \#GP(0) - 0d. */
4349DECL_NO_RETURN(void) iemRaiseGeneralProtectionFault0Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4350{
4351 Log(("iemRaiseGeneralProtectionFault0Jmp: cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
4352 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4353}
4354#endif
4355
4356
4357/** \#GP(sel) - 0d. */
4358VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
4359{
4360 Log(("iemRaiseGeneralProtectionFaultBySelector: cs:rip=%04x:%RX64 Sel=%#x\n",
4361 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, Sel));
4362 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4363 Sel & ~X86_SEL_RPL, 0);
4364}
4365
4366
4367/** \#GP(0) - 0d. */
4368VBOXSTRICTRC iemRaiseNotCanonical(PVMCPUCC pVCpu) RT_NOEXCEPT
4369{
4370 Log(("iemRaiseNotCanonical: cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
4371 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4372}
4373
4374
4375/** \#GP(sel) - 0d. */
4376VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4377{
4378 Log(("iemRaiseSelectorBounds: cs:rip=%04x:%RX64 iSegReg=%d fAccess=%#x\n",
4379 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, iSegReg, fAccess));
4380 NOREF(iSegReg); NOREF(fAccess);
4381 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4382 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4383}
4384
4385#ifdef IEM_WITH_SETJMP
4386/** \#GP(sel) - 0d, longjmp. */
4387DECL_NO_RETURN(void) iemRaiseSelectorBoundsJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP
4388{
4389 Log(("iemRaiseSelectorBoundsJmp: cs:rip=%04x:%RX64 iSegReg=%d fAccess=%#x\n",
4390 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, iSegReg, fAccess));
4391 NOREF(iSegReg); NOREF(fAccess);
4392 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4393 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4394}
4395#endif
4396
4397/** \#GP(sel) - 0d. */
4398VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
4399{
4400 Log(("iemRaiseSelectorBoundsBySelector: cs:rip=%04x:%RX64 Sel=%#x\n",
4401 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, Sel));
4402 NOREF(Sel);
4403 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4404}
4405
4406#ifdef IEM_WITH_SETJMP
4407/** \#GP(sel) - 0d, longjmp. */
4408DECL_NO_RETURN(void) iemRaiseSelectorBoundsBySelectorJmp(PVMCPUCC pVCpu, RTSEL Sel) IEM_NOEXCEPT_MAY_LONGJMP
4409{
4410 Log(("iemRaiseSelectorBoundsBySelectorJmp: cs:rip=%04x:%RX64 Sel=%#x\n",
4411 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, Sel));
4412 NOREF(Sel);
4413 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4414}
4415#endif
4416
4417
4418/** \#GP(sel) - 0d. */
4419VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4420{
4421 Log(("iemRaiseSelectorInvalidAccess: cs:rip=%04x:%RX64 iSegReg=%d fAccess=%#x\n",
4422 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, iSegReg, fAccess));
4423 NOREF(iSegReg); NOREF(fAccess);
4424 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4425}
4426
4427#ifdef IEM_WITH_SETJMP
4428/** \#GP(sel) - 0d, longjmp. */
4429DECL_NO_RETURN(void) iemRaiseSelectorInvalidAccessJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP
4430{
4431 NOREF(iSegReg); NOREF(fAccess);
4432 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4433}
4434#endif
4435
4436
4437/** \#PF(n) - 0e. */
4438VBOXSTRICTRC iemRaisePageFault(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess, uint32_t fAccess, int rc) RT_NOEXCEPT
4439{
4440 uint16_t uErr;
4441 switch (rc)
4442 {
4443 case VERR_PAGE_NOT_PRESENT:
4444 case VERR_PAGE_TABLE_NOT_PRESENT:
4445 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
4446 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
4447 uErr = 0;
4448 break;
4449
4450 case VERR_RESERVED_PAGE_TABLE_BITS:
4451 uErr = X86_TRAP_PF_P | X86_TRAP_PF_RSVD;
4452 break;
4453
4454 default:
4455 AssertMsgFailed(("%Rrc\n", rc));
4456 RT_FALL_THRU();
4457 case VERR_ACCESS_DENIED:
4458 uErr = X86_TRAP_PF_P;
4459 break;
4460 }
4461
4462 if (IEM_GET_CPL(pVCpu) == 3)
4463 uErr |= X86_TRAP_PF_US;
4464
4465 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
4466 && ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
4467 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) ) )
4468 uErr |= X86_TRAP_PF_ID;
4469
4470#if 0 /* This is so much nonsense, really. Why was it done like that? */
4471 /* Note! RW access callers reporting a WRITE protection fault, will clear
4472 the READ flag before calling. So, read-modify-write accesses (RW)
4473 can safely be reported as READ faults. */
4474 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
4475 uErr |= X86_TRAP_PF_RW;
4476#else
4477 if (fAccess & IEM_ACCESS_TYPE_WRITE)
4478 {
4479 /// @todo r=bird: bs3-cpu-basic-2 wants X86_TRAP_PF_RW for xchg and cmpxchg
4480 /// (regardless of outcome of the comparison in the latter case).
4481 //if (!(fAccess & IEM_ACCESS_TYPE_READ))
4482 uErr |= X86_TRAP_PF_RW;
4483 }
4484#endif
4485
4486 /* For FXSAVE and FRSTOR the #PF is typically reported at the max address
4487 of the memory operand rather than at the start of it. (Not sure what
4488       happens if it crosses a page boundary.)  The current heuristic for
4489       this is to report the #PF for the last byte if the access is larger than
4490       64 bytes.  This is probably not correct, but we can work that out later;
4491       the main objective now is to get FXSAVE to work like on real hardware and
4492 make bs3-cpu-basic2 work. */
4493 if (cbAccess <= 64)
4494    { /* likely */ }
4495 else
4496 GCPtrWhere += cbAccess - 1;
4497
4498 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
4499 uErr, GCPtrWhere);
4500}
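/*
 * Illustrative sketch (not part of the build): how the X86_TRAP_PF_* bits above
 * combine for one common case.  The helper name is made up for the example and
 * the scenario (a ring-3 write to a present, read-only page) is an assumption.
 */
#if 0
static uint16_t iemExamplePfErrCodeUserWriteToRoPage(void)
{
    uint16_t uErr = X86_TRAP_PF_P;  /* VERR_ACCESS_DENIED: the page is present, so this is a protection fault. */
    uErr |= X86_TRAP_PF_US;         /* IEM_GET_CPL() == 3: the access came from user mode. */
    uErr |= X86_TRAP_PF_RW;         /* IEM_ACCESS_TYPE_WRITE: it was a write. */
    return uErr;                    /* = 0x7 */
}
#endif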
4501
4502#ifdef IEM_WITH_SETJMP
4503/** \#PF(n) - 0e, longjmp. */
4504DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess,
4505 uint32_t fAccess, int rc) IEM_NOEXCEPT_MAY_LONGJMP
4506{
4507 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, cbAccess, fAccess, rc)));
4508}
4509#endif
4510
4511
4512/** \#MF(0) - 10. */
4513VBOXSTRICTRC iemRaiseMathFault(PVMCPUCC pVCpu) RT_NOEXCEPT
4514{
4515 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_NE)
4516 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4517
4518 /* Convert a #MF into a FERR -> IRQ 13. See @bugref{6117}. */
4519 PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13 /* u8Irq */, 1 /* u8Level */, 0 /* uTagSrc */);
4520 return iemRegUpdateRipAndFinishClearingRF(pVCpu);
4521}
4522
4523#ifdef IEM_WITH_SETJMP
4524/** \#MF(0) - 10, longjmp. */
4525DECL_NO_RETURN(void) iemRaiseMathFaultJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4526{
4527 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaiseMathFault(pVCpu)));
4528}
4529#endif
4530
4531
4532/** \#AC(0) - 11. */
4533VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPUCC pVCpu) RT_NOEXCEPT
4534{
4535 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4536}
4537
4538#ifdef IEM_WITH_SETJMP
4539/** \#AC(0) - 11, longjmp. */
4540DECL_NO_RETURN(void) iemRaiseAlignmentCheckExceptionJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4541{
4542 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaiseAlignmentCheckException(pVCpu)));
4543}
4544#endif
4545
4546
4547/** \#XF(0)/\#XM(0) - 19. */
4548VBOXSTRICTRC iemRaiseSimdFpException(PVMCPUCC pVCpu) RT_NOEXCEPT
4549{
4550 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_XF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4551}
4552
4553
4554#ifdef IEM_WITH_SETJMP
4555/** \#XF(0)/\#XM(0) - 19, longjmp. */
4556DECL_NO_RETURN(void) iemRaiseSimdFpExceptionJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4557{
4558 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaiseSimdFpException(pVCpu)));
4559}
4560#endif
4561
4562
4563/** Accessed via IEMOP_RAISE_DIVIDE_ERROR. */
4564IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
4565{
4566 NOREF(cbInstr);
4567 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4568}
4569
4570
4571/** Accessed via IEMOP_RAISE_INVALID_LOCK_PREFIX. */
4572IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
4573{
4574 NOREF(cbInstr);
4575 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4576}
4577
4578
4579/** Accessed via IEMOP_RAISE_INVALID_OPCODE. */
4580IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
4581{
4582 NOREF(cbInstr);
4583 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4584}
4585
4586
4587/** @} */
4588
4589/** @name Common opcode decoders.
4590 * @{
4591 */
4592//#include <iprt/mem.h>
4593
4594/**
4595 * Used to add extra details about a stub case.
4596 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4597 */
4598void iemOpStubMsg2(PVMCPUCC pVCpu) RT_NOEXCEPT
4599{
4600#if defined(LOG_ENABLED) && defined(IN_RING3)
4601 PVM pVM = pVCpu->CTX_SUFF(pVM);
4602 char szRegs[4096];
4603 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
4604 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
4605 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
4606 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
4607 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
4608 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
4609 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
4610 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
4611 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
4612 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
4613 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
4614 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4615 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4616 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4617 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4618 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4619 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4620 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4621 " efer=%016VR{efer}\n"
4622 " pat=%016VR{pat}\n"
4623 " sf_mask=%016VR{sf_mask}\n"
4624 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4625 " lstar=%016VR{lstar}\n"
4626 " star=%016VR{star} cstar=%016VR{cstar}\n"
4627 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4628 );
4629
4630 char szInstr[256];
4631 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4632 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4633 szInstr, sizeof(szInstr), NULL);
4634
4635 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
4636#else
4637    RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip);
4638#endif
4639}
4640
4641/** @} */
4642
4643
4644
4645/** @name Register Access.
4646 * @{
4647 */
4648
4649/**
4650 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
4651 *
4652 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4653 * segment limit.
4654 *
4655 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4656 * @param cbInstr Instruction size.
4657 * @param offNextInstr The offset of the next instruction.
4658 * @param enmEffOpSize Effective operand size.
4659 */
4660VBOXSTRICTRC iemRegRipRelativeJumpS8AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int8_t offNextInstr,
4661 IEMMODE enmEffOpSize) RT_NOEXCEPT
4662{
4663 switch (enmEffOpSize)
4664 {
4665 case IEMMODE_16BIT:
4666 {
4667 uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + (int16_t)offNextInstr;
4668 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
4669 || IEM_IS_64BIT_CODE(pVCpu) /* no CS limit checks in 64-bit mode */))
4670 pVCpu->cpum.GstCtx.rip = uNewIp;
4671 else
4672 return iemRaiseGeneralProtectionFault0(pVCpu);
4673 break;
4674 }
4675
4676 case IEMMODE_32BIT:
4677 {
4678 Assert(!IEM_IS_64BIT_CODE(pVCpu));
4679 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
4680
4681 uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + (int32_t)offNextInstr;
4682 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
4683 pVCpu->cpum.GstCtx.rip = uNewEip;
4684 else
4685 return iemRaiseGeneralProtectionFault0(pVCpu);
4686 break;
4687 }
4688
4689 case IEMMODE_64BIT:
4690 {
4691 Assert(IEM_IS_64BIT_CODE(pVCpu));
4692
4693 uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
4694 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
4695 pVCpu->cpum.GstCtx.rip = uNewRip;
4696 else
4697 return iemRaiseGeneralProtectionFault0(pVCpu);
4698 break;
4699 }
4700
4701 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4702 }
4703
4704#ifndef IEM_WITH_CODE_TLB
4705 /* Flush the prefetch buffer. */
4706 pVCpu->iem.s.cbOpcode = cbInstr;
4707#endif
4708
4709 /*
4710 * Clear RF and finish the instruction (maybe raise #DB).
4711 */
4712 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
4713}
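/*
 * Quick illustration (not part of the build) of the 16-bit case above: the sum is
 * computed in a uint16_t, so the new IP wraps at 64KiB just like on real hardware.
 * All values below are made up for the example.
 */
#if 0
static uint16_t iemExampleIp16Wrap(void)
{
    uint16_t const uIp          = UINT16_C(0xfffe);  /* current IP */
    uint8_t  const cbInstr      = 2;                 /* e.g. a two byte JMP rel8 */
    int8_t   const offNextInstr = 0x10;
    return uIp + cbInstr + (int16_t)offNextInstr;    /* wraps around to 0x0010 */
}
#endif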
4714
4715
4716/**
4717 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
4718 *
4719 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4720 * segment limit.
4721 *
4722 * @returns Strict VBox status code.
4723 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4724 * @param cbInstr Instruction size.
4725 * @param offNextInstr The offset of the next instruction.
4726 */
4727VBOXSTRICTRC iemRegRipRelativeJumpS16AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int16_t offNextInstr) RT_NOEXCEPT
4728{
4729 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
4730
4731 uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + offNextInstr;
4732 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
4733 || IEM_IS_64BIT_CODE(pVCpu) /* no limit checking in 64-bit mode */))
4734 pVCpu->cpum.GstCtx.rip = uNewIp;
4735 else
4736 return iemRaiseGeneralProtectionFault0(pVCpu);
4737
4738#ifndef IEM_WITH_CODE_TLB
4739 /* Flush the prefetch buffer. */
4740 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4741#endif
4742
4743 /*
4744 * Clear RF and finish the instruction (maybe raise #DB).
4745 */
4746 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
4747}
4748
4749
4750/**
4751 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
4752 *
4753 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4754 * segment limit.
4755 *
4756 * @returns Strict VBox status code.
4757 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4758 * @param cbInstr Instruction size.
4759 * @param offNextInstr The offset of the next instruction.
4760 * @param enmEffOpSize Effective operand size.
4761 */
4762VBOXSTRICTRC iemRegRipRelativeJumpS32AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int32_t offNextInstr,
4763 IEMMODE enmEffOpSize) RT_NOEXCEPT
4764{
4765 if (enmEffOpSize == IEMMODE_32BIT)
4766 {
4767 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX); Assert(!IEM_IS_64BIT_CODE(pVCpu));
4768
4769 uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + offNextInstr;
4770 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
4771 pVCpu->cpum.GstCtx.rip = uNewEip;
4772 else
4773 return iemRaiseGeneralProtectionFault0(pVCpu);
4774 }
4775 else
4776 {
4777 Assert(enmEffOpSize == IEMMODE_64BIT);
4778
4779 uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
4780 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
4781 pVCpu->cpum.GstCtx.rip = uNewRip;
4782 else
4783 return iemRaiseGeneralProtectionFault0(pVCpu);
4784 }
4785
4786#ifndef IEM_WITH_CODE_TLB
4787 /* Flush the prefetch buffer. */
4788 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4789#endif
4790
4791 /*
4792 * Clear RF and finish the instruction (maybe raise #DB).
4793 */
4794 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
4795}
4796
4797/** @} */
4798
4799
4800/** @name FPU access and helpers.
4801 *
4802 * @{
4803 */
4804
4805/**
4806 * Updates the x87.DS and FPUDP registers.
4807 *
4808 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4809 * @param pFpuCtx The FPU context.
4810 * @param iEffSeg The effective segment register.
4811 * @param GCPtrEff The effective address relative to @a iEffSeg.
4812 */
4813DECLINLINE(void) iemFpuUpdateDP(PVMCPUCC pVCpu, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
4814{
4815 RTSEL sel;
4816 switch (iEffSeg)
4817 {
4818 case X86_SREG_DS: sel = pVCpu->cpum.GstCtx.ds.Sel; break;
4819 case X86_SREG_SS: sel = pVCpu->cpum.GstCtx.ss.Sel; break;
4820 case X86_SREG_CS: sel = pVCpu->cpum.GstCtx.cs.Sel; break;
4821 case X86_SREG_ES: sel = pVCpu->cpum.GstCtx.es.Sel; break;
4822 case X86_SREG_FS: sel = pVCpu->cpum.GstCtx.fs.Sel; break;
4823 case X86_SREG_GS: sel = pVCpu->cpum.GstCtx.gs.Sel; break;
4824 default:
4825 AssertMsgFailed(("%d\n", iEffSeg));
4826 sel = pVCpu->cpum.GstCtx.ds.Sel;
4827 }
4828    /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
4829 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
4830 {
4831 pFpuCtx->DS = 0;
4832 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
4833 }
4834 else if (!IEM_IS_LONG_MODE(pVCpu)) /** @todo this is weird. explain. */
4835 {
4836 pFpuCtx->DS = sel;
4837 pFpuCtx->FPUDP = GCPtrEff;
4838 }
4839 else
4840 *(uint64_t *)&pFpuCtx->FPUDP = GCPtrEff;
4841}
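/*
 * Illustrative sketch (not part of the build): in the real/V86-mode branch above
 * FPUDP becomes a linear address formed the usual real-mode way, i.e.
 * offset + (selector << 4).  The numbers are arbitrary example values.
 */
#if 0
static uint32_t iemExampleRealModeFpuDp(void)
{
    uint16_t const sel    = UINT16_C(0x1234);
    uint32_t const offEff = UINT32_C(0x0056);
    return offEff + ((uint32_t)sel << 4);   /* = 0x12396 */
}
#endif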
4842
4843
4844/**
4845 * Rotates the stack registers in the push direction.
4846 *
4847 * @param pFpuCtx The FPU context.
4848 * @remarks This is a complete waste of time, but fxsave stores the registers in
4849 * stack order.
4850 */
4851DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
4852{
4853 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
4854 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
4855 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
4856 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
4857 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
4858 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
4859 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
4860 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
4861 pFpuCtx->aRegs[0].r80 = r80Tmp;
4862}
4863
4864
4865/**
4866 * Rotates the stack registers in the pop direction.
4867 *
4868 * @param pFpuCtx The FPU context.
4869 * @remarks This is a complete waste of time, but fxsave stores the registers in
4870 * stack order.
4871 */
4872DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
4873{
4874 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
4875 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
4876 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
4877 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
4878 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
4879 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
4880 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
4881 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
4882 pFpuCtx->aRegs[7].r80 = r80Tmp;
4883}
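/*
 * Illustrative sketch (not part of the build): because fxsave keeps the registers
 * in stack order, aRegs[i] is ST(i) and the physical register number is
 * (TOP + i) & 7.  The helper is hypothetical and only spells out the mapping the
 * code in this section relies on.
 */
#if 0
static unsigned iemExampleStRegToPhysReg(uint16_t fFsw, unsigned iStReg)
{
    return (X86_FSW_TOP_GET(fFsw) + iStReg) & X86_FSW_TOP_SMASK;
}
#endif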
4884
4885
4886/**
4887 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
4888 * exception prevents it.
4889 *
4890 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4891 * @param pResult The FPU operation result to push.
4892 * @param pFpuCtx The FPU context.
4893 */
4894static void iemFpuMaybePushResult(PVMCPU pVCpu, PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx) RT_NOEXCEPT
4895{
4896 /* Update FSW and bail if there are pending exceptions afterwards. */
4897 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
4898 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
4899 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4900 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4901 {
4902        if ((fFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4903 Log11(("iemFpuMaybePushResult: %04x:%08RX64: FSW %#x -> %#x\n",
4904 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fFsw));
4905 pFpuCtx->FSW = fFsw;
4906 return;
4907 }
4908
4909    uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK; /* +7 == -1 modulo 8: the register that becomes TOP after the push. */
4910 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
4911 {
4912 /* All is fine, push the actual value. */
4913 pFpuCtx->FTW |= RT_BIT(iNewTop);
4914 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
4915 }
4916 else if (pFpuCtx->FCW & X86_FCW_IM)
4917 {
4918 /* Masked stack overflow, push QNaN. */
4919 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
4920 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
4921 }
4922 else
4923 {
4924 /* Raise stack overflow, don't push anything. */
4925 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
4926 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
4927 Log11(("iemFpuMaybePushResult: %04x:%08RX64: stack overflow (FSW=%#x)\n",
4928 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
4929 return;
4930 }
4931
4932 fFsw &= ~X86_FSW_TOP_MASK;
4933 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
4934 pFpuCtx->FSW = fFsw;
4935
4936 iemFpuRotateStackPush(pFpuCtx);
4937 RT_NOREF(pVCpu);
4938}
4939
4940
4941/**
4942 * Stores a result in a FPU register and updates the FSW and FTW.
4943 *
4944 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4945 * @param pFpuCtx The FPU context.
4946 * @param pResult The result to store.
4947 * @param iStReg Which FPU register to store it in.
4948 */
4949static void iemFpuStoreResultOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg) RT_NOEXCEPT
4950{
4951 Assert(iStReg < 8);
4952 uint16_t fNewFsw = pFpuCtx->FSW;
4953 uint16_t const iReg = (X86_FSW_TOP_GET(fNewFsw) + iStReg) & X86_FSW_TOP_SMASK;
4954 fNewFsw &= ~X86_FSW_C_MASK;
4955 fNewFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
4956 if ((fNewFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4957 Log11(("iemFpuStoreResultOnly: %04x:%08RX64: FSW %#x -> %#x\n",
4958 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fNewFsw));
4959 pFpuCtx->FSW = fNewFsw;
4960 pFpuCtx->FTW |= RT_BIT(iReg);
4961 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
4962 RT_NOREF(pVCpu);
4963}
4964
4965
4966/**
4967 * Only updates the FPU status word (FSW) with the result of the current
4968 * instruction.
4969 *
4970 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4971 * @param pFpuCtx The FPU context.
4972 * @param u16FSW The FSW output of the current instruction.
4973 */
4974static void iemFpuUpdateFSWOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint16_t u16FSW) RT_NOEXCEPT
4975{
4976 uint16_t fNewFsw = pFpuCtx->FSW;
4977 fNewFsw &= ~X86_FSW_C_MASK;
4978 fNewFsw |= u16FSW & ~X86_FSW_TOP_MASK;
4979 if ((fNewFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4980        Log11(("iemFpuUpdateFSWOnly: %04x:%08RX64: FSW %#x -> %#x\n",
4981 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fNewFsw));
4982 pFpuCtx->FSW = fNewFsw;
4983 RT_NOREF(pVCpu);
4984}
4985
4986
4987/**
4988 * Pops one item off the FPU stack if no pending exception prevents it.
4989 *
4990 * @param pFpuCtx The FPU context.
4991 */
4992static void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx) RT_NOEXCEPT
4993{
4994 /* Check pending exceptions. */
4995 uint16_t uFSW = pFpuCtx->FSW;
4996 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4997 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4998 return;
4999
5000    /* TOP++ (a pop increments the top-of-stack pointer; the +9 below is +1 modulo 8). */
5001 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
5002 uFSW &= ~X86_FSW_TOP_MASK;
5003 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
5004 pFpuCtx->FSW = uFSW;
5005
5006 /* Mark the previous ST0 as empty. */
5007 iOldTop >>= X86_FSW_TOP_SHIFT;
5008 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
5009
5010 /* Rotate the registers. */
5011 iemFpuRotateStackPop(pFpuCtx);
5012}
5013
5014
5015/**
5016 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
5017 *
5018 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5019 * @param pResult The FPU operation result to push.
5020 * @param uFpuOpcode The FPU opcode value.
5021 */
5022void iemFpuPushResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint16_t uFpuOpcode) RT_NOEXCEPT
5023{
5024 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5025 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5026 iemFpuMaybePushResult(pVCpu, pResult, pFpuCtx);
5027}
5028
5029
5030/**
5031 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
5032 * and sets FPUDP and FPUDS.
5033 *
5034 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5035 * @param pResult The FPU operation result to push.
5036 * @param iEffSeg The effective segment register.
5037 * @param GCPtrEff The effective address relative to @a iEffSeg.
5038 * @param uFpuOpcode The FPU opcode value.
5039 */
5040void iemFpuPushResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff,
5041 uint16_t uFpuOpcode) RT_NOEXCEPT
5042{
5043 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5044 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5045 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5046 iemFpuMaybePushResult(pVCpu, pResult, pFpuCtx);
5047}
5048
5049
5050/**
5051 * Replaces ST0 with the first value and pushes the second onto the FPU stack,
5052 * unless a pending exception prevents it.
5053 *
5054 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5055 * @param pResult The FPU operation result to store and push.
5056 * @param uFpuOpcode The FPU opcode value.
5057 */
5058void iemFpuPushResultTwo(PVMCPUCC pVCpu, PIEMFPURESULTTWO pResult, uint16_t uFpuOpcode) RT_NOEXCEPT
5059{
5060 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5061 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5062
5063 /* Update FSW and bail if there are pending exceptions afterwards. */
5064 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
5065 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
5066 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
5067 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
5068 {
5069 if ((fFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
5070 Log11(("iemFpuPushResultTwo: %04x:%08RX64: FSW %#x -> %#x\n",
5071 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fFsw));
5072 pFpuCtx->FSW = fFsw;
5073 return;
5074 }
5075
5076 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
5077 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
5078 {
5079 /* All is fine, push the actual value. */
5080 pFpuCtx->FTW |= RT_BIT(iNewTop);
5081 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
5082 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
5083 }
5084 else if (pFpuCtx->FCW & X86_FCW_IM)
5085 {
5086 /* Masked stack overflow, push QNaN. */
5087 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
5088 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
5089 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5090 }
5091 else
5092 {
5093 /* Raise stack overflow, don't push anything. */
5094 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
5095 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
5096 Log11(("iemFpuPushResultTwo: %04x:%08RX64: stack overflow (FSW=%#x)\n",
5097 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5098 return;
5099 }
5100
5101 fFsw &= ~X86_FSW_TOP_MASK;
5102 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
5103 pFpuCtx->FSW = fFsw;
5104
5105 iemFpuRotateStackPush(pFpuCtx);
5106}
5107
5108
5109/**
5110 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
5111 * FOP.
5112 *
5113 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5114 * @param pResult The result to store.
5115 * @param iStReg Which FPU register to store it in.
5116 * @param uFpuOpcode The FPU opcode value.
5117 */
5118void iemFpuStoreResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5119{
5120 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5121 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5122 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5123}
5124
5125
5126/**
5127 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
5128 * FOP, and then pops the stack.
5129 *
5130 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5131 * @param pResult The result to store.
5132 * @param iStReg Which FPU register to store it in.
5133 * @param uFpuOpcode The FPU opcode value.
5134 */
5135void iemFpuStoreResultThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5136{
5137 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5138 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5139 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5140 iemFpuMaybePopOne(pFpuCtx);
5141}
5142
5143
5144/**
5145 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
5146 * FPUDP, and FPUDS.
5147 *
5148 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5149 * @param pResult The result to store.
5150 * @param iStReg Which FPU register to store it in.
5151 * @param iEffSeg The effective memory operand selector register.
5152 * @param GCPtrEff The effective memory operand offset.
5153 * @param uFpuOpcode The FPU opcode value.
5154 */
5155void iemFpuStoreResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
5156 uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5157{
5158 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5159 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5160 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5161 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5162}
5163
5164
5165/**
5166 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
5167 * FPUDP, and FPUDS, and then pops the stack.
5168 *
5169 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5170 * @param pResult The result to store.
5171 * @param iStReg Which FPU register to store it in.
5172 * @param iEffSeg The effective memory operand selector register.
5173 * @param GCPtrEff The effective memory operand offset.
5174 * @param uFpuOpcode The FPU opcode value.
5175 */
5176void iemFpuStoreResultWithMemOpThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult,
5177 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5178{
5179 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5180 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5181 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5182 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5183 iemFpuMaybePopOne(pFpuCtx);
5184}
5185
5186
5187/**
5188 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
5189 *
5190 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5191 * @param uFpuOpcode The FPU opcode value.
5192 */
5193void iemFpuUpdateOpcodeAndIp(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5194{
5195 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5196 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5197}
5198
5199
5200/**
5201 * Updates the FSW, FOP, FPUIP, and FPUCS.
5202 *
5203 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5204 * @param u16FSW The FSW from the current instruction.
5205 * @param uFpuOpcode The FPU opcode value.
5206 */
5207void iemFpuUpdateFSW(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT
5208{
5209 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5210 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5211 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5212}
5213
5214
5215/**
5216 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
5217 *
5218 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5219 * @param u16FSW The FSW from the current instruction.
5220 * @param uFpuOpcode The FPU opcode value.
5221 */
5222void iemFpuUpdateFSWThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT
5223{
5224 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5225 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5226 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5227 iemFpuMaybePopOne(pFpuCtx);
5228}
5229
5230
5231/**
5232 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
5233 *
5234 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5235 * @param u16FSW The FSW from the current instruction.
5236 * @param iEffSeg The effective memory operand selector register.
5237 * @param GCPtrEff The effective memory operand offset.
5238 * @param uFpuOpcode The FPU opcode value.
5239 */
5240void iemFpuUpdateFSWWithMemOp(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5241{
5242 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5243 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5244 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5245 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5246}
5247
5248
5249/**
5250 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
5251 *
5252 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5253 * @param u16FSW The FSW from the current instruction.
5254 * @param uFpuOpcode The FPU opcode value.
5255 */
5256void iemFpuUpdateFSWThenPopPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT
5257{
5258 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5259 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5260 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5261 iemFpuMaybePopOne(pFpuCtx);
5262 iemFpuMaybePopOne(pFpuCtx);
5263}
5264
5265
5266/**
5267 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
5268 *
5269 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5270 * @param u16FSW The FSW from the current instruction.
5271 * @param iEffSeg The effective memory operand selector register.
5272 * @param GCPtrEff The effective memory operand offset.
5273 * @param uFpuOpcode The FPU opcode value.
5274 */
5275void iemFpuUpdateFSWWithMemOpThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff,
5276 uint16_t uFpuOpcode) RT_NOEXCEPT
5277{
5278 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5279 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5280 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5281 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5282 iemFpuMaybePopOne(pFpuCtx);
5283}
5284
5285
5286/**
5287 * Worker routine for raising an FPU stack underflow exception.
5288 *
5289 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5290 * @param pFpuCtx The FPU context.
5291 * @param iStReg The stack register being accessed.
5292 */
5293static void iemFpuStackUnderflowOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint8_t iStReg)
5294{
5295 Assert(iStReg < 8 || iStReg == UINT8_MAX);
5296 if (pFpuCtx->FCW & X86_FCW_IM)
5297 {
5298 /* Masked underflow. */
5299 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5300 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5301 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5302 if (iStReg != UINT8_MAX)
5303 {
5304 pFpuCtx->FTW |= RT_BIT(iReg);
5305 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
5306 }
5307 }
5308 else
5309 {
5310 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5311 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5312 Log11(("iemFpuStackUnderflowOnly: %04x:%08RX64: underflow (FSW=%#x)\n",
5313 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5314 }
5315 RT_NOREF(pVCpu);
5316}
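/*
 * Minimal sketch (not part of the build): whether a stack fault is handled
 * "masked" (QNaN stuffed into the destination) or left pending (ES and B set,
 * #MF delivered by the next waiting FPU instruction) depends solely on FCW.IM,
 * as the worker above shows.  The helper name is made up.
 */
#if 0
static bool iemExampleIsStackFaultMasked(PCX86FXSTATE pFpuCtx)
{
    return RT_BOOL(pFpuCtx->FCW & X86_FCW_IM);
}
#endif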
5317
5318
5319/**
5320 * Raises a FPU stack underflow exception.
5321 *
5322 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5323 * @param iStReg The destination register that should be loaded
5324 * with QNaN if \#IS is not masked. Specify
5325 * UINT8_MAX if none (like for fcom).
5326 * @param uFpuOpcode The FPU opcode value.
5327 */
5328void iemFpuStackUnderflow(PVMCPUCC pVCpu, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5329{
5330 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5331 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5332 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5333}
5334
5335
5336void iemFpuStackUnderflowWithMemOp(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5337{
5338 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5339 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5340 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5341 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5342}
5343
5344
5345void iemFpuStackUnderflowThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5346{
5347 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5348 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5349 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5350 iemFpuMaybePopOne(pFpuCtx);
5351}
5352
5353
5354void iemFpuStackUnderflowWithMemOpThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff,
5355 uint16_t uFpuOpcode) RT_NOEXCEPT
5356{
5357 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5358 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5359 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5360 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5361 iemFpuMaybePopOne(pFpuCtx);
5362}
5363
5364
5365void iemFpuStackUnderflowThenPopPop(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5366{
5367 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5368 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5369 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, UINT8_MAX);
5370 iemFpuMaybePopOne(pFpuCtx);
5371 iemFpuMaybePopOne(pFpuCtx);
5372}
5373
5374
5375void iemFpuStackPushUnderflow(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5376{
5377 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5378 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5379
5380 if (pFpuCtx->FCW & X86_FCW_IM)
5381 {
5382        /* Masked underflow - push QNaN. */
5383 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5384 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5385 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5386 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5387 pFpuCtx->FTW |= RT_BIT(iNewTop);
5388 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5389 iemFpuRotateStackPush(pFpuCtx);
5390 }
5391 else
5392 {
5393 /* Exception pending - don't change TOP or the register stack. */
5394 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5395 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5396 Log11(("iemFpuStackPushUnderflow: %04x:%08RX64: underflow (FSW=%#x)\n",
5397 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5398 }
5399}
5400
5401
5402void iemFpuStackPushUnderflowTwo(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5403{
5404 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5405 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5406
5407 if (pFpuCtx->FCW & X86_FCW_IM)
5408 {
5409        /* Masked underflow - push QNaN. */
5410 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5411 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5412 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5413 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5414 pFpuCtx->FTW |= RT_BIT(iNewTop);
5415 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
5416 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5417 iemFpuRotateStackPush(pFpuCtx);
5418 }
5419 else
5420 {
5421 /* Exception pending - don't change TOP or the register stack. */
5422 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5423 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5424 Log11(("iemFpuStackPushUnderflowTwo: %04x:%08RX64: underflow (FSW=%#x)\n",
5425 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5426 }
5427}
5428
5429
5430/**
5431 * Worker routine for raising an FPU stack overflow exception on a push.
5432 *
5433 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5434 * @param pFpuCtx The FPU context.
5435 */
5436static void iemFpuStackPushOverflowOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx) RT_NOEXCEPT
5437{
5438 if (pFpuCtx->FCW & X86_FCW_IM)
5439 {
5440 /* Masked overflow. */
5441 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5442 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5443 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
5444 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5445 pFpuCtx->FTW |= RT_BIT(iNewTop);
5446 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5447 iemFpuRotateStackPush(pFpuCtx);
5448 }
5449 else
5450 {
5451 /* Exception pending - don't change TOP or the register stack. */
5452 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5453 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5454 Log11(("iemFpuStackPushOverflowOnly: %04x:%08RX64: overflow (FSW=%#x)\n",
5455 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5456 }
5457 RT_NOREF(pVCpu);
5458}
5459
5460
5461/**
5462 * Raises a FPU stack overflow exception on a push.
5463 *
5464 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5465 * @param uFpuOpcode The FPU opcode value.
5466 */
5467void iemFpuStackPushOverflow(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5468{
5469 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5470 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5471 iemFpuStackPushOverflowOnly(pVCpu, pFpuCtx);
5472}
5473
5474
5475/**
5476 * Raises a FPU stack overflow exception on a push with a memory operand.
5477 *
5478 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5479 * @param iEffSeg The effective memory operand selector register.
5480 * @param GCPtrEff The effective memory operand offset.
5481 * @param uFpuOpcode The FPU opcode value.
5482 */
5483void iemFpuStackPushOverflowWithMemOp(PVMCPUCC pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5484{
5485 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5486 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5487 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5488 iemFpuStackPushOverflowOnly(pVCpu, pFpuCtx);
5489}
5490
5491/** @} */
5492
5493
5494/** @name Memory access.
5495 *
5496 * @{
5497 */
5498
5499#undef LOG_GROUP
5500#define LOG_GROUP LOG_GROUP_IEM_MEM
5501
5502/**
5503 * Updates the IEMCPU::cbWritten counter if applicable.
5504 *
5505 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5506 * @param fAccess The access being accounted for.
5507 * @param cbMem The access size.
5508 */
5509DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPUCC pVCpu, uint32_t fAccess, size_t cbMem)
5510{
5511 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
5512 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
5513 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
5514}
5515
5516
5517/**
5518 * Applies the segment limit, base and attributes.
5519 *
5520 * This may raise a \#GP or \#SS.
5521 *
5522 * @returns VBox strict status code.
5523 *
5524 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5525 * @param fAccess The kind of access which is being performed.
5526 * @param iSegReg The index of the segment register to apply.
5527 * This is UINT8_MAX if none (for IDT, GDT, LDT,
5528 * TSS, ++).
5529 * @param cbMem The access size.
5530 * @param pGCPtrMem Pointer to the guest memory address to apply
5531 * segmentation to. Input and output parameter.
5532 */
5533VBOXSTRICTRC iemMemApplySegment(PVMCPUCC pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem) RT_NOEXCEPT
5534{
5535 if (iSegReg == UINT8_MAX)
5536 return VINF_SUCCESS;
5537
5538 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
5539 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
5540 switch (IEM_GET_CPU_MODE(pVCpu))
5541 {
5542 case IEMMODE_16BIT:
5543 case IEMMODE_32BIT:
5544 {
5545 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
5546 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
5547
5548 if ( pSel->Attr.n.u1Present
5549 && !pSel->Attr.n.u1Unusable)
5550 {
5551 Assert(pSel->Attr.n.u1DescType);
5552 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
5553 {
5554 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5555 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
5556 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
5557
5558 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
5559 {
5560 /** @todo CPL check. */
5561 }
5562
5563 /*
5564 * There are two kinds of data selectors, normal and expand down.
5565 */
5566 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
5567 {
5568 if ( GCPtrFirst32 > pSel->u32Limit
5569 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
5570 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5571 }
5572 else
5573 {
5574 /*
5575 * The upper boundary is defined by the B bit, not the G bit!
5576 */
5577 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
5578 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
5579 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5580 }
5581 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
5582 }
5583 else
5584 {
5585 /*
5586                     * A code selector can usually be used to read through; writing is
5587                     * only permitted in real and V8086 mode.
5588 */
5589 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5590 || ( (fAccess & IEM_ACCESS_TYPE_READ)
5591 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
5592 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
5593 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
5594
5595 if ( GCPtrFirst32 > pSel->u32Limit
5596 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
5597 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5598
5599 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
5600 {
5601 /** @todo CPL check. */
5602 }
5603
5604 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
5605 }
5606 }
5607 else
5608 return iemRaiseGeneralProtectionFault0(pVCpu);
5609 return VINF_SUCCESS;
5610 }
5611
5612 case IEMMODE_64BIT:
5613 {
5614 RTGCPTR GCPtrMem = *pGCPtrMem;
5615 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
5616 *pGCPtrMem = GCPtrMem + pSel->u64Base;
5617
5618 Assert(cbMem >= 1);
5619 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
5620 return VINF_SUCCESS;
5621 /** @todo We should probably raise \#SS(0) here if segment is SS; see AMD spec.
5622 * 4.12.2 "Data Limit Checks in 64-bit Mode". */
5623 return iemRaiseGeneralProtectionFault0(pVCpu);
5624 }
5625
5626 default:
5627 AssertFailedReturn(VERR_IEM_IPE_7);
5628 }
5629}
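/*
 * Illustrative sketch (not part of the build): for an expand-down data segment the
 * valid offsets lie *above* the limit, with the upper bound coming from the B
 * (default-big) bit rather than the G bit, which is what the check above does.
 * The helper is hypothetical.
 */
#if 0
static bool iemExampleExpandDownInRange(uint32_t offFirst, uint32_t offLast, uint32_t uLimit, bool fDefBig)
{
    return offFirst > uLimit                                      /* at or below the limit faults */
        && offLast  <= (fDefBig ? UINT32_MAX : UINT32_C(0xffff)); /* upper bound selected by the B bit */
}
#endif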
5630
5631
5632/**
5633 * Translates a virtual address to a physical address and checks if we
5634 * can access the page as specified.
5635 *
5636 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5637 * @param GCPtrMem The virtual address.
5638 * @param cbAccess The access size, for raising \#PF correctly for
5639 * FXSAVE and such.
5640 * @param fAccess The intended access.
5641 * @param pGCPhysMem Where to return the physical address.
5642 */
5643VBOXSTRICTRC iemMemPageTranslateAndCheckAccess(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint32_t cbAccess,
5644 uint32_t fAccess, PRTGCPHYS pGCPhysMem) RT_NOEXCEPT
5645{
5646 /** @todo Need a different PGM interface here. We're currently using
5647     * generic / REM interfaces. This won't cut it for R0. */
5648 /** @todo If/when PGM handles paged real-mode, we can remove the hack in
5649 * iemSvmWorldSwitch/iemVmxWorldSwitch to work around raising a page-fault
5650 * here. */
5651 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
5652 PGMPTWALKFAST WalkFast;
5653 AssertCompile(IEM_ACCESS_TYPE_READ == PGMQPAGE_F_READ);
5654 AssertCompile(IEM_ACCESS_TYPE_WRITE == PGMQPAGE_F_WRITE);
5655 AssertCompile(IEM_ACCESS_TYPE_EXEC == PGMQPAGE_F_EXECUTE);
5656 AssertCompile(X86_CR0_WP == PGMQPAGE_F_CR0_WP0);
5657 uint32_t fQPage = (fAccess & (PGMQPAGE_F_READ | IEM_ACCESS_TYPE_WRITE | PGMQPAGE_F_EXECUTE))
5658 | (((uint32_t)pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP) ^ X86_CR0_WP);
5659 if (IEM_GET_CPL(pVCpu) == 3 && !(fAccess & IEM_ACCESS_WHAT_SYS))
5660 fQPage |= PGMQPAGE_F_USER_MODE;
5661 int rc = PGMGstQueryPageFast(pVCpu, GCPtrMem, fQPage, &WalkFast);
5662 if (RT_SUCCESS(rc))
5663 {
5664 Assert((WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED) && WalkFast.fFailed == PGM_WALKFAIL_SUCCESS);
5665
5666 /* If the page is writable and does not have the no-exec bit set, all
5667 access is allowed. Otherwise we'll have to check more carefully... */
5668 Assert( (WalkFast.fEffective & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) == (X86_PTE_RW | X86_PTE_US)
5669 || ( ( !(fAccess & IEM_ACCESS_TYPE_WRITE)
5670 || (WalkFast.fEffective & X86_PTE_RW)
5671 || ( ( IEM_GET_CPL(pVCpu) != 3
5672 || (fAccess & IEM_ACCESS_WHAT_SYS))
5673 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)) )
5674 && ( (WalkFast.fEffective & X86_PTE_US)
5675 || IEM_GET_CPL(pVCpu) != 3
5676 || (fAccess & IEM_ACCESS_WHAT_SYS) )
5677 && ( !(fAccess & IEM_ACCESS_TYPE_EXEC)
5678 || !(WalkFast.fEffective & X86_PTE_PAE_NX)
5679 || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) )
5680 )
5681 );
5682
5683 /* PGMGstQueryPageFast sets the A & D bits. */
5684 /** @todo testcase: check when A and D bits are actually set by the CPU. */
5685 Assert(!(~WalkFast.fEffective & (fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A)));
5686
5687 *pGCPhysMem = WalkFast.GCPhys;
5688 return VINF_SUCCESS;
5689 }
5690
5691 LogEx(LOG_GROUP_IEM,("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
5692 /** @todo Check unassigned memory in unpaged mode. */
5693#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5694 if (WalkFast.fFailed & PGM_WALKFAIL_EPT)
5695 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
5696#endif
5697 *pGCPhysMem = NIL_RTGCPHYS;
5698 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess, rc);
5699}
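/*
 * Minimal usage sketch (not part of the build), assuming the caller has already
 * applied segmentation and wants to translate a 4 byte data read; IEM_ACCESS_DATA_R
 * is the regular read-data access type used elsewhere in IEM.
 */
#if 0
static VBOXSTRICTRC iemExampleTranslateDataRead(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, PRTGCPHYS pGCPhys)
{
    return iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, sizeof(uint32_t), IEM_ACCESS_DATA_R, pGCPhys);
}
#endif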
5700
5701#if 0 /*unused*/
5702/**
5703 * Looks up a memory mapping entry.
5704 *
5705 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
5706 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5707 * @param pvMem The memory address.
5708 * @param fAccess The access to.
5709 */
5710DECLINLINE(int) iemMapLookup(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess)
5711{
5712 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
5713 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
5714 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
5715 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5716 return 0;
5717 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
5718 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5719 return 1;
5720 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
5721 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5722 return 2;
5723 return VERR_NOT_FOUND;
5724}
5725#endif
5726
5727/**
5728 * Finds a free memmap entry when using iNextMapping doesn't work.
5729 *
5730 * @returns Memory mapping index, 1024 on failure.
5731 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5732 */
5733static unsigned iemMemMapFindFree(PVMCPUCC pVCpu)
5734{
5735 /*
5736 * The easy case.
5737 */
5738 if (pVCpu->iem.s.cActiveMappings == 0)
5739 {
5740 pVCpu->iem.s.iNextMapping = 1;
5741 return 0;
5742 }
5743
5744 /* There should be enough mappings for all instructions. */
5745 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
5746
5747 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
5748 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
5749 return i;
5750
5751 AssertFailedReturn(1024);
5752}
5753
5754
5755/**
5756 * Commits a bounce buffer that needs writing back and unmaps it.
5757 *
5758 * @returns Strict VBox status code.
5759 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5760 * @param iMemMap The index of the buffer to commit.
5761 * @param fPostponeFail Whether we can postpone writer failures to ring-3.
5762 * Always false in ring-3, obviously.
5763 */
5764static VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPUCC pVCpu, unsigned iMemMap, bool fPostponeFail)
5765{
5766 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
5767 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
5768#ifdef IN_RING3
5769 Assert(!fPostponeFail);
5770 RT_NOREF_PV(fPostponeFail);
5771#endif
5772
5773 /*
5774 * Do the writing.
5775 */
5776 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
5777 if (!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned)
5778 {
5779 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
5780 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
5781 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
5782 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
5783 {
5784 /*
5785 * Carefully and efficiently dealing with access handler return
5786             * codes makes this a little bloated.
5787 */
5788 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
5789 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
5790 pbBuf,
5791 cbFirst,
5792 PGMACCESSORIGIN_IEM);
5793 if (rcStrict == VINF_SUCCESS)
5794 {
5795 if (cbSecond)
5796 {
5797 rcStrict = PGMPhysWrite(pVM,
5798 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5799 pbBuf + cbFirst,
5800 cbSecond,
5801 PGMACCESSORIGIN_IEM);
5802 if (rcStrict == VINF_SUCCESS)
5803 { /* nothing */ }
5804 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5805 {
5806 LogEx(LOG_GROUP_IEM,
5807 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
5808 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5809 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5810 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5811 }
5812#ifndef IN_RING3
5813 else if (fPostponeFail)
5814 {
5815 LogEx(LOG_GROUP_IEM,
5816 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5817 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5818 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5819 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
5820 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5821 return iemSetPassUpStatus(pVCpu, rcStrict);
5822 }
5823#endif
5824 else
5825 {
5826 LogEx(LOG_GROUP_IEM,
5827 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5828 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5829 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5830 return rcStrict;
5831 }
5832 }
5833 }
5834 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5835 {
5836 if (!cbSecond)
5837 {
5838 LogEx(LOG_GROUP_IEM,
5839 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
5840 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
5841 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5842 }
5843 else
5844 {
5845 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
5846 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5847 pbBuf + cbFirst,
5848 cbSecond,
5849 PGMACCESSORIGIN_IEM);
5850 if (rcStrict2 == VINF_SUCCESS)
5851 {
5852 LogEx(LOG_GROUP_IEM,
5853 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
5854 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5855 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5856 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5857 }
5858 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
5859 {
5860 LogEx(LOG_GROUP_IEM,
5861 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
5862 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5863 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
5864 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
5865 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5866 }
5867#ifndef IN_RING3
5868 else if (fPostponeFail)
5869 {
5870 LogEx(LOG_GROUP_IEM,
5871 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5872 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5873 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5874 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
5875 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5876 return iemSetPassUpStatus(pVCpu, rcStrict);
5877 }
5878#endif
5879 else
5880 {
5881 LogEx(LOG_GROUP_IEM,
5882 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5883 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5884 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
5885 return rcStrict2;
5886 }
5887 }
5888 }
5889#ifndef IN_RING3
5890 else if (fPostponeFail)
5891 {
5892 LogEx(LOG_GROUP_IEM,
5893 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5894 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5895 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5896 if (!cbSecond)
5897 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
5898 else
5899 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
5900 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5901 return iemSetPassUpStatus(pVCpu, rcStrict);
5902 }
5903#endif
5904 else
5905 {
5906 LogEx(LOG_GROUP_IEM,
5907 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
5908 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5909 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5910 return rcStrict;
5911 }
5912 }
5913 else
5914 {
5915 /*
5916 * No access handlers, much simpler.
5917 */
5918 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
5919 if (RT_SUCCESS(rc))
5920 {
5921 if (cbSecond)
5922 {
5923 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
5924 if (RT_SUCCESS(rc))
5925 { /* likely */ }
5926 else
5927 {
5928 LogEx(LOG_GROUP_IEM,
5929 ("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5930 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5931 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
5932 return rc;
5933 }
5934 }
5935 }
5936 else
5937 {
5938 LogEx(LOG_GROUP_IEM,
5939 ("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
5940 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
5941 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5942 return rc;
5943 }
5944 }
5945 }
5946
5947#if defined(IEM_LOG_MEMORY_WRITES)
5948 Log5(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
5949 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
5950 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
5951 Log5(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5952 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
5953 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
5954
5955 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
5956 g_cbIemWrote = cbWrote;
5957 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
5958#endif
5959
5960 /*
5961 * Free the mapping entry.
5962 */
5963 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
5964 Assert(pVCpu->iem.s.cActiveMappings != 0);
5965 pVCpu->iem.s.cActiveMappings--;
5966 return VINF_SUCCESS;
5967}
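/*
 * Illustrative sketch (not part of the build): how the fPostponeFail paths above
 * choose the IEM_ACCESS_PENDING_R3_WRITE_XXX flags.  If the first PGMPhysWrite is
 * postponed, both halves must be redone in ring-3 when a second page is involved;
 * if only the second write is postponed, just the 2nd flag is needed.  The helper
 * name iemExamplePendingWriteFlags is hypothetical.
 */
#if 0
static uint32_t iemExamplePendingWriteFlags(bool fFirstPostponed, uint16_t cbSecond)
{
    if (!fFirstPostponed)
        return IEM_ACCESS_PENDING_R3_WRITE_2ND;     /* only the second half is redone in ring-3 */
    if (!cbSecond)
        return IEM_ACCESS_PENDING_R3_WRITE_1ST;     /* single page mapping */
    return IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
}
#endif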
5968
5969
5970/**
5971 * iemMemMap worker that deals with a request crossing pages.
5972 */
5973static VBOXSTRICTRC
5974iemMemBounceBufferMapCrossPage(PVMCPUCC pVCpu, int iMemMap, void **ppvMem, uint8_t *pbUnmapInfo,
5975 size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
5976{
5977 Assert(cbMem <= GUEST_PAGE_SIZE);
5978
5979 /*
5980 * Do the address translations.
5981 */
5982 uint32_t const cbFirstPage = GUEST_PAGE_SIZE - (uint32_t)(GCPtrFirst & GUEST_PAGE_OFFSET_MASK);
5983 RTGCPHYS GCPhysFirst;
5984 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, cbFirstPage, fAccess, &GCPhysFirst);
5985 if (rcStrict != VINF_SUCCESS)
5986 return rcStrict;
5987 Assert((GCPhysFirst & GUEST_PAGE_OFFSET_MASK) == (GCPtrFirst & GUEST_PAGE_OFFSET_MASK));
5988
5989 uint32_t const cbSecondPage = (uint32_t)cbMem - cbFirstPage;
5990 RTGCPHYS GCPhysSecond;
5991 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK,
5992 cbSecondPage, fAccess, &GCPhysSecond);
5993 if (rcStrict != VINF_SUCCESS)
5994 return rcStrict;
5995 Assert((GCPhysSecond & GUEST_PAGE_OFFSET_MASK) == 0);
5996 GCPhysSecond &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK; /** @todo why? */
5997
5998 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
5999
6000 /*
6001 * Read in the current memory content if it's a read, execute or partial
6002 * write access.
6003 */
6004 uint8_t * const pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
6005
6006 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
6007 {
6008 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
6009 {
6010 /*
6011 * Must carefully deal with access handler status codes here,
6012             * which makes the code a bit bloated.
6013 */
6014 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
6015 if (rcStrict == VINF_SUCCESS)
6016 {
6017 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
6018 if (rcStrict == VINF_SUCCESS)
6019 { /*likely */ }
6020 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6021 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6022 else
6023 {
6024                    LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (!!)\n",
6025 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6026 return rcStrict;
6027 }
6028 }
6029 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6030 {
6031 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
6032 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
6033 {
6034 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
6035 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6036 }
6037 else
6038 {
6039 LogEx(LOG_GROUP_IEM,
6040                          ("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
6041                           GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
6042 return rcStrict2;
6043 }
6044 }
6045 else
6046 {
6047                LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6048 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6049 return rcStrict;
6050 }
6051 }
6052 else
6053 {
6054 /*
6055             * No informational status codes here, much more straightforward.
6056 */
6057 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
6058 if (RT_SUCCESS(rc))
6059 {
6060 Assert(rc == VINF_SUCCESS);
6061 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
6062 if (RT_SUCCESS(rc))
6063 Assert(rc == VINF_SUCCESS);
6064 else
6065 {
6066 LogEx(LOG_GROUP_IEM,
6067                          ("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
6068 return rc;
6069 }
6070 }
6071 else
6072 {
6073 LogEx(LOG_GROUP_IEM,
6074                      ("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
6075 return rc;
6076 }
6077 }
6078 }
6079#ifdef VBOX_STRICT
6080 else
6081 memset(pbBuf, 0xcc, cbMem);
6082 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
6083 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
6084#endif
6085 AssertCompileMemberAlignment(VMCPU, iem.s.aBounceBuffers, 64);
6086
6087 /*
6088 * Commit the bounce buffer entry.
6089 */
6090 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
6091 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
6092 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
6093 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
6094 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
6095 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
6096 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
6097 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6098 pVCpu->iem.s.cActiveMappings++;
6099
6100 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6101 *ppvMem = pbBuf;
6102 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
6103 return VINF_SUCCESS;
6104}
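/*
 * Illustrative sketch (not part of the build): the page-split arithmetic used by
 * iemMemBounceBufferMapCrossPage above.  The 4096 byte page size and mask stand in
 * for GUEST_PAGE_SIZE / GUEST_PAGE_OFFSET_MASK, and the helper name is hypothetical.
 */
#if 0
static void iemExampleCrossPageSplit(uint64_t GCPtrFirst, uint32_t cbMem)
{
    uint32_t const cbPage   = 4096;                             /* GUEST_PAGE_SIZE on x86 */
    uint32_t const offPage  = (uint32_t)(GCPtrFirst & (cbPage - 1));
    uint32_t const cbFirst  = cbPage - offPage;                 /* bytes left on the first page */
    uint32_t const cbSecond = cbMem - cbFirst;                  /* remainder spills onto the next page */
    /* E.g. an 8 byte access at an address ending in 0xffa gives cbFirst=6, cbSecond=2. */
    NOREF(cbSecond);
}
#endif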
6105
6106
6107/**
6108 * iemMemMap worker that deals with iemMemPageMap failures.
6109 */
6110static VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPUCC pVCpu, unsigned iMemMap, void **ppvMem, uint8_t *pbUnmapInfo, size_t cbMem,
6111 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
6112{
6113 /*
6114 * Filter out conditions we can handle and the ones which shouldn't happen.
6115 */
6116 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
6117 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
6118 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
6119 {
6120 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
6121 return rcMap;
6122 }
6123 pVCpu->iem.s.cPotentialExits++;
6124
6125 /*
6126 * Read in the current memory content if it's a read, execute or partial
6127 * write access.
6128 */
6129 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
6130 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
6131 {
6132 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
6133 memset(pbBuf, 0xff, cbMem);
6134 else
6135 {
6136 int rc;
6137 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
6138 {
6139 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
6140 if (rcStrict == VINF_SUCCESS)
6141 { /* nothing */ }
6142 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6143 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6144 else
6145 {
6146 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6147 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6148 return rcStrict;
6149 }
6150 }
6151 else
6152 {
6153 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
6154 if (RT_SUCCESS(rc))
6155 { /* likely */ }
6156 else
6157 {
6158                    LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n",
6159 GCPhysFirst, rc));
6160 return rc;
6161 }
6162 }
6163 }
6164 }
6165#ifdef VBOX_STRICT
6166 else
6167 memset(pbBuf, 0xcc, cbMem);
6168#endif
6169#ifdef VBOX_STRICT
6170 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
6171 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
6172#endif
6173
6174 /*
6175 * Commit the bounce buffer entry.
6176 */
6177 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
6178 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
6179 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
6180 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
6181 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
6182 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
6183 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
6184 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6185 pVCpu->iem.s.cActiveMappings++;
6186
6187 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6188 *ppvMem = pbBuf;
6189 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
6190 return VINF_SUCCESS;
6191}
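/*
 * Illustrative sketch (not part of the build): the three PGM statuses that the
 * function above converts into bounce buffering.  Reads from unassigned physical
 * memory are satisfied with all-ones (0xff) instead of being passed to PGM,
 * matching the memset above.  The helper name is hypothetical and the per-status
 * comments are only a rough characterisation.
 */
#if 0
static bool iemExampleIsBounceableMapStatus(int rcMap)
{
    return rcMap == VERR_PGM_PHYS_TLB_CATCH_WRITE   /* write access intercepted by a handler      */
        || rcMap == VERR_PGM_PHYS_TLB_CATCH_ALL     /* all accesses intercepted by a handler      */
        || rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;   /* nothing backing the page (reads give 0xff) */
}
#endif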
6192
6193
6194
6195/**
6196 * Maps the specified guest memory for the given kind of access.
6197 *
6198 * This may be using bounce buffering of the memory if it's crossing a page
6199 * boundary or if there is an access handler installed for any of it. Because
6200 * of lock prefix guarantees, we're in for some extra clutter when this
6201 * happens.
6202 *
6203 * This may raise a \#GP, \#SS, \#PF or \#AC.
6204 *
6205 * @returns VBox strict status code.
6206 *
6207 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6208 * @param ppvMem Where to return the pointer to the mapped memory.
6209 * @param pbUnmapInfo Where to return unmap info to be passed to
6210 * iemMemCommitAndUnmap or iemMemRollbackAndUnmap when
6211 * done.
6212 * @param cbMem The number of bytes to map. This is usually 1, 2, 4, 6,
6213 * 8, 12, 16, 32 or 512. When used by string operations
6214 * it can be up to a page.
6215 * @param iSegReg The index of the segment register to use for this
6216 * access. The base and limits are checked. Use UINT8_MAX
6217 * to indicate that no segmentation is required (for IDT,
6218 * GDT and LDT accesses).
6219 * @param GCPtrMem The address of the guest memory.
6220 * @param fAccess How the memory is being accessed. The
6221 * IEM_ACCESS_TYPE_XXX part is used to figure out how to
6222 * map the memory, while the IEM_ACCESS_WHAT_XXX part is
6223 * used when raising exceptions. The IEM_ACCESS_ATOMIC and
6224 * IEM_ACCESS_PARTIAL_WRITE bits are also allowed to be
6225 * set.
6226 * @param uAlignCtl Alignment control:
6227 * - Bits 15:0 is the alignment mask.
6228 * - Bits 31:16 for flags like IEM_MEMMAP_F_ALIGN_GP,
6229 * IEM_MEMMAP_F_ALIGN_SSE, and
6230 * IEM_MEMMAP_F_ALIGN_GP_OR_AC.
6231 * Pass zero to skip alignment.
6232 */
6233VBOXSTRICTRC iemMemMap(PVMCPUCC pVCpu, void **ppvMem, uint8_t *pbUnmapInfo, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem,
6234 uint32_t fAccess, uint32_t uAlignCtl) RT_NOEXCEPT
6235{
6236 /*
6237 * Check the input and figure out which mapping entry to use.
6238 */
6239 Assert(cbMem <= sizeof(pVCpu->iem.s.aBounceBuffers[0]));
6240 Assert( cbMem <= 64 || cbMem == 512 || cbMem == 256 || cbMem == 108 || cbMem == 104 || cbMem == 102 || cbMem == 94
6241 || (iSegReg == UINT8_MAX && uAlignCtl == 0 && fAccess == IEM_ACCESS_DATA_R /* for the CPUID logging interface */) );
6242 Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK | IEM_ACCESS_ATOMIC | IEM_ACCESS_PARTIAL_WRITE)));
6243 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
6244
6245 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
6246 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6247 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
6248 {
6249 iMemMap = iemMemMapFindFree(pVCpu);
6250 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
6251 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
6252 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
6253 pVCpu->iem.s.aMemMappings[2].fAccess),
6254 VERR_IEM_IPE_9);
6255 }
6256
6257 /*
6258 * Map the memory, checking that we can actually access it. If something
6259 * slightly complicated happens, fall back on bounce buffering.
6260 */
6261 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
6262 if (rcStrict == VINF_SUCCESS)
6263 { /* likely */ }
6264 else
6265 return rcStrict;
6266
6267 if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE) /* Crossing a page boundary? */
6268 { /* likely */ }
6269 else
6270 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem, GCPtrMem, fAccess);
6271
6272 /*
6273 * Alignment check.
6274 */
6275 if ( (GCPtrMem & (uAlignCtl & UINT16_MAX)) == 0 )
6276 { /* likelyish */ }
6277 else
6278 {
6279 /* Misaligned access. */
6280 if ((fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6281 {
6282 if ( !(uAlignCtl & IEM_MEMMAP_F_ALIGN_GP)
6283 || ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_SSE)
6284 && (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) )
6285 {
6286 AssertCompile(X86_CR0_AM == X86_EFL_AC);
6287
6288 if (iemMemAreAlignmentChecksEnabled(pVCpu))
6289 return iemRaiseAlignmentCheckException(pVCpu);
6290 }
6291 else if ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_GP_OR_AC)
6292 && (GCPtrMem & 3) /* The value 4 matches 10980xe's FXSAVE and helps make bs3-cpu-basic2 work. */
6293 /** @todo may only apply to 2, 4 or 8 byte misalignments depending on the CPU
6294 * implementation. See FXSAVE/FRSTOR/XSAVE/XRSTOR/++. Using 4 for now as
6295 * that's what FXSAVE does on a 10980xe. */
6296 && iemMemAreAlignmentChecksEnabled(pVCpu))
6297 return iemRaiseAlignmentCheckException(pVCpu);
6298 else
6299 return iemRaiseGeneralProtectionFault0(pVCpu);
6300 }
6301
6302#if (defined(RT_ARCH_AMD64) && defined(RT_OS_LINUX)) || defined(RT_ARCH_ARM64)
6303        /* If the access is atomic there are host platform alignment restrictions
6304 we need to conform with. */
6305 if ( !(fAccess & IEM_ACCESS_ATOMIC)
6306# if defined(RT_ARCH_AMD64)
6307 || (64U - (GCPtrMem & 63U) >= cbMem) /* split-lock detection. ASSUMES 64 byte cache line. */
6308# elif defined(RT_ARCH_ARM64)
6309 || (16U - (GCPtrMem & 15U) >= cbMem) /* LSE2 allows atomics anywhere within a 16 byte sized & aligned block. */
6310# else
6311# error port me
6312# endif
6313 )
6314 { /* okay */ }
6315 else
6316 {
6317 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv LB %u - misaligned atomic fallback.\n", GCPtrMem, cbMem));
6318 pVCpu->iem.s.cMisalignedAtomics += 1;
6319 return VINF_EM_EMULATE_SPLIT_LOCK;
6320 }
6321#endif
6322 }
6323
6324#ifdef IEM_WITH_DATA_TLB
6325 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
6326
6327 /*
6328 * Get the TLB entry for this page and check PT flags.
6329 *
6330 * We reload the TLB entry if we need to set the dirty bit (accessed
6331 * should in theory always be set).
6332 */
6333 uint8_t *pbMem = NULL;
6334 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
6335 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
6336 if ( pTlbe->uTag == uTag
6337 && !(pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_ACCESSED | (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PT_NO_DIRTY : 0))) )
6338 {
6339 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
6340
6341 /* If the page is either supervisor only or non-writable, we need to do
6342 more careful access checks. */
6343 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_WRITE))
6344 {
6345 /* Write to read only memory? */
6346 if ( (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_WRITE)
6347 && (fAccess & IEM_ACCESS_TYPE_WRITE)
6348 && ( ( IEM_GET_CPL(pVCpu) == 3
6349 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6350 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
6351 {
6352 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
6353 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
6354 }
6355
6356 /* Kernel memory accessed by userland? */
6357 if ( (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER)
6358 && IEM_GET_CPL(pVCpu) == 3
6359 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6360 {
6361 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
6362 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, VERR_ACCESS_DENIED);
6363 }
6364 }
6365
6366 /* Look up the physical page info if necessary. */
6367 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6368# ifdef IN_RING3
6369 pbMem = pTlbe->pbMappingR3;
6370# else
6371 pbMem = NULL;
6372# endif
6373 else
6374 {
6375 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
6376 { /* likely */ }
6377 else
6378 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
6379 pTlbe->pbMappingR3 = NULL;
6380 pTlbe->fFlagsAndPhysRev &= ~IEMTLBE_GCPHYS2PTR_MASK;
6381 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
6382 &pbMem, &pTlbe->fFlagsAndPhysRev);
6383 AssertRCReturn(rc, rc);
6384# ifdef IN_RING3
6385 pTlbe->pbMappingR3 = pbMem;
6386# endif
6387 }
6388 }
6389 else
6390 {
6391 pVCpu->iem.s.DataTlb.cTlbMisses++;
6392
6393 /* This page table walking will set A bits as required by the access while performing the walk.
6394 ASSUMES these are set when the address is translated rather than on commit... */
6395 /** @todo testcase: check when A bits are actually set by the CPU for code. */
6396 PGMPTWALKFAST WalkFast;
6397 AssertCompile(IEM_ACCESS_TYPE_READ == PGMQPAGE_F_READ);
6398 AssertCompile(IEM_ACCESS_TYPE_WRITE == PGMQPAGE_F_WRITE);
6399 AssertCompile(IEM_ACCESS_TYPE_EXEC == PGMQPAGE_F_EXECUTE);
6400 AssertCompile(X86_CR0_WP == PGMQPAGE_F_CR0_WP0);
6401 uint32_t fQPage = (fAccess & (PGMQPAGE_F_READ | IEM_ACCESS_TYPE_WRITE | PGMQPAGE_F_EXECUTE))
6402 | (((uint32_t)pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP) ^ X86_CR0_WP);
6403 if (IEM_GET_CPL(pVCpu) == 3 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6404 fQPage |= PGMQPAGE_F_USER_MODE;
6405 int rc = PGMGstQueryPageFast(pVCpu, GCPtrMem, fQPage, &WalkFast);
6406 if (RT_SUCCESS(rc))
6407 Assert((WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED) && WalkFast.fFailed == PGM_WALKFAIL_SUCCESS);
6408 else
6409 {
6410 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
6411# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6412 if (WalkFast.fFailed & PGM_WALKFAIL_EPT)
6413 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
6414# endif
6415 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, rc);
6416 }
6417
6418 pTlbe->uTag = uTag;
6419 pTlbe->fFlagsAndPhysRev = ~WalkFast.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A); /* skipping NX */
6420 RTGCPHYS const GCPhysPg = WalkFast.GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
6421 pTlbe->GCPhys = GCPhysPg;
6422 pTlbe->pbMappingR3 = NULL;
6423 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_ACCESSED));
6424 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_DIRTY) || !(fAccess & IEM_ACCESS_TYPE_WRITE));
6425 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_WRITE) || !(fAccess & IEM_ACCESS_TYPE_WRITE));
6426 Assert( !(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER)
6427 || IEM_GET_CPL(pVCpu) != 3
6428 || (fAccess & IEM_ACCESS_WHAT_SYS));
6429
6430 /* Resolve the physical address. */
6431 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_GCPHYS2PTR_MASK));
6432 rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, GCPhysPg, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
6433 &pbMem, &pTlbe->fFlagsAndPhysRev);
6434 AssertRCReturn(rc, rc);
6435# ifdef IN_RING3
6436 pTlbe->pbMappingR3 = pbMem;
6437# endif
6438 }
6439
6440 /*
6441 * Check the physical page level access and mapping.
6442 */
6443 if ( !(pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ))
6444 || !(pTlbe->fFlagsAndPhysRev & ( (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PG_NO_WRITE : 0)
6445 | (fAccess & IEM_ACCESS_TYPE_READ ? IEMTLBE_F_PG_NO_READ : 0))) )
6446 { /* probably likely */ }
6447 else
6448 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem,
6449 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,
6450 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED
6451 : pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? VERR_PGM_PHYS_TLB_CATCH_ALL
6452 : VERR_PGM_PHYS_TLB_CATCH_WRITE);
6453 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3)); /* ASSUMPTIONS about PGMPhysIemGCPhys2PtrNoLock behaviour. */
6454
6455 if (pbMem)
6456 {
6457 Assert(!((uintptr_t)pbMem & GUEST_PAGE_OFFSET_MASK));
6458 pbMem = pbMem + (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6459 fAccess |= IEM_ACCESS_NOT_LOCKED;
6460 }
6461 else
6462 {
6463 Assert(!(fAccess & IEM_ACCESS_NOT_LOCKED));
6464 RTGCPHYS const GCPhysFirst = pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6465 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6466 if (rcStrict != VINF_SUCCESS)
6467 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem, GCPhysFirst, fAccess, rcStrict);
6468 }
6469
6470 void * const pvMem = pbMem;
6471
6472 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6473 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6474 if (fAccess & IEM_ACCESS_TYPE_READ)
6475 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6476
6477#else /* !IEM_WITH_DATA_TLB */
6478
6479 RTGCPHYS GCPhysFirst;
6480 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, &GCPhysFirst);
6481 if (rcStrict != VINF_SUCCESS)
6482 return rcStrict;
6483
6484 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6485 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6486 if (fAccess & IEM_ACCESS_TYPE_READ)
6487 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6488
6489 void *pvMem;
6490 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6491 if (rcStrict != VINF_SUCCESS)
6492 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem, GCPhysFirst, fAccess, rcStrict);
6493
6494#endif /* !IEM_WITH_DATA_TLB */
6495
6496 /*
6497 * Fill in the mapping table entry.
6498 */
6499 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
6500 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
6501 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6502 pVCpu->iem.s.cActiveMappings += 1;
6503
6504 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6505 *ppvMem = pvMem;
6506 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
6507 AssertCompile(IEM_ACCESS_TYPE_MASK <= 0xf);
6508 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.aMemMappings) < 8);
6509
6510 return VINF_SUCCESS;
6511}
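/*
 * Illustrative sketch (not part of the build): the typical map/commit pattern the
 * simpler accessors further down in this file follow (see for instance
 * iemMemFetchDataU32_ZX_U64).  The alignment control combines the natural
 * alignment mask in the low 16 bits with optional IEM_MEMMAP_F_ALIGN_XXX flags in
 * the high bits.  The function name iemExampleReadU32 is hypothetical.
 */
#if 0
static VBOXSTRICTRC iemExampleReadU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
{
    uint8_t         bUnmapInfo;
    uint32_t const *pu32Src;
    VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu32Src, &bUnmapInfo, sizeof(*pu32Src), iSegReg, GCPtrMem,
                                      IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1 /* alignment mask, no extra flags */);
    if (rcStrict == VINF_SUCCESS)
    {
        *pu32Dst = *pu32Src;                                    /* copy while the page is mapped */
        rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);     /* commit (bounce buffer) and release */
    }
    return rcStrict;
}
#endif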
6512
6513
6514/**
6515 * Commits the guest memory if bounce buffered and unmaps it.
6516 *
6517 * @returns Strict VBox status code.
6518 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6519 * @param bUnmapInfo Unmap info set by iemMemMap.
6520 */
6521VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
6522{
6523 uintptr_t const iMemMap = bUnmapInfo & 0x7;
6524 AssertMsgReturn( (bUnmapInfo & 0x08)
6525 && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6526 && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf)) == ((unsigned)bUnmapInfo >> 4),
6527 ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess),
6528 VERR_NOT_FOUND);
6529
6530 /* If it's bounce buffered, we may need to write back the buffer. */
6531 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6532 {
6533 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6534 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
6535 }
6536 /* Otherwise unlock it. */
6537 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6538 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6539
6540 /* Free the entry. */
6541 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6542 Assert(pVCpu->iem.s.cActiveMappings != 0);
6543 pVCpu->iem.s.cActiveMappings--;
6544 return VINF_SUCCESS;
6545}
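/*
 * Illustrative sketch (not part of the build): the layout of the bUnmapInfo byte
 * produced by iemMemMap and decoded above.  Bits 2:0 hold the mapping table index,
 * bit 3 marks the value as valid, and bits 7:4 carry the IEM_ACCESS_TYPE_XXX bits
 * (which fit in a nibble, see the AssertCompile in iemMemMap).  The helper names
 * are hypothetical.
 */
#if 0
static uint8_t iemExampleEncodeUnmapInfo(unsigned iMemMap, uint32_t fAccess)
{
    return (uint8_t)(iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4));
}

static unsigned iemExampleUnmapInfoToIndex(uint8_t bUnmapInfo)
{
    Assert(bUnmapInfo & 0x08);      /* must have been produced by iemMemMap */
    return bUnmapInfo & 0x7;        /* index into aMemMappings */
}
#endif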
6546
6547
6548/**
6549 * Rolls back the guest memory (conceptually only) and unmaps it.
6550 *
6551 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6552 * @param bUnmapInfo Unmap info set by iemMemMap.
6553 */
6554void iemMemRollbackAndUnmap(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
6555{
6556 uintptr_t const iMemMap = bUnmapInfo & 0x7;
6557 AssertMsgReturnVoid( (bUnmapInfo & 0x08)
6558 && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6559 && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf))
6560 == ((unsigned)bUnmapInfo >> 4),
6561 ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess));
6562
6563 /* Unlock it if necessary. */
6564 if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6565 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6566
6567 /* Free the entry. */
6568 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6569 Assert(pVCpu->iem.s.cActiveMappings != 0);
6570 pVCpu->iem.s.cActiveMappings--;
6571}
6572
6573#ifdef IEM_WITH_SETJMP
6574
6575/**
6576 * Maps the specified guest memory for the given kind of access, longjmp on
6577 * error.
6578 *
6579 * This may be using bounce buffering of the memory if it's crossing a page
6580 * boundary or if there is an access handler installed for any of it. Because
6581 * of lock prefix guarantees, we're in for some extra clutter when this
6582 * happens.
6583 *
6584 * This may raise a \#GP, \#SS, \#PF or \#AC.
6585 *
6586 * @returns Pointer to the mapped memory.
6587 *
6588 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6589 * @param bUnmapInfo Where to return unmap info to be passed to
6590 * iemMemCommitAndUnmapJmp, iemMemCommitAndUnmapRwSafeJmp,
6591 * iemMemCommitAndUnmapWoSafeJmp,
6592 * iemMemCommitAndUnmapRoSafeJmp,
6593 * iemMemRollbackAndUnmapWoSafe or iemMemRollbackAndUnmap
6594 * when done.
6595 * @param cbMem The number of bytes to map. This is usually 1,
6596 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
6597 * string operations it can be up to a page.
6598 * @param iSegReg The index of the segment register to use for
6599 * this access. The base and limits are checked.
6600 * Use UINT8_MAX to indicate that no segmentation
6601 * is required (for IDT, GDT and LDT accesses).
6602 * @param GCPtrMem The address of the guest memory.
6603 * @param fAccess How the memory is being accessed. The
6604 * IEM_ACCESS_TYPE_XXX part is used to figure out how to
6605 * map the memory, while the IEM_ACCESS_WHAT_XXX part is
6606 * used when raising exceptions. The IEM_ACCESS_ATOMIC and
6607 * IEM_ACCESS_PARTIAL_WRITE bits are also allowed to be
6608 * set.
6609 * @param uAlignCtl Alignment control:
6610 * - Bits 15:0 is the alignment mask.
6611 * - Bits 31:16 for flags like IEM_MEMMAP_F_ALIGN_GP,
6612 * IEM_MEMMAP_F_ALIGN_SSE, and
6613 * IEM_MEMMAP_F_ALIGN_GP_OR_AC.
6614 * Pass zero to skip alignment.
6615 */
6616void *iemMemMapJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess,
6617 uint32_t uAlignCtl) IEM_NOEXCEPT_MAY_LONGJMP
6618{
6619 /*
6620 * Check the input, check segment access and adjust address
6621 * with segment base.
6622 */
6623 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
6624 Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK | IEM_ACCESS_ATOMIC | IEM_ACCESS_PARTIAL_WRITE)));
6625 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
6626
6627 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
6628 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
6629 else IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6630
6631 /*
6632 * Alignment check.
6633 */
6634 if ( (GCPtrMem & (uAlignCtl & UINT16_MAX)) == 0 )
6635 { /* likelyish */ }
6636 else
6637 {
6638 /* Misaligned access. */
6639 if ((fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6640 {
6641 if ( !(uAlignCtl & IEM_MEMMAP_F_ALIGN_GP)
6642 || ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_SSE)
6643 && (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) )
6644 {
6645 AssertCompile(X86_CR0_AM == X86_EFL_AC);
6646
6647 if (iemMemAreAlignmentChecksEnabled(pVCpu))
6648 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
6649 }
6650 else if ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_GP_OR_AC)
6651 && (GCPtrMem & 3) /* The value 4 matches 10980xe's FXSAVE and helps make bs3-cpu-basic2 work. */
6652 /** @todo may only apply to 2, 4 or 8 byte misalignments depending on the CPU
6653 * implementation. See FXSAVE/FRSTOR/XSAVE/XRSTOR/++. Using 4 for now as
6654 * that's what FXSAVE does on a 10980xe. */
6655 && iemMemAreAlignmentChecksEnabled(pVCpu))
6656 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
6657 else
6658 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
6659 }
6660
6661#if (defined(RT_ARCH_AMD64) && defined(RT_OS_LINUX)) || defined(RT_ARCH_ARM64)
6662        /* If the access is atomic there are host platform alignment restrictions
6663 we need to conform with. */
6664 if ( !(fAccess & IEM_ACCESS_ATOMIC)
6665# if defined(RT_ARCH_AMD64)
6666 || (64U - (GCPtrMem & 63U) >= cbMem) /* split-lock detection. ASSUMES 64 byte cache line. */
6667# elif defined(RT_ARCH_ARM64)
6668 || (16U - (GCPtrMem & 15U) >= cbMem) /* LSE2 allows atomics anywhere within a 16 byte sized & aligned block. */
6669# else
6670# error port me
6671# endif
6672 )
6673 { /* okay */ }
6674 else
6675 {
6676 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv LB %u - misaligned atomic fallback.\n", GCPtrMem, cbMem));
6677 pVCpu->iem.s.cMisalignedAtomics += 1;
6678 IEM_DO_LONGJMP(pVCpu, VINF_EM_EMULATE_SPLIT_LOCK);
6679 }
6680#endif
6681 }
6682
6683 /*
6684 * Figure out which mapping entry to use.
6685 */
6686 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
6687 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6688 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
6689 {
6690 iMemMap = iemMemMapFindFree(pVCpu);
6691 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
6692 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
6693 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
6694 pVCpu->iem.s.aMemMappings[2].fAccess),
6695 IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_9));
6696 }
6697
6698 /*
6699 * Crossing a page boundary?
6700 */
6701 if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE)
6702 { /* No (likely). */ }
6703 else
6704 {
6705 void *pvMem;
6706 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, pbUnmapInfo, cbMem, GCPtrMem, fAccess);
6707 if (rcStrict == VINF_SUCCESS)
6708 return pvMem;
6709 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6710 }
6711
6712#ifdef IEM_WITH_DATA_TLB
6713 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
6714
6715 /*
6716 * Get the TLB entry for this page checking that it has the A & D bits
6717 * set as per fAccess flags.
6718 */
6719 /** @todo make the caller pass these in with fAccess. */
6720 uint64_t const fNoUser = (fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS && IEM_GET_CPL(pVCpu) == 3
6721 ? IEMTLBE_F_PT_NO_USER : 0;
6722 uint64_t const fNoWriteNoDirty = fAccess & IEM_ACCESS_TYPE_WRITE
6723 ? IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PT_NO_DIRTY
6724 | ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)
6725 || (IEM_GET_CPL(pVCpu) == 3 && (fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6726 ? IEMTLBE_F_PT_NO_WRITE : 0)
6727 : 0;
6728 uint64_t const fNoRead = fAccess & IEM_ACCESS_TYPE_READ ? IEMTLBE_F_PG_NO_READ : 0;
6729
6730 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
6731 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
6732 if ( pTlbe->uTag == uTag
6733 && !(pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_ACCESSED | (fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY))) )
6734 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
6735 else
6736 {
6737 pVCpu->iem.s.DataTlb.cTlbMisses++;
6738
6739 /* This page table walking will set A and D bits as required by the
6740 access while performing the walk.
6741 ASSUMES these are set when the address is translated rather than on commit... */
6742 /** @todo testcase: check when A and D bits are actually set by the CPU. */
6743 PGMPTWALKFAST WalkFast;
6744 AssertCompile(IEM_ACCESS_TYPE_READ == PGMQPAGE_F_READ);
6745 AssertCompile(IEM_ACCESS_TYPE_WRITE == PGMQPAGE_F_WRITE);
6746 AssertCompile(IEM_ACCESS_TYPE_EXEC == PGMQPAGE_F_EXECUTE);
6747 AssertCompile(X86_CR0_WP == PGMQPAGE_F_CR0_WP0);
6748 uint32_t fQPage = (fAccess & (PGMQPAGE_F_READ | IEM_ACCESS_TYPE_WRITE | PGMQPAGE_F_EXECUTE))
6749 | (((uint32_t)pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP) ^ X86_CR0_WP);
6750 if (IEM_GET_CPL(pVCpu) == 3 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6751 fQPage |= PGMQPAGE_F_USER_MODE;
6752 int rc = PGMGstQueryPageFast(pVCpu, GCPtrMem, fQPage, &WalkFast);
6753 if (RT_SUCCESS(rc))
6754 Assert((WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED) && WalkFast.fFailed == PGM_WALKFAIL_SUCCESS);
6755 else
6756 {
6757 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
6758# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6759 if (WalkFast.fFailed & PGM_WALKFAIL_EPT)
6760                IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
6761# endif
6762 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, rc);
6763 }
6764
6765 pTlbe->uTag = uTag;
6766 pTlbe->fFlagsAndPhysRev = ~WalkFast.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A); /* skipping NX */
6767 RTGCPHYS const GCPhysPg = WalkFast.GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
6768 pTlbe->GCPhys = GCPhysPg;
6769 pTlbe->pbMappingR3 = NULL;
6770 Assert(!(pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY) | IEMTLBE_F_PT_NO_ACCESSED)));
6771 Assert(!(pTlbe->fFlagsAndPhysRev & fNoWriteNoDirty & IEMTLBE_F_PT_NO_WRITE));
6772 Assert(!(pTlbe->fFlagsAndPhysRev & fNoUser & IEMTLBE_F_PT_NO_USER));
6773
6774 /* Resolve the physical address. */
6775 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_GCPHYS2PTR_MASK));
6776 uint8_t *pbMemFullLoad = NULL;
6777 rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, GCPhysPg, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
6778 &pbMemFullLoad, &pTlbe->fFlagsAndPhysRev);
6779 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
6780# ifdef IN_RING3
6781 pTlbe->pbMappingR3 = pbMemFullLoad;
6782# endif
6783 }
6784
6785 /*
6786 * Check the flags and physical revision.
6787 * Note! This will revalidate the uTlbPhysRev after a full load. This is
6788 * just to keep the code structure simple (i.e. avoid gotos or similar).
6789 */
6790 uint8_t *pbMem;
6791 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PT_NO_ACCESSED | fNoRead | fNoWriteNoDirty | fNoUser))
6792 == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6793# ifdef IN_RING3
6794 pbMem = pTlbe->pbMappingR3;
6795# else
6796 pbMem = NULL;
6797# endif
6798 else
6799 {
6800 Assert(!(pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY) | IEMTLBE_F_PT_NO_ACCESSED)));
6801
6802 /*
6803 * Okay, something isn't quite right or needs refreshing.
6804 */
6805 /* Write to read only memory? */
6806 if (pTlbe->fFlagsAndPhysRev & fNoWriteNoDirty & IEMTLBE_F_PT_NO_WRITE)
6807 {
6808 LogEx(LOG_GROUP_IEM, ("iemMemMapJmp: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
6809# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6810/** @todo TLB: EPT isn't integrated into the TLB stuff, so we don't know whether
6811 * to trigger an \#PG or a VM nested paging exit here yet! */
6812 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6813 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6814# endif
6815 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
6816 }
6817
6818 /* Kernel memory accessed by userland? */
6819 if (pTlbe->fFlagsAndPhysRev & fNoUser & IEMTLBE_F_PT_NO_USER)
6820 {
6821 LogEx(LOG_GROUP_IEM, ("iemMemMapJmp: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
6822# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6823/** @todo TLB: See above. */
6824 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6825 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6826# endif
6827 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, VERR_ACCESS_DENIED);
6828 }
6829
6830 /*
6831 * Check if the physical page info needs updating.
6832 */
6833 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6834# ifdef IN_RING3
6835 pbMem = pTlbe->pbMappingR3;
6836# else
6837 pbMem = NULL;
6838# endif
6839 else
6840 {
6841 pTlbe->pbMappingR3 = NULL;
6842 pTlbe->fFlagsAndPhysRev &= ~IEMTLBE_GCPHYS2PTR_MASK;
6843 pbMem = NULL;
6844 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
6845 &pbMem, &pTlbe->fFlagsAndPhysRev);
6846 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
6847# ifdef IN_RING3
6848 pTlbe->pbMappingR3 = pbMem;
6849# endif
6850 }
6851
6852 /*
6853 * Check the physical page level access and mapping.
6854 */
6855 if (!(pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty | fNoRead) & (IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ))))
6856 { /* probably likely */ }
6857 else
6858 {
6859 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, (void **)&pbMem, pbUnmapInfo, cbMem,
6860 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,
6861 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED
6862 : pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? VERR_PGM_PHYS_TLB_CATCH_ALL
6863 : VERR_PGM_PHYS_TLB_CATCH_WRITE);
6864 if (rcStrict == VINF_SUCCESS)
6865 return pbMem;
6866 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6867 }
6868 }
6869 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3)); /* ASSUMPTIONS about PGMPhysIemGCPhys2PtrNoLock behaviour. */
6870
6871 if (pbMem)
6872 {
6873 Assert(!((uintptr_t)pbMem & GUEST_PAGE_OFFSET_MASK));
6874 pbMem = pbMem + (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6875 fAccess |= IEM_ACCESS_NOT_LOCKED;
6876 }
6877 else
6878 {
6879 Assert(!(fAccess & IEM_ACCESS_NOT_LOCKED));
6880 RTGCPHYS const GCPhysFirst = pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6881 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6882 if (rcStrict == VINF_SUCCESS)
6883 {
6884 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
6885 return pbMem;
6886 }
6887 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6888 }
6889
6890 void * const pvMem = pbMem;
6891
6892 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6893 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6894 if (fAccess & IEM_ACCESS_TYPE_READ)
6895 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6896
6897#else /* !IEM_WITH_DATA_TLB */
6898
6899
6900 RTGCPHYS GCPhysFirst;
6901 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, &GCPhysFirst);
6902 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
6903 else IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6904
6905 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6906 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6907 if (fAccess & IEM_ACCESS_TYPE_READ)
6908 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6909
6910 void *pvMem;
6911 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6912 if (rcStrict == VINF_SUCCESS)
6913 { /* likely */ }
6914 else
6915 {
6916 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, pbUnmapInfo, cbMem, GCPhysFirst, fAccess, rcStrict);
6917 if (rcStrict == VINF_SUCCESS)
6918 return pvMem;
6919 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6920 }
6921
6922#endif /* !IEM_WITH_DATA_TLB */
6923
6924 /*
6925 * Fill in the mapping table entry.
6926 */
6927 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
6928 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
6929 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6930 pVCpu->iem.s.cActiveMappings++;
6931
6932 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6933
6934 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
6935 return pvMem;
6936}
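/*
 * Illustrative sketch (not part of the build): the throw-style pattern used by the
 * Jmp accessors further down (e.g. iemMemStoreDataU128AlignedSseJmp).  Errors never
 * return here; iemMemMapJmp and iemMemCommitAndUnmapJmp longjmp back to the
 * instruction dispatcher instead.  The function name is hypothetical.
 */
#if 0
static void iemExampleStoreU32Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value) IEM_NOEXCEPT_MAY_LONGJMP
{
    uint8_t   bUnmapInfo;
    uint32_t *pu32Dst = (uint32_t *)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu32Dst), iSegReg, GCPtrMem,
                                                 IEM_ACCESS_DATA_W, sizeof(*pu32Dst) - 1);
    *pu32Dst = u32Value;                            /* write while the page is mapped */
    iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);     /* commit; longjmps on failure */
}
#endif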
6937
6938
6939/**
6940 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
6941 *
6942 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6943 * @param   bUnmapInfo          Unmap info set by iemMemMap or
6944 *                              iemMemMapJmp.
6945 */
6946void iemMemCommitAndUnmapJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
6947{
6948 uintptr_t const iMemMap = bUnmapInfo & 0x7;
6949 AssertMsgReturnVoid( (bUnmapInfo & 0x08)
6950 && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6951 && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf))
6952 == ((unsigned)bUnmapInfo >> 4),
6953 ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess));
6954
6955 /* If it's bounce buffered, we may need to write back the buffer. */
6956 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6957 {
6958 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6959 {
6960 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
6961 if (rcStrict == VINF_SUCCESS)
6962 return;
6963 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6964 }
6965 }
6966 /* Otherwise unlock it. */
6967 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6968 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6969
6970 /* Free the entry. */
6971 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6972 Assert(pVCpu->iem.s.cActiveMappings != 0);
6973 pVCpu->iem.s.cActiveMappings--;
6974}
6975
6976
6977/** Fallback for iemMemCommitAndUnmapRwJmp. */
6978void iemMemCommitAndUnmapRwSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
6979{
6980 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE));
6981 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
6982}
6983
6984
6985/** Fallback for iemMemCommitAndUnmapAtJmp. */
6986void iemMemCommitAndUnmapAtSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
6987{
6988 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE));
6989 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
6990}
6991
6992
6993/** Fallback for iemMemCommitAndUnmapWoJmp. */
6994void iemMemCommitAndUnmapWoSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
6995{
6996 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_WRITE);
6997 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
6998}
6999
7000
7001/** Fallback for iemMemCommitAndUnmapRoJmp. */
7002void iemMemCommitAndUnmapRoSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
7003{
7004 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_READ);
7005 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7006}
7007
7008
7009/** Fallback for iemMemRollbackAndUnmapWo. */
7010void iemMemRollbackAndUnmapWoSafe(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
7011{
7012 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_WRITE);
7013 iemMemRollbackAndUnmap(pVCpu, bUnmapInfo);
7014}
7015
7016#endif /* IEM_WITH_SETJMP */
7017
7018#ifndef IN_RING3
7019/**
7020 * Commits the guest memory if bounce buffered and unmaps it.  If any bounce buffer
7021 * part shows trouble, the write is postponed to ring-3 (VMCPU_FF_IEM gets set).
7022 *
7023 * Allows the instruction to be completed and retired, while the IEM user will
7024 * return to ring-3 immediately afterwards and do the postponed writes there.
7025 *
7026 * @returns VBox status code (no strict statuses). Caller must check
7027 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
7028 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7029 * @param   bUnmapInfo          Unmap info set by iemMemMap, identifying the
7030 *                              mapping entry to commit and unmap.
7031 */
7032VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
7033{
7034 uintptr_t const iMemMap = bUnmapInfo & 0x7;
7035 AssertMsgReturn( (bUnmapInfo & 0x08)
7036 && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
7037 && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf))
7038 == ((unsigned)bUnmapInfo >> 4),
7039 ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess),
7040 VERR_NOT_FOUND);
7041
7042 /* If it's bounce buffered, we may need to write back the buffer. */
7043 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
7044 {
7045 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
7046 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
7047 }
7048 /* Otherwise unlock it. */
7049 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
7050 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
7051
7052 /* Free the entry. */
7053 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
7054 Assert(pVCpu->iem.s.cActiveMappings != 0);
7055 pVCpu->iem.s.cActiveMappings--;
7056 return VINF_SUCCESS;
7057}
7058#endif
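/*
 * Illustrative sketch (not part of the build): how a non-ring-3 caller of
 * iemMemCommitAndUnmapPostponeTroubleToR3 is expected to behave per the note
 * above.  VMCPU_FF_IEM signals that part of the write was postponed, so the
 * instruction must not be repeated before ring-3 has flushed it.  The function
 * name and the exact status code used to force the ring-3 trip are hypothetical.
 */
#if 0
# ifndef IN_RING3
static VBOXSTRICTRC iemExampleRetireWithPostponedWrite(PVMCPUCC pVCpu, uint8_t bUnmapInfo)
{
    VBOXSTRICTRC rcStrict = iemMemCommitAndUnmapPostponeTroubleToR3(pVCpu, bUnmapInfo);
    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
        return VINF_EM_RAW_TO_R3;   /* let ring-3 perform the postponed write(s) first */
    return rcStrict;
}
# endif
#endif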
7059
7060
7061/**
7062 * Rollbacks mappings, releasing page locks and such.
7063 *
7064 * The caller shall only call this after checking cActiveMappings.
7065 *
7066 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7067 */
7068void iemMemRollback(PVMCPUCC pVCpu) RT_NOEXCEPT
7069{
7070 Assert(pVCpu->iem.s.cActiveMappings > 0);
7071
7072 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
7073 while (iMemMap-- > 0)
7074 {
7075 uint32_t const fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
7076 if (fAccess != IEM_ACCESS_INVALID)
7077 {
7078 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
7079 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
7080 if (!(fAccess & (IEM_ACCESS_BOUNCE_BUFFERED | IEM_ACCESS_NOT_LOCKED)))
7081 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
7082 AssertMsg(pVCpu->iem.s.cActiveMappings > 0,
7083 ("iMemMap=%u fAccess=%#x pv=%p GCPhysFirst=%RGp GCPhysSecond=%RGp\n",
7084 iMemMap, fAccess, pVCpu->iem.s.aMemMappings[iMemMap].pv,
7085 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond));
7086 pVCpu->iem.s.cActiveMappings--;
7087 }
7088 }
7089}
7090
7091
7092/*
7093 * Instantiate R/W templates.
7094 */
7095#define TMPL_MEM_WITH_STACK
7096
7097#define TMPL_MEM_TYPE uint8_t
7098#define TMPL_MEM_FN_SUFF U8
7099#define TMPL_MEM_FMT_TYPE "%#04x"
7100#define TMPL_MEM_FMT_DESC "byte"
7101#include "IEMAllMemRWTmpl.cpp.h"
7102
7103#define TMPL_MEM_TYPE uint16_t
7104#define TMPL_MEM_FN_SUFF U16
7105#define TMPL_MEM_FMT_TYPE "%#06x"
7106#define TMPL_MEM_FMT_DESC "word"
7107#include "IEMAllMemRWTmpl.cpp.h"
7108
7109#define TMPL_WITH_PUSH_SREG
7110#define TMPL_MEM_TYPE uint32_t
7111#define TMPL_MEM_FN_SUFF U32
7112#define TMPL_MEM_FMT_TYPE "%#010x"
7113#define TMPL_MEM_FMT_DESC "dword"
7114#include "IEMAllMemRWTmpl.cpp.h"
7115#undef TMPL_WITH_PUSH_SREG
7116
7117#define TMPL_MEM_TYPE uint64_t
7118#define TMPL_MEM_FN_SUFF U64
7119#define TMPL_MEM_FMT_TYPE "%#018RX64"
7120#define TMPL_MEM_FMT_DESC "qword"
7121#include "IEMAllMemRWTmpl.cpp.h"
7122
7123#undef TMPL_MEM_WITH_STACK
7124
7125#define TMPL_MEM_TYPE uint64_t
7126#define TMPL_MEM_TYPE_ALIGN (sizeof(uint64_t) * 2 - 1)
7127#define TMPL_MEM_FN_SUFF U64AlignedU128
7128#define TMPL_MEM_FMT_TYPE "%#018RX64"
7129#define TMPL_MEM_FMT_DESC "qword"
7130#include "IEMAllMemRWTmpl.cpp.h"
7131
7132/* See IEMAllMemRWTmplInline.cpp.h */
7133#define TMPL_MEM_BY_REF
7134
7135#define TMPL_MEM_TYPE RTFLOAT80U
7136#define TMPL_MEM_TYPE_ALIGN (sizeof(uint64_t) - 1)
7137#define TMPL_MEM_FN_SUFF R80
7138#define TMPL_MEM_FMT_TYPE "%.10Rhxs"
7139#define TMPL_MEM_FMT_DESC "tword"
7140#include "IEMAllMemRWTmpl.cpp.h"
7141
7142#define TMPL_MEM_TYPE RTPBCD80U
7143#define TMPL_MEM_TYPE_ALIGN (sizeof(uint64_t) - 1) /** @todo testcase: 80-bit BCD alignment */
7144#define TMPL_MEM_FN_SUFF D80
7145#define TMPL_MEM_FMT_TYPE "%.10Rhxs"
7146#define TMPL_MEM_FMT_DESC "tword"
7147#include "IEMAllMemRWTmpl.cpp.h"
7148
7149#define TMPL_MEM_TYPE RTUINT128U
7150#define TMPL_MEM_TYPE_ALIGN (sizeof(RTUINT128U) - 1)
7151#define TMPL_MEM_FN_SUFF U128
7152#define TMPL_MEM_FMT_TYPE "%.16Rhxs"
7153#define TMPL_MEM_FMT_DESC "dqword"
7154#include "IEMAllMemRWTmpl.cpp.h"
7155
7156#define TMPL_MEM_TYPE RTUINT128U
7157#define TMPL_MEM_TYPE_ALIGN (sizeof(RTUINT128U) - 1)
7158#define TMPL_MEM_MAP_FLAGS_ADD (IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE)
7159#define TMPL_MEM_FN_SUFF U128AlignedSse
7160#define TMPL_MEM_FMT_TYPE "%.16Rhxs"
7161#define TMPL_MEM_FMT_DESC "dqword"
7162#include "IEMAllMemRWTmpl.cpp.h"
7163
7164#define TMPL_MEM_TYPE RTUINT128U
7165#define TMPL_MEM_TYPE_ALIGN 0
7166#define TMPL_MEM_FN_SUFF U128NoAc
7167#define TMPL_MEM_FMT_TYPE "%.16Rhxs"
7168#define TMPL_MEM_FMT_DESC "dqword"
7169#include "IEMAllMemRWTmpl.cpp.h"
7170
7171#define TMPL_MEM_TYPE RTUINT256U
7172#define TMPL_MEM_TYPE_ALIGN 0
7173#define TMPL_MEM_FN_SUFF U256NoAc
7174#define TMPL_MEM_FMT_TYPE "%.32Rhxs"
7175#define TMPL_MEM_FMT_DESC "qqword"
7176#include "IEMAllMemRWTmpl.cpp.h"
7177
7178#define TMPL_MEM_TYPE RTUINT256U
7179#define TMPL_MEM_TYPE_ALIGN (sizeof(RTUINT256U) - 1)
7180#define TMPL_MEM_MAP_FLAGS_ADD IEM_MEMMAP_F_ALIGN_GP
7181#define TMPL_MEM_FN_SUFF U256AlignedAvx
7182#define TMPL_MEM_FMT_TYPE "%.32Rhxs"
7183#define TMPL_MEM_FMT_DESC "qqword"
7184#include "IEMAllMemRWTmpl.cpp.h"
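/*
 * Illustrative sketch (not part of the build): roughly what one instantiation of
 * IEMAllMemRWTmpl.cpp.h provides.  TMPL_MEM_TYPE and TMPL_MEM_FN_SUFF select the
 * data type and the function name suffix, so e.g. the uint16_t instantiation
 * yields accessors such as iemMemFetchDataU16 and iemMemStoreDataU16 (the fetch
 * variants are used by iemMemFetchDataXdtr below).  The exact prototypes live in
 * the template header; treat these as sketches only.
 */
#if 0
VBOXSTRICTRC iemMemFetchDataU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
VBOXSTRICTRC iemMemStoreDataU16(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value) RT_NOEXCEPT;
#endif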
7185
7186/**
7187 * Fetches a data dword and zero extends it to a qword.
7188 *
7189 * @returns Strict VBox status code.
7190 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7191 * @param pu64Dst Where to return the qword.
7192 * @param iSegReg The index of the segment register to use for
7193 * this access. The base and limits are checked.
7194 * @param GCPtrMem The address of the guest memory.
7195 */
7196VBOXSTRICTRC iemMemFetchDataU32_ZX_U64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7197{
7198 /* The lazy approach for now... */
7199 uint8_t bUnmapInfo;
7200 uint32_t const *pu32Src;
7201 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, &bUnmapInfo, sizeof(*pu32Src), iSegReg, GCPtrMem,
7202 IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
7203 if (rc == VINF_SUCCESS)
7204 {
7205 *pu64Dst = *pu32Src;
7206 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7207 Log(("IEM RD dword %d|%RGv: %#010RX64\n", iSegReg, GCPtrMem, *pu64Dst));
7208 }
7209 return rc;
7210}
7211
7212
7213#ifdef SOME_UNUSED_FUNCTION
7214/**
7215 * Fetches a data dword and sign extends it to a qword.
7216 *
7217 * @returns Strict VBox status code.
7218 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7219 * @param pu64Dst Where to return the sign extended value.
7220 * @param iSegReg The index of the segment register to use for
7221 * this access. The base and limits are checked.
7222 * @param GCPtrMem The address of the guest memory.
7223 */
7224VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7225{
7226 /* The lazy approach for now... */
7227 uint8_t bUnmapInfo;
7228 int32_t const *pi32Src;
7229 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, &bUnmapInfo, sizeof(*pi32Src), iSegReg, GCPtrMem,
7230 IEM_ACCESS_DATA_R, sizeof(*pi32Src) - 1);
7231 if (rc == VINF_SUCCESS)
7232 {
7233 *pu64Dst = *pi32Src;
7234 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7235 Log(("IEM RD dword %d|%RGv: %#010x\n", iSegReg, GCPtrMem, (uint32_t)*pu64Dst));
7236 }
7237#ifdef __GNUC__ /* warning: GCC may be a royal pain */
7238 else
7239 *pu64Dst = 0;
7240#endif
7241 return rc;
7242}
7243#endif
7244
7245
7246/**
7247 * Fetches a descriptor register (lgdt, lidt).
7248 *
7249 * @returns Strict VBox status code.
7250 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7251 * @param pcbLimit Where to return the limit.
7252 * @param pGCPtrBase Where to return the base.
7253 * @param iSegReg The index of the segment register to use for
7254 * this access. The base and limits are checked.
7255 * @param GCPtrMem The address of the guest memory.
7256 * @param enmOpSize The effective operand size.
7257 */
7258VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPUCC pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
7259 RTGCPTR GCPtrMem, IEMMODE enmOpSize) RT_NOEXCEPT
7260{
7261 /*
7262 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
7263 * little special:
7264 * - The two reads are done separately.
7265 *    - Operand size override works in 16-bit and 32-bit code, but not in 64-bit code.
7266 * - We suspect the 386 to actually commit the limit before the base in
7267 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
7268 * don't try emulate this eccentric behavior, because it's not well
7269 * enough understood and rather hard to trigger.
7270 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
7271 */
7272 VBOXSTRICTRC rcStrict;
7273 if (IEM_IS_64BIT_CODE(pVCpu))
7274 {
7275 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7276 if (rcStrict == VINF_SUCCESS)
7277 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
7278 }
7279 else
7280 {
7281 uint32_t uTmp = 0; /* (Visual C++ maybe used uninitialized) */
7282 if (enmOpSize == IEMMODE_32BIT)
7283 {
7284 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
7285 {
7286 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7287 if (rcStrict == VINF_SUCCESS)
7288 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7289 }
7290 else
7291 {
7292 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
7293 if (rcStrict == VINF_SUCCESS)
7294 {
7295 *pcbLimit = (uint16_t)uTmp;
7296 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7297 }
7298 }
7299 if (rcStrict == VINF_SUCCESS)
7300 *pGCPtrBase = uTmp;
7301 }
7302 else
7303 {
7304 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7305 if (rcStrict == VINF_SUCCESS)
7306 {
7307 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7308 if (rcStrict == VINF_SUCCESS)
7309 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
7310 }
7311 }
7312 }
7313 return rcStrict;
7314}
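/*
 * Summary sketch of what iemMemFetchDataXdtr above ends up reading, assuming the
 * standard 6/10 byte descriptor-table image in memory:
 *   - 64-bit code:                  limit = word at +0, base = qword at +2.
 *   - 32-bit operand size, non-486: limit = word at +0, base = dword at +2.
 *   - 32-bit operand size, 486:     limit = low word of the dword at +0, base = dword at +2.
 *   - 16-bit operand size:          limit = word at +0, base = dword at +2 masked to 24 bits.
 */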
7315
7316
7317/**
7318 * Stores a data dqword, SSE aligned.
7319 *
7320 * @returns Strict VBox status code.
7321 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7322 * @param iSegReg The index of the segment register to use for
7323 * this access. The base and limits are checked.
7324 * @param GCPtrMem The address of the guest memory.
7325 * @param u128Value The value to store.
7326 */
7327VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT
7328{
7329 /* The lazy approach for now... */
7330 uint8_t bUnmapInfo;
7331 PRTUINT128U pu128Dst;
7332 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, &bUnmapInfo, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W,
7333 (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7334 if (rc == VINF_SUCCESS)
7335 {
7336 pu128Dst->au64[0] = u128Value.au64[0];
7337 pu128Dst->au64[1] = u128Value.au64[1];
7338 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7339 Log5(("IEM WR dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
7340 }
7341 return rc;
7342}
7343
7344
7345#ifdef IEM_WITH_SETJMP
7346/**
7347 * Stores a data dqword, SSE aligned.
7348 *
7349 * @returns Strict VBox status code.
7350 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7351 * @param iSegReg The index of the segment register to use for
7352 * this access. The base and limits are checked.
7353 * @param GCPtrMem The address of the guest memory.
7354 * @param u128Value The value to store.
7355 */
7356void iemMemStoreDataU128AlignedSseJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem,
7357 RTUINT128U u128Value) IEM_NOEXCEPT_MAY_LONGJMP
7358{
7359 /* The lazy approach for now... */
7360 uint8_t bUnmapInfo;
7361 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W,
7362 (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7363 pu128Dst->au64[0] = u128Value.au64[0];
7364 pu128Dst->au64[1] = u128Value.au64[1];
7365 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7366 Log5(("IEM WR dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
7367}
7368#endif
7369
7370
7371/**
7372 * Stores a data qqword.
7373 *
7374 * @returns Strict VBox status code.
7375 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7376 * @param iSegReg The index of the segment register to use for
7377 * this access. The base and limits are checked.
7378 * @param GCPtrMem The address of the guest memory.
7379 * @param pu256Value Pointer to the value to store.
7380 */
7381VBOXSTRICTRC iemMemStoreDataU256(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
7382{
7383 /* The lazy approach for now... */
7384 uint8_t bUnmapInfo;
7385 PRTUINT256U pu256Dst;
7386 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, &bUnmapInfo, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7387 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7388 if (rc == VINF_SUCCESS)
7389 {
7390 pu256Dst->au64[0] = pu256Value->au64[0];
7391 pu256Dst->au64[1] = pu256Value->au64[1];
7392 pu256Dst->au64[2] = pu256Value->au64[2];
7393 pu256Dst->au64[3] = pu256Value->au64[3];
7394 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7395 Log5(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7396 }
7397 return rc;
7398}
7399
7400
7401#ifdef IEM_WITH_SETJMP
7402/**
7403 * Stores a data qqword, longjmp on error.
7404 *
7405 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7406 * @param iSegReg The index of the segment register to use for
7407 * this access. The base and limits are checked.
7408 * @param GCPtrMem The address of the guest memory.
7409 * @param pu256Value Pointer to the value to store.
7410 */
7411void iemMemStoreDataU256Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP
7412{
7413 /* The lazy approach for now... */
7414 uint8_t bUnmapInfo;
7415 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7416 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7417 pu256Dst->au64[0] = pu256Value->au64[0];
7418 pu256Dst->au64[1] = pu256Value->au64[1];
7419 pu256Dst->au64[2] = pu256Value->au64[2];
7420 pu256Dst->au64[3] = pu256Value->au64[3];
7421 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7422 Log5(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7423}
7424#endif
7425
7426
7427/**
7428 * Stores a descriptor register (sgdt, sidt).
7429 *
7430 * @returns Strict VBox status code.
7431 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7432 * @param cbLimit The limit.
7433 * @param GCPtrBase The base address.
7434 * @param iSegReg The index of the segment register to use for
7435 * this access. The base and limits are checked.
7436 * @param GCPtrMem The address of the guest memory.
7437 */
7438VBOXSTRICTRC iemMemStoreDataXdtr(PVMCPUCC pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7439{
7440 /*
7441     * The SIDT and SGDT instructions actually store the data using two
7442     * independent writes (see bs3CpuBasic2_sidt_sgdt_One).  The instructions
7443     * do not respond to opsize prefixes.
7444 */
7445 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
7446 if (rcStrict == VINF_SUCCESS)
7447 {
7448 if (IEM_IS_16BIT_CODE(pVCpu))
7449 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
7450 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
7451 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
7452 else if (IEM_IS_32BIT_CODE(pVCpu))
7453 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
7454 else
7455 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
7456 }
7457 return rcStrict;
7458}
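
/*
 * A small sketch (kept outside the build) of the memory image the store above
 * produces for an example base of 0x00201000 and limit of 0x03ff; the values
 * and helper name are made up, the three cases mirror iemMemStoreDataXdtr.
 */
#if 0
static void iemExampleSxdtImage(void)
{
    uint16_t const cbLimit   = UINT16_C(0x03ff);            /* always stored as the first two bytes */
    uint64_t const GCPtrBase = UINT64_C(0x0000000000201000);

    /* 16-bit code: 4 base bytes follow; 286-class CPUs set the top base byte to 0xff. */
    uint32_t const u32Base286 = (uint32_t)GCPtrBase | UINT32_C(0xff000000);    /* 0xff201000 */
    uint32_t const u32Base386 = (uint32_t)GCPtrBase;                           /* 0x00201000 */
    /* 32-bit code: 4 base bytes follow, no 0xff quirk. */
    /* 64-bit code: 8 base bytes follow (the full GCPtrBase). */
    RT_NOREF_PV(cbLimit); RT_NOREF_PV(u32Base286); RT_NOREF_PV(u32Base386);
}
#endif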
7459
7460
7461/**
7462 * Begin a special stack push (used by interrupts, exceptions and such).
7463 *
7464 * This will raise \#SS or \#PF if appropriate.
7465 *
7466 * @returns Strict VBox status code.
7467 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7468 * @param cbMem The number of bytes to push onto the stack.
7469 * @param cbAlign The alignment mask (7, 3, 1).
7470 * @param ppvMem Where to return the pointer to the stack memory.
7471 * As with the other memory functions this could be
7472 * direct access or bounce buffered access, so
7473 *                              don't commit registers until the commit call
7474 * succeeds.
7475 * @param pbUnmapInfo Where to store unmap info for
7476 * iemMemStackPushCommitSpecial.
7477 * @param puNewRsp Where to return the new RSP value. This must be
7478 * passed unchanged to
7479 * iemMemStackPushCommitSpecial().
7480 */
7481VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
7482 void **ppvMem, uint8_t *pbUnmapInfo, uint64_t *puNewRsp) RT_NOEXCEPT
7483{
7484 Assert(cbMem < UINT8_MAX);
7485 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, (uint8_t)cbMem, puNewRsp);
7486 return iemMemMap(pVCpu, ppvMem, pbUnmapInfo, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W, cbAlign);
7487}
7488
7489
7490/**
7491 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
7492 *
7493 * This will update the rSP.
7494 *
7495 * @returns Strict VBox status code.
7496 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7497 * @param bUnmapInfo Unmap info set by iemMemStackPushBeginSpecial.
7498 * @param uNewRsp The new RSP value returned by
7499 * iemMemStackPushBeginSpecial().
7500 */
7501VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPUCC pVCpu, uint8_t bUnmapInfo, uint64_t uNewRsp) RT_NOEXCEPT
7502{
7503 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7504 if (rcStrict == VINF_SUCCESS)
7505 pVCpu->cpum.GstCtx.rsp = uNewRsp;
7506 return rcStrict;
7507}
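
/*
 * A sketch (kept outside the build) of the intended begin/commit usage of the
 * special stack push API above.  The 8-byte frame and the pushed value are
 * example choices only; real callers build exception/interrupt frames.
 */
#if 0
static VBOXSTRICTRC iemExamplePushSpecial(PVMCPUCC pVCpu, uint64_t uValue)
{
    void    *pvMem      = NULL;
    uint8_t  bUnmapInfo = 0;
    uint64_t uNewRsp    = 0;
    VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pVCpu, 8 /*cbMem*/, 7 /*cbAlign*/,
                                                        &pvMem, &bUnmapInfo, &uNewRsp);
    if (rcStrict == VINF_SUCCESS)
    {
        *(uint64_t *)pvMem = uValue;            /* fill the (possibly bounce buffered) stack memory */
        rcStrict = iemMemStackPushCommitSpecial(pVCpu, bUnmapInfo, uNewRsp); /* commits data + RSP */
    }
    return rcStrict; /* no register state was changed if the begin call failed */
}
#endif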
7508
7509
7510/**
7511 * Begin a special stack pop (used by iret, retf and such).
7512 *
7513 * This will raise \#SS or \#PF if appropriate.
7514 *
7515 * @returns Strict VBox status code.
7516 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7517 * @param cbMem The number of bytes to pop from the stack.
7518 * @param cbAlign The alignment mask (7, 3, 1).
7519 * @param ppvMem Where to return the pointer to the stack memory.
7520 * @param pbUnmapInfo Where to store unmap info for
7521 * iemMemStackPopDoneSpecial.
7522 * @param puNewRsp Where to return the new RSP value. This must be
7523 * assigned to CPUMCTX::rsp manually some time
7524 * after iemMemStackPopDoneSpecial() has been
7525 * called.
7526 */
7527VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
7528 void const **ppvMem, uint8_t *pbUnmapInfo, uint64_t *puNewRsp) RT_NOEXCEPT
7529{
7530 Assert(cbMem < UINT8_MAX);
7531 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, (uint8_t)cbMem, puNewRsp);
7532 return iemMemMap(pVCpu, (void **)ppvMem, pbUnmapInfo, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R, cbAlign);
7533}
7534
7535
7536/**
7537 * Continue a special stack pop (used by iret and retf), for the purpose of
7538 * retrieving a new stack pointer.
7539 *
7540 * This will raise \#SS or \#PF if appropriate.
7541 *
7542 * @returns Strict VBox status code.
7543 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7544 * @param off Offset from the top of the stack. This is zero
7545 * except in the retf case.
7546 * @param cbMem The number of bytes to pop from the stack.
7547 * @param ppvMem Where to return the pointer to the stack memory.
7548 * @param pbUnmapInfo Where to store unmap info for
7549 * iemMemStackPopDoneSpecial.
7550 * @param uCurNewRsp The current uncommitted RSP value. (No need to
7551 * return this because all use of this function is
7552 * to retrieve a new value and anything we return
7553 * here would be discarded.)
7554 */
7555VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPUCC pVCpu, size_t off, size_t cbMem,
7556 void const **ppvMem, uint8_t *pbUnmapInfo, uint64_t uCurNewRsp) RT_NOEXCEPT
7557{
7558 Assert(cbMem < UINT8_MAX);
7559
7560    /* The essence of iemRegGetRspForPopEx and friends: */ /** @todo put this into an inlined function? */
7561 RTGCPTR GCPtrTop;
7562 if (IEM_IS_64BIT_CODE(pVCpu))
7563 GCPtrTop = uCurNewRsp;
7564 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
7565 GCPtrTop = (uint32_t)uCurNewRsp;
7566 else
7567 GCPtrTop = (uint16_t)uCurNewRsp;
7568
7569 return iemMemMap(pVCpu, (void **)ppvMem, pbUnmapInfo, cbMem, X86_SREG_SS, GCPtrTop + off, IEM_ACCESS_STACK_R,
7570 0 /* checked in iemMemStackPopBeginSpecial */);
7571}
7572
7573
7574/**
7575 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
7576 * iemMemStackPopContinueSpecial).
7577 *
7578 * The caller will manually commit the rSP.
7579 *
7580 * @returns Strict VBox status code.
7581 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7582 * @param bUnmapInfo Unmap information returned by
7583 * iemMemStackPopBeginSpecial() or
7584 * iemMemStackPopContinueSpecial().
7585 */
7586VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
7587{
7588 return iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7589}
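
/*
 * A sketch (kept outside the build) of the begin/done usage of the special
 * stack pop API above, popping 8 bytes as an example.  Note that RSP is only
 * assigned by the caller, and only once the done call has succeeded.
 */
#if 0
static VBOXSTRICTRC iemExamplePopSpecial(PVMCPUCC pVCpu, uint64_t *puValue)
{
    void const *pvMem      = NULL;
    uint8_t     bUnmapInfo = 0;
    uint64_t    uNewRsp    = 0;
    VBOXSTRICTRC rcStrict = iemMemStackPopBeginSpecial(pVCpu, 8 /*cbMem*/, 7 /*cbAlign*/,
                                                       &pvMem, &bUnmapInfo, &uNewRsp);
    if (rcStrict == VINF_SUCCESS)
    {
        *puValue = *(uint64_t const *)pvMem;
        rcStrict = iemMemStackPopDoneSpecial(pVCpu, bUnmapInfo);
        if (rcStrict == VINF_SUCCESS)
            pVCpu->cpum.GstCtx.rsp = uNewRsp;   /* the manual RSP commit mentioned in the docs above */
    }
    return rcStrict;
}
#endif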
7590
7591
7592/**
7593 * Fetches a system table byte.
7594 *
7595 * @returns Strict VBox status code.
7596 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7597 * @param pbDst Where to return the byte.
7598 * @param iSegReg The index of the segment register to use for
7599 * this access. The base and limits are checked.
7600 * @param GCPtrMem The address of the guest memory.
7601 */
7602VBOXSTRICTRC iemMemFetchSysU8(PVMCPUCC pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7603{
7604 /* The lazy approach for now... */
7605 uint8_t bUnmapInfo;
7606 uint8_t const *pbSrc;
7607 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, &bUnmapInfo, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
7608 if (rc == VINF_SUCCESS)
7609 {
7610 *pbDst = *pbSrc;
7611 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7612 }
7613 return rc;
7614}
7615
7616
7617/**
7618 * Fetches a system table word.
7619 *
7620 * @returns Strict VBox status code.
7621 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7622 * @param pu16Dst Where to return the word.
7623 * @param iSegReg The index of the segment register to use for
7624 * this access. The base and limits are checked.
7625 * @param GCPtrMem The address of the guest memory.
7626 */
7627VBOXSTRICTRC iemMemFetchSysU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7628{
7629 /* The lazy approach for now... */
7630 uint8_t bUnmapInfo;
7631 uint16_t const *pu16Src;
7632 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, &bUnmapInfo, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
7633 if (rc == VINF_SUCCESS)
7634 {
7635 *pu16Dst = *pu16Src;
7636 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7637 }
7638 return rc;
7639}
7640
7641
7642/**
7643 * Fetches a system table dword.
7644 *
7645 * @returns Strict VBox status code.
7646 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7647 * @param pu32Dst Where to return the dword.
7648 * @param iSegReg The index of the segment register to use for
7649 * this access. The base and limits are checked.
7650 * @param GCPtrMem The address of the guest memory.
7651 */
7652VBOXSTRICTRC iemMemFetchSysU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7653{
7654 /* The lazy approach for now... */
7655 uint8_t bUnmapInfo;
7656 uint32_t const *pu32Src;
7657 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, &bUnmapInfo, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
7658 if (rc == VINF_SUCCESS)
7659 {
7660 *pu32Dst = *pu32Src;
7661 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7662 }
7663 return rc;
7664}
7665
7666
7667/**
7668 * Fetches a system table qword.
7669 *
7670 * @returns Strict VBox status code.
7671 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7672 * @param pu64Dst Where to return the qword.
7673 * @param iSegReg The index of the segment register to use for
7674 * this access. The base and limits are checked.
7675 * @param GCPtrMem The address of the guest memory.
7676 */
7677VBOXSTRICTRC iemMemFetchSysU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7678{
7679 /* The lazy approach for now... */
7680 uint8_t bUnmapInfo;
7681 uint64_t const *pu64Src;
7682 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, &bUnmapInfo, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
7683 if (rc == VINF_SUCCESS)
7684 {
7685 *pu64Dst = *pu64Src;
7686 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7687 }
7688 return rc;
7689}
7690
7691
7692/**
7693 * Fetches a descriptor table entry with caller specified error code.
7694 *
7695 * @returns Strict VBox status code.
7696 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7697 * @param pDesc Where to return the descriptor table entry.
7698 * @param uSel The selector which table entry to fetch.
7699 * @param uXcpt The exception to raise on table lookup error.
7700 * @param uErrorCode The error code associated with the exception.
7701 */
7702static VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,
7703 uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT
7704{
7705 AssertPtr(pDesc);
7706 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
7707
7708 /** @todo did the 286 require all 8 bytes to be accessible? */
7709 /*
7710 * Get the selector table base and check bounds.
7711 */
7712 RTGCPTR GCPtrBase;
7713 if (uSel & X86_SEL_LDT)
7714 {
7715 if ( !pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present
7716 || (uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.ldtr.u32Limit )
7717 {
7718 LogEx(LOG_GROUP_IEM, ("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
7719 uSel, pVCpu->cpum.GstCtx.ldtr.u32Limit, pVCpu->cpum.GstCtx.ldtr.Sel));
7720 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
7721 uErrorCode, 0);
7722 }
7723
7724 Assert(pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present);
7725 GCPtrBase = pVCpu->cpum.GstCtx.ldtr.u64Base;
7726 }
7727 else
7728 {
7729 if ((uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.gdtr.cbGdt)
7730 {
7731 LogEx(LOG_GROUP_IEM, ("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pVCpu->cpum.GstCtx.gdtr.cbGdt));
7732 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
7733 uErrorCode, 0);
7734 }
7735 GCPtrBase = pVCpu->cpum.GstCtx.gdtr.pGdt;
7736 }
7737
7738 /*
7739 * Read the legacy descriptor and maybe the long mode extensions if
7740 * required.
7741 */
7742 VBOXSTRICTRC rcStrict;
7743 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)
7744 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
7745 else
7746 {
7747 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);
7748 if (rcStrict == VINF_SUCCESS)
7749 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);
7750 if (rcStrict == VINF_SUCCESS)
7751 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);
7752 if (rcStrict == VINF_SUCCESS)
7753 pDesc->Legacy.au16[3] = 0;
7754 else
7755 return rcStrict;
7756 }
7757
7758 if (rcStrict == VINF_SUCCESS)
7759 {
7760 if ( !IEM_IS_LONG_MODE(pVCpu)
7761 || pDesc->Legacy.Gen.u1DescType)
7762 pDesc->Long.au64[1] = 0;
7763 else if ( (uint32_t)(uSel | X86_SEL_RPL_LDT) + 8
7764 <= (uSel & X86_SEL_LDT ? pVCpu->cpum.GstCtx.ldtr.u32Limit : pVCpu->cpum.GstCtx.gdtr.cbGdt))
7765 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
7766 else
7767 {
7768 LogEx(LOG_GROUP_IEM,("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
7769 /** @todo is this the right exception? */
7770 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
7771 }
7772 }
7773 return rcStrict;
7774}
7775
7776
7777/**
7778 * Fetches a descriptor table entry.
7779 *
7780 * @returns Strict VBox status code.
7781 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7782 * @param pDesc Where to return the descriptor table entry.
7783 * @param uSel The selector which table entry to fetch.
7784 * @param uXcpt The exception to raise on table lookup error.
7785 */
7786VBOXSTRICTRC iemMemFetchSelDesc(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt) RT_NOEXCEPT
7787{
7788 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
7789}
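
/*
 * A sketch (kept outside the build) of how the selector bits relate to the
 * bounds checks in iemMemFetchSelDescWithErr above.  The selector value is an
 * arbitrary example.
 */
#if 0
static void iemExampleSelectorMath(void)
{
    uint16_t const uSel    = UINT16_C(0x002b);            /* index 5, TI=1 (LDT), RPL=3 */
    uint16_t const offDesc = uSel & X86_SEL_MASK;         /* 0x0028: byte offset of the descriptor (index * 8) */
    uint16_t const offLast = uSel | X86_SEL_RPL_LDT;      /* 0x002f: offset of the descriptor's last byte */
    bool     const fLdt    = RT_BOOL(uSel & X86_SEL_LDT); /* table indicator: LDT vs GDT */
    /* The descriptor is in bounds iff offLast <= table limit, hence the
       '(uSel | X86_SEL_RPL_LDT) > limit' checks above. */
    RT_NOREF_PV(offDesc); RT_NOREF_PV(offLast); RT_NOREF_PV(fLdt);
}
#endif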
7790
7791
7792/**
7793 * Marks the selector descriptor as accessed (only non-system descriptors).
7794 *
7795 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
7796 * will therefore skip the limit checks.
7797 *
7798 * @returns Strict VBox status code.
7799 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7800 * @param uSel The selector.
7801 */
7802VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
7803{
7804 /*
7805 * Get the selector table base and calculate the entry address.
7806 */
7807 RTGCPTR GCPtr = uSel & X86_SEL_LDT
7808 ? pVCpu->cpum.GstCtx.ldtr.u64Base
7809 : pVCpu->cpum.GstCtx.gdtr.pGdt;
7810 GCPtr += uSel & X86_SEL_MASK;
7811
7812 /*
7813 * ASMAtomicBitSet will assert if the address is misaligned, so do some
7814 * ugly stuff to avoid this. This will make sure it's an atomic access
7815     * as well as more or less remove any question about 8-bit or 32-bit accesses.
7816 */
7817 VBOXSTRICTRC rcStrict;
7818 uint8_t bUnmapInfo;
7819 uint32_t volatile *pu32;
7820 if ((GCPtr & 3) == 0)
7821 {
7822 /* The normal case, map the 32-bit bits around the accessed bit (40). */
7823 GCPtr += 2 + 2;
7824 rcStrict = iemMemMap(pVCpu, (void **)&pu32, &bUnmapInfo, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0);
7825 if (rcStrict != VINF_SUCCESS)
7826 return rcStrict;
7827        ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
7828 }
7829 else
7830 {
7831 /* The misaligned GDT/LDT case, map the whole thing. */
7832 rcStrict = iemMemMap(pVCpu, (void **)&pu32, &bUnmapInfo, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0);
7833 if (rcStrict != VINF_SUCCESS)
7834 return rcStrict;
7835 switch ((uintptr_t)pu32 & 3)
7836 {
7837 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
7838 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
7839 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
7840 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
7841 }
7842 }
7843
7844 return iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7845}
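
/*
 * A sketch (kept outside the build) of the bit arithmetic behind the aligned
 * case above: the accessed bit is bit 40 of the 8-byte descriptor (bit 0 of
 * the type byte at offset 5), so after mapping the dword at offset 4 it is
 * bit 40 - 32 = 8 that gets set.
 */
#if 0
static void iemExampleAccessedBitOffset(void)
{
    unsigned const iBitInDesc  = 5 * 8 + 0;                  /* type byte offset 5, X86_SEL_TYPE_ACCESSED is bit 0 */
    unsigned const offDword    = 2 + 2;                      /* the dword mapped in the aligned case */
    unsigned const iBitInDword = iBitInDesc - offDword * 8;  /* = 8, matching ASMAtomicBitSet(pu32, 8) above */
    RT_NOREF_PV(iBitInDword);
}
#endif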
7846
7847
7848#undef LOG_GROUP
7849#define LOG_GROUP LOG_GROUP_IEM
7850
7851/** @} */
7852
7853/** @name Opcode Helpers.
7854 * @{
7855 */
7856
7857/**
7858 * Calculates the effective address of a ModR/M memory operand.
7859 *
7860 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
7861 *
7862 * @return Strict VBox status code.
7863 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7864 * @param bRm The ModRM byte.
7865 * @param cbImmAndRspOffset - First byte: The size of any immediate
7866 * following the effective address opcode bytes
7867 * (only for RIP relative addressing).
7868 * - Second byte: RSP displacement (for POP [ESP]).
7869 * @param pGCPtrEff Where to return the effective address.
7870 */
7871VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset, PRTGCPTR pGCPtrEff) RT_NOEXCEPT
7872{
7873 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
7874# define SET_SS_DEF() \
7875 do \
7876 { \
7877 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
7878 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
7879 } while (0)
7880
7881 if (!IEM_IS_64BIT_CODE(pVCpu))
7882 {
7883/** @todo Check the effective address size crap! */
7884 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
7885 {
7886 uint16_t u16EffAddr;
7887
7888 /* Handle the disp16 form with no registers first. */
7889 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
7890 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
7891 else
7892 {
7893                /* Get the displacement. */
7894 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
7895 {
7896 case 0: u16EffAddr = 0; break;
7897 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
7898 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
7899 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
7900 }
7901
7902 /* Add the base and index registers to the disp. */
7903 switch (bRm & X86_MODRM_RM_MASK)
7904 {
7905 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
7906 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
7907 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
7908 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
7909 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
7910 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
7911 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
7912 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
7913 }
7914 }
7915
7916 *pGCPtrEff = u16EffAddr;
7917 }
7918 else
7919 {
7920 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
7921 uint32_t u32EffAddr;
7922
7923 /* Handle the disp32 form with no registers first. */
7924 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
7925 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
7926 else
7927 {
7928 /* Get the register (or SIB) value. */
7929 switch ((bRm & X86_MODRM_RM_MASK))
7930 {
7931 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
7932 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
7933 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
7934 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
7935 case 4: /* SIB */
7936 {
7937 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
7938
7939 /* Get the index and scale it. */
7940 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
7941 {
7942 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
7943 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
7944 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
7945 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
7946 case 4: u32EffAddr = 0; /*none */ break;
7947 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
7948 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
7949 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
7950 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7951 }
7952 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
7953
7954 /* add base */
7955 switch (bSib & X86_SIB_BASE_MASK)
7956 {
7957 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
7958 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
7959 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
7960 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
7961 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
7962 case 5:
7963 if ((bRm & X86_MODRM_MOD_MASK) != 0)
7964 {
7965 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
7966 SET_SS_DEF();
7967 }
7968 else
7969 {
7970 uint32_t u32Disp;
7971 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
7972 u32EffAddr += u32Disp;
7973 }
7974 break;
7975 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
7976 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
7977 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7978 }
7979 break;
7980 }
7981 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
7982 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
7983 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
7984 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7985 }
7986
7987 /* Get and add the displacement. */
7988 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
7989 {
7990 case 0:
7991 break;
7992 case 1:
7993 {
7994 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
7995 u32EffAddr += i8Disp;
7996 break;
7997 }
7998 case 2:
7999 {
8000 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8001 u32EffAddr += u32Disp;
8002 break;
8003 }
8004 default:
8005 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
8006 }
8007
8008 }
8009 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8010 *pGCPtrEff = u32EffAddr;
8011 }
8012 }
8013 else
8014 {
8015 uint64_t u64EffAddr;
8016
8017 /* Handle the rip+disp32 form with no registers first. */
8018 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8019 {
8020 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
8021 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
8022 }
8023 else
8024 {
8025 /* Get the register (or SIB) value. */
8026 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
8027 {
8028 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8029 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8030 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8031 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8032 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
8033 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8034 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8035 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8036 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8037 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8038 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8039 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8040 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8041 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8042 /* SIB */
8043 case 4:
8044 case 12:
8045 {
8046 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8047
8048 /* Get the index and scale it. */
8049 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
8050 {
8051 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8052 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8053 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8054 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8055 case 4: u64EffAddr = 0; /*none */ break;
8056 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
8057 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8058 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8059 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8060 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8061 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8062 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8063 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
8064 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8065 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8066 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8067 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8068 }
8069 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8070
8071 /* add base */
8072 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
8073 {
8074 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
8075 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
8076 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
8077 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
8078 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8079 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
8080 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
8081 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
8082 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
8083 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
8084 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
8085 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
8086 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
8087 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
8088 /* complicated encodings */
8089 case 5:
8090 case 13:
8091 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8092 {
8093 if (!pVCpu->iem.s.uRexB)
8094 {
8095 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
8096 SET_SS_DEF();
8097 }
8098 else
8099 u64EffAddr += pVCpu->cpum.GstCtx.r13;
8100 }
8101 else
8102 {
8103 uint32_t u32Disp;
8104 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8105 u64EffAddr += (int32_t)u32Disp;
8106 }
8107 break;
8108 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8109 }
8110 break;
8111 }
8112 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8113 }
8114
8115 /* Get and add the displacement. */
8116 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8117 {
8118 case 0:
8119 break;
8120 case 1:
8121 {
8122 int8_t i8Disp;
8123 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8124 u64EffAddr += i8Disp;
8125 break;
8126 }
8127 case 2:
8128 {
8129 uint32_t u32Disp;
8130 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8131 u64EffAddr += (int32_t)u32Disp;
8132 break;
8133 }
8134 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
8135 }
8136
8137 }
8138
8139 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
8140 *pGCPtrEff = u64EffAddr;
8141 else
8142 {
8143 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8144 *pGCPtrEff = u64EffAddr & UINT32_MAX;
8145 }
8146 }
8147
8148 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
8149 return VINF_SUCCESS;
8150}
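
/*
 * A worked example (kept outside the build) for the 32-bit path of the decoder
 * above: 'mov eax, [ebp+ecx*4+8]' encodes bRm=0x44 (mod=1, rm=4/SIB),
 * bSib=0x8d (scale=4, index=ecx, base=ebp) and an 8-bit displacement of 8.
 * The register values below are made up.
 */
#if 0
static void iemExampleCalcEffAddr32(void)
{
    uint32_t const uEbp     = UINT32_C(0x00100000);
    uint32_t const uEcx     = UINT32_C(0x00000003);
    int8_t   const i8Disp   = 8;
    /* index*scale + base + disp8; base=ebp with mod != 0 also makes SS the default segment. */
    uint32_t const uEffAddr = (uEcx << 2) + uEbp + (uint32_t)(int32_t)i8Disp;  /* = 0x00100014 */
    RT_NOREF_PV(uEffAddr);
}
#endif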
8151
8152
8153#ifdef IEM_WITH_SETJMP
8154/**
8155 * Calculates the effective address of a ModR/M memory operand.
8156 *
8157 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8158 *
8159 * May longjmp on internal error.
8160 *
8161 * @return The effective address.
8162 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8163 * @param bRm The ModRM byte.
8164 * @param cbImmAndRspOffset - First byte: The size of any immediate
8165 * following the effective address opcode bytes
8166 * (only for RIP relative addressing).
8167 * - Second byte: RSP displacement (for POP [ESP]).
8168 */
8169RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset) IEM_NOEXCEPT_MAY_LONGJMP
8170{
8171 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
8172# define SET_SS_DEF() \
8173 do \
8174 { \
8175 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
8176 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
8177 } while (0)
8178
8179 if (!IEM_IS_64BIT_CODE(pVCpu))
8180 {
8181/** @todo Check the effective address size crap! */
8182 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
8183 {
8184 uint16_t u16EffAddr;
8185
8186 /* Handle the disp16 form with no registers first. */
8187 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
8188 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
8189 else
8190 {
8191                /* Get the displacement. */
8192 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8193 {
8194 case 0: u16EffAddr = 0; break;
8195 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
8196 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
8197 default: AssertFailedStmt(IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_1)); /* (caller checked for these) */
8198 }
8199
8200 /* Add the base and index registers to the disp. */
8201 switch (bRm & X86_MODRM_RM_MASK)
8202 {
8203 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
8204 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
8205 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
8206 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
8207 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
8208 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
8209 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
8210 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
8211 }
8212 }
8213
8214 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
8215 return u16EffAddr;
8216 }
8217
8218 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8219 uint32_t u32EffAddr;
8220
8221 /* Handle the disp32 form with no registers first. */
8222 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8223 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
8224 else
8225 {
8226 /* Get the register (or SIB) value. */
8227 switch ((bRm & X86_MODRM_RM_MASK))
8228 {
8229 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8230 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8231 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8232 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8233 case 4: /* SIB */
8234 {
8235 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8236
8237 /* Get the index and scale it. */
8238 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
8239 {
8240 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8241 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8242 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8243 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8244 case 4: u32EffAddr = 0; /*none */ break;
8245 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
8246 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8247 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8248 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8249 }
8250 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8251
8252 /* add base */
8253 switch (bSib & X86_SIB_BASE_MASK)
8254 {
8255 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
8256 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
8257 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
8258 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
8259 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8260 case 5:
8261 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8262 {
8263 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
8264 SET_SS_DEF();
8265 }
8266 else
8267 {
8268 uint32_t u32Disp;
8269 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8270 u32EffAddr += u32Disp;
8271 }
8272 break;
8273 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
8274 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
8275 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8276 }
8277 break;
8278 }
8279 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
8280 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8281 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8282 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8283 }
8284
8285 /* Get and add the displacement. */
8286 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8287 {
8288 case 0:
8289 break;
8290 case 1:
8291 {
8292 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8293 u32EffAddr += i8Disp;
8294 break;
8295 }
8296 case 2:
8297 {
8298 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8299 u32EffAddr += u32Disp;
8300 break;
8301 }
8302 default:
8303 AssertFailedStmt(IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_2)); /* (caller checked for these) */
8304 }
8305 }
8306
8307 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8308 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
8309 return u32EffAddr;
8310 }
8311
8312 uint64_t u64EffAddr;
8313
8314 /* Handle the rip+disp32 form with no registers first. */
8315 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8316 {
8317 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
8318 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
8319 }
8320 else
8321 {
8322 /* Get the register (or SIB) value. */
8323 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
8324 {
8325 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8326 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8327 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8328 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8329 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
8330 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8331 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8332 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8333 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8334 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8335 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8336 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8337 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8338 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8339 /* SIB */
8340 case 4:
8341 case 12:
8342 {
8343 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8344
8345 /* Get the index and scale it. */
8346 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
8347 {
8348 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8349 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8350 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8351 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8352 case 4: u64EffAddr = 0; /*none */ break;
8353 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
8354 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8355 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8356 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8357 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8358 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8359 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8360 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
8361 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8362 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8363 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8364 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8365 }
8366 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8367
8368 /* add base */
8369 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
8370 {
8371 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
8372 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
8373 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
8374 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
8375 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8376 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
8377 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
8378 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
8379 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
8380 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
8381 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
8382 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
8383 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
8384 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
8385 /* complicated encodings */
8386 case 5:
8387 case 13:
8388 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8389 {
8390 if (!pVCpu->iem.s.uRexB)
8391 {
8392 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
8393 SET_SS_DEF();
8394 }
8395 else
8396 u64EffAddr += pVCpu->cpum.GstCtx.r13;
8397 }
8398 else
8399 {
8400 uint32_t u32Disp;
8401 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8402 u64EffAddr += (int32_t)u32Disp;
8403 }
8404 break;
8405 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8406 }
8407 break;
8408 }
8409 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8410 }
8411
8412 /* Get and add the displacement. */
8413 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8414 {
8415 case 0:
8416 break;
8417 case 1:
8418 {
8419 int8_t i8Disp;
8420 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8421 u64EffAddr += i8Disp;
8422 break;
8423 }
8424 case 2:
8425 {
8426 uint32_t u32Disp;
8427 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8428 u64EffAddr += (int32_t)u32Disp;
8429 break;
8430 }
8431 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
8432 }
8433
8434 }
8435
8436 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
8437 {
8438 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
8439 return u64EffAddr;
8440 }
8441 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8442 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
8443 return u64EffAddr & UINT32_MAX;
8444}
8445#endif /* IEM_WITH_SETJMP */
8446
8447
8448/**
8449 * Calculates the effective address of a ModR/M memory operand, extended version
8450 * for use in the recompilers.
8451 *
8452 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8453 *
8454 * @return Strict VBox status code.
8455 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8456 * @param bRm The ModRM byte.
8457 * @param cbImmAndRspOffset - First byte: The size of any immediate
8458 * following the effective address opcode bytes
8459 * (only for RIP relative addressing).
8460 * - Second byte: RSP displacement (for POP [ESP]).
8461 * @param pGCPtrEff Where to return the effective address.
8462 * @param puInfo Extra info: 32-bit displacement (bits 31:0) and
8463 * SIB byte (bits 39:32).
8464 */
8465VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset, PRTGCPTR pGCPtrEff, uint64_t *puInfo) RT_NOEXCEPT
8466{
8467    Log5(("iemOpHlpCalcRmEffAddrEx: bRm=%#x\n", bRm));
8468# define SET_SS_DEF() \
8469 do \
8470 { \
8471 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
8472 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
8473 } while (0)
8474
8475 uint64_t uInfo;
8476 if (!IEM_IS_64BIT_CODE(pVCpu))
8477 {
8478/** @todo Check the effective address size crap! */
8479 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
8480 {
8481 uint16_t u16EffAddr;
8482
8483 /* Handle the disp16 form with no registers first. */
8484 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
8485 {
8486 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
8487 uInfo = u16EffAddr;
8488 }
8489 else
8490 {
8491                /* Get the displacement. */
8492 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8493 {
8494 case 0: u16EffAddr = 0; break;
8495 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
8496 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
8497 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
8498 }
8499 uInfo = u16EffAddr;
8500
8501 /* Add the base and index registers to the disp. */
8502 switch (bRm & X86_MODRM_RM_MASK)
8503 {
8504 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
8505 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
8506 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
8507 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
8508 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
8509 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
8510 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
8511 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
8512 }
8513 }
8514
8515 *pGCPtrEff = u16EffAddr;
8516 }
8517 else
8518 {
8519 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8520 uint32_t u32EffAddr;
8521
8522 /* Handle the disp32 form with no registers first. */
8523 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8524 {
8525 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
8526 uInfo = u32EffAddr;
8527 }
8528 else
8529 {
8530 /* Get the register (or SIB) value. */
8531 uInfo = 0;
8532 switch ((bRm & X86_MODRM_RM_MASK))
8533 {
8534 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8535 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8536 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8537 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8538 case 4: /* SIB */
8539 {
8540 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8541 uInfo = (uint64_t)bSib << 32;
8542
8543 /* Get the index and scale it. */
8544 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
8545 {
8546 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8547 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8548 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8549 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8550 case 4: u32EffAddr = 0; /*none */ break;
8551 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
8552 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8553 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8554 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8555 }
8556 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8557
8558 /* add base */
8559 switch (bSib & X86_SIB_BASE_MASK)
8560 {
8561 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
8562 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
8563 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
8564 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
8565 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8566 case 5:
8567 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8568 {
8569 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
8570 SET_SS_DEF();
8571 }
8572 else
8573 {
8574 uint32_t u32Disp;
8575 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8576 u32EffAddr += u32Disp;
8577 uInfo |= u32Disp;
8578 }
8579 break;
8580 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
8581 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
8582 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8583 }
8584 break;
8585 }
8586 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
8587 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8588 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8589 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8590 }
8591
8592 /* Get and add the displacement. */
8593 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8594 {
8595 case 0:
8596 break;
8597 case 1:
8598 {
8599 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8600 u32EffAddr += i8Disp;
8601 uInfo |= (uint32_t)(int32_t)i8Disp;
8602 break;
8603 }
8604 case 2:
8605 {
8606 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8607 u32EffAddr += u32Disp;
8608 uInfo |= (uint32_t)u32Disp;
8609 break;
8610 }
8611 default:
8612 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
8613 }
8614
8615 }
8616 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8617 *pGCPtrEff = u32EffAddr;
8618 }
8619 }
8620 else
8621 {
8622 uint64_t u64EffAddr;
8623
8624 /* Handle the rip+disp32 form with no registers first. */
8625 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8626 {
8627 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
8628 uInfo = (uint32_t)u64EffAddr;
8629 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
8630 }
8631 else
8632 {
8633 /* Get the register (or SIB) value. */
8634 uInfo = 0;
8635 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
8636 {
8637 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8638 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8639 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8640 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8641 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
8642 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8643 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8644 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8645 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8646 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8647 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8648 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8649 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8650 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8651 /* SIB */
8652 case 4:
8653 case 12:
8654 {
8655 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8656 uInfo = (uint64_t)bSib << 32;
8657
8658 /* Get the index and scale it. */
8659 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
8660 {
8661 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8662 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8663 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8664 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8665 case 4: u64EffAddr = 0; /*none */ break;
8666 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
8667 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8668 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8669 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8670 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8671 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8672 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8673 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
8674 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8675 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8676 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8677 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8678 }
8679 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8680
8681 /* add base */
8682 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
8683 {
8684 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
8685 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
8686 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
8687 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
8688 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8689 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
8690 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
8691 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
8692 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
8693 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
8694 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
8695 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
8696 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
8697 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
8698 /* complicated encodings */
8699 case 5:
8700 case 13:
8701 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8702 {
8703 if (!pVCpu->iem.s.uRexB)
8704 {
8705 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
8706 SET_SS_DEF();
8707 }
8708 else
8709 u64EffAddr += pVCpu->cpum.GstCtx.r13;
8710 }
8711 else
8712 {
8713 uint32_t u32Disp;
8714 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8715 u64EffAddr += (int32_t)u32Disp;
8716 uInfo |= u32Disp;
8717 }
8718 break;
8719 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8720 }
8721 break;
8722 }
8723 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8724 }
8725
8726 /* Get and add the displacement. */
8727 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8728 {
8729 case 0:
8730 break;
8731 case 1:
8732 {
8733 int8_t i8Disp;
8734 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8735 u64EffAddr += i8Disp;
8736 uInfo |= (uint32_t)(int32_t)i8Disp;
8737 break;
8738 }
8739 case 2:
8740 {
8741 uint32_t u32Disp;
8742 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8743 u64EffAddr += (int32_t)u32Disp;
8744 uInfo |= u32Disp;
8745 break;
8746 }
8747 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
8748 }
8749
8750 }
8751
8752 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
8753 *pGCPtrEff = u64EffAddr;
8754 else
8755 {
8756 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8757 *pGCPtrEff = u64EffAddr & UINT32_MAX;
8758 }
8759 }
8760 *puInfo = uInfo;
8761
8762 Log5(("iemOpHlpCalcRmEffAddrEx: EffAddr=%#010RGv uInfo=%RX64\n", *pGCPtrEff, uInfo));
8763 return VINF_SUCCESS;
8764}
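
/*
 * A sketch (kept outside the build) of how a recompiler caller might unpack
 * the uInfo value returned above: displacement in bits 31:0, SIB byte in
 * bits 39:32.  The helper name is made up.
 */
#if 0
static void iemExampleUnpackEffAddrInfo(uint64_t uInfo)
{
    uint32_t const u32Disp = (uint32_t)uInfo;        /* bits 31:0  - the displacement (sign-extended if it was 8-bit) */
    uint8_t  const bSib    = (uint8_t)(uInfo >> 32); /* bits 39:32 - the SIB byte, zero when none was present */
    RT_NOREF_PV(u32Disp); RT_NOREF_PV(bSib);
}
#endif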
8765
8766/** @} */
8767
8768
8769#ifdef LOG_ENABLED
8770/**
8771 * Logs the current instruction.
8772 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
8773 * @param fSameCtx Set if we have the same context information as the VMM,
8774 * clear if we may have already executed an instruction in
8775 * our debug context. When clear, we assume IEMCPU holds
8776 * valid CPU mode info.
8777 *
8778 * The @a fSameCtx parameter is now misleading and obsolete.
8779 * @param pszFunction The IEM function doing the execution.
8780 */
8781static void iemLogCurInstr(PVMCPUCC pVCpu, bool fSameCtx, const char *pszFunction) RT_NOEXCEPT
8782{
8783# ifdef IN_RING3
8784 if (LogIs2Enabled())
8785 {
8786 char szInstr[256];
8787 uint32_t cbInstr = 0;
8788 if (fSameCtx)
8789 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
8790 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
8791 szInstr, sizeof(szInstr), &cbInstr);
8792 else
8793 {
8794 uint32_t fFlags = 0;
8795 switch (IEM_GET_CPU_MODE(pVCpu))
8796 {
8797 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
8798 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
8799 case IEMMODE_16BIT:
8800 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) || pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
8801 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
8802 else
8803 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
8804 break;
8805 }
8806 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fFlags,
8807 szInstr, sizeof(szInstr), &cbInstr);
8808 }
8809
8810 PCX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
8811 Log2(("**** %s fExec=%x\n"
8812 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
8813 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
8814 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
8815 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
8816 " %s\n"
8817 , pszFunction, pVCpu->iem.s.fExec,
8818 pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ebx, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.esi, pVCpu->cpum.GstCtx.edi,
8819 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL, pVCpu->cpum.GstCtx.tr.Sel,
8820 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ds.Sel, pVCpu->cpum.GstCtx.es.Sel,
8821 pVCpu->cpum.GstCtx.fs.Sel, pVCpu->cpum.GstCtx.gs.Sel, pVCpu->cpum.GstCtx.eflags.u,
8822 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
8823 szInstr));
8824
8825 /* This stuff sucks atm. as it fills the log with MSRs. */
8826 //if (LogIs3Enabled())
8827 // DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
8828 }
8829 else
8830# endif
8831 LogFlow(("%s: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n", pszFunction, pVCpu->cpum.GstCtx.cs.Sel,
8832 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u));
8833 RT_NOREF_PV(pVCpu); RT_NOREF_PV(fSameCtx);
8834}
8835#endif /* LOG_ENABLED */
8836
8837
8838#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
8839/**
8840 * Deals with VMCPU_FF_VMX_APIC_WRITE, VMCPU_FF_VMX_MTF, VMCPU_FF_VMX_NMI_WINDOW,
8841 * VMCPU_FF_VMX_PREEMPT_TIMER and VMCPU_FF_VMX_INT_WINDOW.
8842 *
8843 * @returns Modified rcStrict.
8844 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8845 * @param rcStrict The instruction execution status.
8846 */
8847static VBOXSTRICTRC iemHandleNestedInstructionBoundaryFFs(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict) RT_NOEXCEPT
8848{
8849 Assert(CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)));
8850 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF))
8851 {
8852 /* VMX preemption timer takes priority over NMI-window exits. */
8853 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
8854 {
8855 rcStrict = iemVmxVmexitPreemptTimer(pVCpu);
8856 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER));
8857 }
8858 /*
8859 * Check remaining intercepts.
8860 *
8861 * NMI-window and Interrupt-window VM-exits.
8862 * Interrupt shadow (block-by-STI and Mov SS) inhibits interrupts and may also block NMIs.
8863 * Event injection during VM-entry takes priority over NMI-window and interrupt-window VM-exits.
8864 *
8865 * See Intel spec. 26.7.6 "NMI-Window Exiting".
8866 * See Intel spec. 26.7.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
8867 */
8868 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW | VMCPU_FF_VMX_INT_WINDOW)
8869 && !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
8870 && !TRPMHasTrap(pVCpu))
8871 {
8872 Assert(CPUMIsGuestVmxInterceptEvents(&pVCpu->cpum.GstCtx));
8873 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW)
8874 && CPUMIsGuestVmxVirtNmiBlocking(&pVCpu->cpum.GstCtx))
8875 {
8876 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_NMI_WINDOW, 0 /* u64ExitQual */);
8877 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW));
8878 }
8879 else if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW)
8880 && CPUMIsGuestVmxVirtIntrEnabled(&pVCpu->cpum.GstCtx))
8881 {
8882 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_INT_WINDOW, 0 /* u64ExitQual */);
8883 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW));
8884 }
8885 }
8886 }
8887 /* TPR-below threshold/APIC write has the highest priority. */
8888 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
8889 {
8890 rcStrict = iemVmxApicWriteEmulation(pVCpu);
8891 Assert(!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx));
8892 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE));
8893 }
8894 /* MTF takes priority over VMX-preemption timer. */
8895 else
8896 {
8897 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* u64ExitQual */);
8898 Assert(!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx));
8899 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF));
8900 }
8901 return rcStrict;
8902}
8903#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
8904
8905
8906/**
8907 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
8908 * IEMExecOneWithPrefetchedByPC.
8909 *
8910 * Similar code is found in IEMExecLots.
8911 *
8912 * @return Strict VBox status code.
8913 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
8914 * @param fExecuteInhibit If set, execute the instruction following CLI,
8915 * POP SS and MOV SS,GR.
8916 * @param pszFunction The calling function name.
8917 */
8918DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPUCC pVCpu, bool fExecuteInhibit, const char *pszFunction)
8919{
8920 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
8921 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
8922 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
8923 RT_NOREF_PV(pszFunction);
8924
8925#ifdef IEM_WITH_SETJMP
8926 VBOXSTRICTRC rcStrict;
8927 IEM_TRY_SETJMP(pVCpu, rcStrict)
8928 {
8929 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
8930 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
8931 }
8932 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
8933 {
8934 pVCpu->iem.s.cLongJumps++;
8935 }
8936 IEM_CATCH_LONGJMP_END(pVCpu);
8937#else
8938 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
8939 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
8940#endif
8941 if (rcStrict == VINF_SUCCESS)
8942 pVCpu->iem.s.cInstructions++;
8943 if (pVCpu->iem.s.cActiveMappings > 0)
8944 {
8945 Assert(rcStrict != VINF_SUCCESS);
8946 iemMemRollback(pVCpu);
8947 }
8948 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
8949 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
8950 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
8951
8952//#ifdef DEBUG
8953// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
8954//#endif
8955
8956#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
8957 /*
8958 * Perform any VMX nested-guest instruction boundary actions.
8959 *
8960 * If any of these causes a VM-exit, we must skip executing the next
8961 * instruction (would run into stale page tables). A VM-exit makes sure
8962 * there is no interrupt-inhibition, so that should ensure we don't go
8963 * to try execute the next instruction. Clearing fExecuteInhibit is
8964 * problematic because of the setjmp/longjmp clobbering above.
8965 */
8966 if ( !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
8967 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)
8968 || rcStrict != VINF_SUCCESS)
8969 { /* likely */ }
8970 else
8971 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
8972#endif
8973
8974 /* Execute the next instruction as well if a cli, pop ss or
8975 mov ss, Gr has just completed successfully. */
8976 if ( fExecuteInhibit
8977 && rcStrict == VINF_SUCCESS
8978 && CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx))
8979 {
8980 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fExec & (IEM_F_BYPASS_HANDLERS | IEM_F_X86_DISREGARD_LOCK));
8981 if (rcStrict == VINF_SUCCESS)
8982 {
8983#ifdef LOG_ENABLED
8984 iemLogCurInstr(pVCpu, false, pszFunction);
8985#endif
8986#ifdef IEM_WITH_SETJMP
8987 IEM_TRY_SETJMP_AGAIN(pVCpu, rcStrict)
8988 {
8989 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
8990 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
8991 }
8992 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
8993 {
8994 pVCpu->iem.s.cLongJumps++;
8995 }
8996 IEM_CATCH_LONGJMP_END(pVCpu);
8997#else
8998 IEM_OPCODE_GET_FIRST_U8(&b);
8999 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9000#endif
9001 if (rcStrict == VINF_SUCCESS)
9002 {
9003 pVCpu->iem.s.cInstructions++;
9004#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9005 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9006 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW))
9007 { /* likely */ }
9008 else
9009 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9010#endif
9011 }
9012 if (pVCpu->iem.s.cActiveMappings > 0)
9013 {
9014 Assert(rcStrict != VINF_SUCCESS);
9015 iemMemRollback(pVCpu);
9016 }
9017 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9018 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9019 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9020 }
9021 else if (pVCpu->iem.s.cActiveMappings > 0)
9022 iemMemRollback(pVCpu);
9023 /** @todo drop this after we bake this change into RIP advancing. */
9024 CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx); /* hope this is correct for all exceptional cases... */
9025 }
9026
9027 /*
9028 * Return value fiddling, statistics and sanity assertions.
9029 */
9030 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9031
9032 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
9033 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
9034 return rcStrict;
9035}
9036
9037
9038/**
9039 * Execute one instruction.
9040 *
9041 * @return Strict VBox status code.
9042 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
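 *
 * A minimal usage sketch, illustrative only (the status handling shown is an
 * assumption, not lifted from an actual caller):
 * @code
 *     VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict; // informational status or error, let the caller sort it out
 * @endcode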
9043 */
9044VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPUCC pVCpu)
9045{
    AssertCompile(sizeof(pVCpu->iem.s) <= sizeof(pVCpu->iem.padding)); /* (tstVMStruct can't do its job w/o instruction stats) */
9047#ifdef LOG_ENABLED
9048 iemLogCurInstr(pVCpu, true, "IEMExecOne");
9049#endif
9050
9051 /*
9052 * Do the decoding and emulation.
9053 */
9054 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9055 if (rcStrict == VINF_SUCCESS)
9056 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOne");
9057 else if (pVCpu->iem.s.cActiveMappings > 0)
9058 iemMemRollback(pVCpu);
9059
9060 if (rcStrict != VINF_SUCCESS)
9061 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9062 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9063 return rcStrict;
9064}
9065
9066
9067VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPUCC pVCpu, uint32_t *pcbWritten)
9068{
9069 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
9070 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9071 if (rcStrict == VINF_SUCCESS)
9072 {
9073 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneEx");
9074 if (pcbWritten)
9075 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
9076 }
9077 else if (pVCpu->iem.s.cActiveMappings > 0)
9078 iemMemRollback(pVCpu);
9079
9080 return rcStrict;
9081}
9082
9083
9084VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPUCC pVCpu, uint64_t OpcodeBytesPC,
9085 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
9086{
9087 VBOXSTRICTRC rcStrict;
9088 if ( cbOpcodeBytes
9089 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
9090 {
9091 iemInitDecoder(pVCpu, 0 /*fExecOpts*/);
9092#ifdef IEM_WITH_CODE_TLB
9093 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
9094 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
9095 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
9096 pVCpu->iem.s.offCurInstrStart = 0;
9097 pVCpu->iem.s.offInstrNextByte = 0;
9098 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
9099#else
9100 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
9101 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
9102#endif
9103 rcStrict = VINF_SUCCESS;
9104 }
9105 else
9106 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9107 if (rcStrict == VINF_SUCCESS)
9108 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneWithPrefetchedByPC");
9109 else if (pVCpu->iem.s.cActiveMappings > 0)
9110 iemMemRollback(pVCpu);
9111
9112 return rcStrict;
9113}
9114
9115
9116VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPUCC pVCpu, uint32_t *pcbWritten)
9117{
9118 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
9119 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_BYPASS_HANDLERS);
9120 if (rcStrict == VINF_SUCCESS)
9121 {
9122 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassEx");
9123 if (pcbWritten)
9124 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
9125 }
9126 else if (pVCpu->iem.s.cActiveMappings > 0)
9127 iemMemRollback(pVCpu);
9128
9129 return rcStrict;
9130}
9131
9132
9133VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPUCC pVCpu, uint64_t OpcodeBytesPC,
9134 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
9135{
9136 VBOXSTRICTRC rcStrict;
9137 if ( cbOpcodeBytes
9138 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
9139 {
9140 iemInitDecoder(pVCpu, IEM_F_BYPASS_HANDLERS);
9141#ifdef IEM_WITH_CODE_TLB
9142 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
9143 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
9144 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
9145 pVCpu->iem.s.offCurInstrStart = 0;
9146 pVCpu->iem.s.offInstrNextByte = 0;
9147 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
9148#else
9149 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
9150 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
9151#endif
9152 rcStrict = VINF_SUCCESS;
9153 }
9154 else
9155 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_BYPASS_HANDLERS);
9156 if (rcStrict == VINF_SUCCESS)
9157 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPC");
9158 else if (pVCpu->iem.s.cActiveMappings > 0)
9159 iemMemRollback(pVCpu);
9160
9161 return rcStrict;
9162}
9163
9164
9165/**
9166 * For handling split cacheline lock operations when the host has split-lock
9167 * detection enabled.
9168 *
9169 * This will cause the interpreter to disregard the lock prefix and implicit
9170 * locking (xchg).
9171 *
9172 * @returns Strict VBox status code.
9173 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9174 */
9175VMMDECL(VBOXSTRICTRC) IEMExecOneIgnoreLock(PVMCPUCC pVCpu)
9176{
9177 /*
9178 * Do the decoding and emulation.
9179 */
9180 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_X86_DISREGARD_LOCK);
9181 if (rcStrict == VINF_SUCCESS)
9182 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneIgnoreLock");
9183 else if (pVCpu->iem.s.cActiveMappings > 0)
9184 iemMemRollback(pVCpu);
9185
9186 if (rcStrict != VINF_SUCCESS)
9187 LogFlow(("IEMExecOneIgnoreLock: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9188 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9189 return rcStrict;
9190}
9191
9192
9193/**
9194 * Code common to IEMExecLots and IEMExecRecompilerThreaded that attempts to
9195 * inject a pending TRPM trap.
9196 */
9197VBOXSTRICTRC iemExecInjectPendingTrap(PVMCPUCC pVCpu)
9198{
9199 Assert(TRPMHasTrap(pVCpu));
9200
9201 if ( !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
9202 && !CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx))
9203 {
9204 /** @todo Can we centralize this under CPUMCanInjectInterrupt()? */
9205#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9206 bool fIntrEnabled = CPUMGetGuestGif(&pVCpu->cpum.GstCtx);
9207 if (fIntrEnabled)
9208 {
9209 if (!CPUMIsGuestInNestedHwvirtMode(IEM_GET_CTX(pVCpu)))
9210 fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
9211 else if (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
9212 fIntrEnabled = CPUMIsGuestVmxPhysIntrEnabled(IEM_GET_CTX(pVCpu));
9213 else
9214 {
9215 Assert(CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)));
9216 fIntrEnabled = CPUMIsGuestSvmPhysIntrEnabled(pVCpu, IEM_GET_CTX(pVCpu));
9217 }
9218 }
9219#else
9220 bool fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
9221#endif
9222 if (fIntrEnabled)
9223 {
9224 uint8_t u8TrapNo;
9225 TRPMEVENT enmType;
9226 uint32_t uErrCode;
9227 RTGCPTR uCr2;
9228 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /*pu8InstLen*/, NULL /*fIcebp*/);
9229 AssertRC(rc2);
9230 Assert(enmType == TRPM_HARDWARE_INT);
9231 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /*cbInstr*/);
9232
9233 TRPMResetTrap(pVCpu);
9234
9235#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9236 /* Injecting an event may cause a VM-exit. */
9237 if ( rcStrict != VINF_SUCCESS
9238 && rcStrict != VINF_IEM_RAISED_XCPT)
9239 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
9240#else
9241 NOREF(rcStrict);
9242#endif
9243 }
9244 }
9245
9246 return VINF_SUCCESS;
9247}
9248
9249
9250VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPUCC pVCpu, uint32_t cMaxInstructions, uint32_t cPollRate, uint32_t *pcInstructions)
9251{
9252 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
9253 AssertMsg(RT_IS_POWER_OF_TWO(cPollRate + 1), ("%#x\n", cPollRate));
9254 Assert(cMaxInstructions > 0);
9255
9256 /*
9257 * See if there is an interrupt pending in TRPM, inject it if we can.
9258 */
9259 /** @todo What if we are injecting an exception and not an interrupt? Is that
9260 * possible here? For now we assert it is indeed only an interrupt. */
9261 if (!TRPMHasTrap(pVCpu))
9262 { /* likely */ }
9263 else
9264 {
9265 VBOXSTRICTRC rcStrict = iemExecInjectPendingTrap(pVCpu);
9266 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9267 { /*likely */ }
9268 else
9269 return rcStrict;
9270 }
9271
9272 /*
9273 * Initial decoder init w/ prefetch, then setup setjmp.
9274 */
9275 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9276 if (rcStrict == VINF_SUCCESS)
9277 {
9278#ifdef IEM_WITH_SETJMP
9279 pVCpu->iem.s.cActiveMappings = 0; /** @todo wtf? */
9280 IEM_TRY_SETJMP(pVCpu, rcStrict)
9281#endif
9282 {
9283 /*
             * The run loop.  The instruction limit is supplied by the caller via cMaxInstructions.
9285 */
9286 uint32_t cMaxInstructionsGccStupidity = cMaxInstructions;
9287 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
9288 for (;;)
9289 {
9290 /*
9291 * Log the state.
9292 */
9293#ifdef LOG_ENABLED
9294 iemLogCurInstr(pVCpu, true, "IEMExecLots");
9295#endif
9296
9297 /*
9298 * Do the decoding and emulation.
9299 */
9300 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9301 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9302#ifdef VBOX_STRICT
9303 CPUMAssertGuestRFlagsCookie(pVM, pVCpu);
9304#endif
9305 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9306 {
9307 Assert(pVCpu->iem.s.cActiveMappings == 0);
9308 pVCpu->iem.s.cInstructions++;
9309
9310#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9311 /* Perform any VMX nested-guest instruction boundary actions. */
9312 uint64_t fCpu = pVCpu->fLocalForcedActions;
9313 if (!(fCpu & ( VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9314 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)))
9315 { /* likely */ }
9316 else
9317 {
9318 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9319 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9320 fCpu = pVCpu->fLocalForcedActions;
9321 else
9322 {
9323 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9324 break;
9325 }
9326 }
9327#endif
9328 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
9329 {
9330#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
9331 uint64_t fCpu = pVCpu->fLocalForcedActions;
9332#endif
9333 fCpu &= VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
9334 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
9335 | VMCPU_FF_TLB_FLUSH
9336 | VMCPU_FF_UNHALT );
9337
9338 if (RT_LIKELY( ( !fCpu
9339 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
9340 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF) )
9341 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) ))
9342 {
9343 if (--cMaxInstructionsGccStupidity > 0)
9344 {
                                /* Poll timers every now and then according to the caller's specs. */
9346 if ( (cMaxInstructionsGccStupidity & cPollRate) != 0
9347 || !TMTimerPollBool(pVM, pVCpu))
9348 {
9349 Assert(pVCpu->iem.s.cActiveMappings == 0);
9350 iemReInitDecoder(pVCpu);
9351 continue;
9352 }
9353 }
9354 }
9355 }
9356 Assert(pVCpu->iem.s.cActiveMappings == 0);
9357 }
9358 else if (pVCpu->iem.s.cActiveMappings > 0)
9359 iemMemRollback(pVCpu);
9360 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9361 break;
9362 }
9363 }
9364#ifdef IEM_WITH_SETJMP
9365 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
9366 {
9367 if (pVCpu->iem.s.cActiveMappings > 0)
9368 iemMemRollback(pVCpu);
9369# if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9370 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9371# endif
9372 pVCpu->iem.s.cLongJumps++;
9373 }
9374 IEM_CATCH_LONGJMP_END(pVCpu);
9375#endif
9376
9377 /*
9378 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
9379 */
9380 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
9381 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
9382 }
9383 else
9384 {
9385 if (pVCpu->iem.s.cActiveMappings > 0)
9386 iemMemRollback(pVCpu);
9387
9388#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9389 /*
9390 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
9391 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
9392 */
9393 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9394#endif
9395 }
9396
9397 /*
9398 * Maybe re-enter raw-mode and log.
9399 */
9400 if (rcStrict != VINF_SUCCESS)
9401 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9402 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9403 if (pcInstructions)
9404 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
9405 return rcStrict;
9406}
9407
9408
9409/**
9410 * Interface used by EMExecuteExec, does exit statistics and limits.
9411 *
9412 * @returns Strict VBox status code.
9413 * @param pVCpu The cross context virtual CPU structure.
9414 * @param fWillExit To be defined.
9415 * @param cMinInstructions Minimum number of instructions to execute before checking for FFs.
9416 * @param cMaxInstructions Maximum number of instructions to execute.
9417 * @param cMaxInstructionsWithoutExits
9418 * The max number of instructions without exits.
9419 * @param pStats Where to return statistics.
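 *
 * A minimal usage sketch, illustrative only (the limit values are made up):
 * @code
 *     IEMEXECFOREXITSTATS Stats;
 *     VBOXSTRICTRC rcStrict = IEMExecForExits(pVCpu,
 *                                             0,     // fWillExit - currently unused
 *                                             1,     // cMinInstructions
 *                                             1024,  // cMaxInstructions
 *                                             32,    // cMaxInstructionsWithoutExits
 *                                             &Stats);
 *     // On return Stats.cInstructions, Stats.cExits and Stats.cMaxExitDistance are filled in.
 * @endcode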
9420 */
9421VMMDECL(VBOXSTRICTRC) IEMExecForExits(PVMCPUCC pVCpu, uint32_t fWillExit, uint32_t cMinInstructions, uint32_t cMaxInstructions,
9422 uint32_t cMaxInstructionsWithoutExits, PIEMEXECFOREXITSTATS pStats)
9423{
9424 NOREF(fWillExit); /** @todo define flexible exit crits */
9425
9426 /*
9427 * Initialize return stats.
9428 */
9429 pStats->cInstructions = 0;
9430 pStats->cExits = 0;
9431 pStats->cMaxExitDistance = 0;
9432 pStats->cReserved = 0;
9433
9434 /*
9435 * Initial decoder init w/ prefetch, then setup setjmp.
9436 */
9437 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9438 if (rcStrict == VINF_SUCCESS)
9439 {
9440#ifdef IEM_WITH_SETJMP
9441 pVCpu->iem.s.cActiveMappings = 0; /** @todo wtf?!? */
9442 IEM_TRY_SETJMP(pVCpu, rcStrict)
9443#endif
9444 {
9445#ifdef IN_RING0
9446 bool const fCheckPreemptionPending = !RTThreadPreemptIsPossible() || !RTThreadPreemptIsEnabled(NIL_RTTHREAD);
9447#endif
9448 uint32_t cInstructionSinceLastExit = 0;
9449
9450 /*
             * The run loop.  The caller supplies the limits (cMaxInstructions and cMaxInstructionsWithoutExits).
9452 */
9453 PVM pVM = pVCpu->CTX_SUFF(pVM);
9454 for (;;)
9455 {
9456 /*
9457 * Log the state.
9458 */
9459#ifdef LOG_ENABLED
9460 iemLogCurInstr(pVCpu, true, "IEMExecForExits");
9461#endif
9462
9463 /*
9464 * Do the decoding and emulation.
9465 */
9466 uint32_t const cPotentialExits = pVCpu->iem.s.cPotentialExits;
9467
9468 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9469 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9470
9471 if ( cPotentialExits != pVCpu->iem.s.cPotentialExits
9472 && cInstructionSinceLastExit > 0 /* don't count the first */ )
9473 {
9474 pStats->cExits += 1;
9475 if (cInstructionSinceLastExit > pStats->cMaxExitDistance)
9476 pStats->cMaxExitDistance = cInstructionSinceLastExit;
9477 cInstructionSinceLastExit = 0;
9478 }
9479
9480 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9481 {
9482 Assert(pVCpu->iem.s.cActiveMappings == 0);
9483 pVCpu->iem.s.cInstructions++;
9484 pStats->cInstructions++;
9485 cInstructionSinceLastExit++;
9486
9487#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9488 /* Perform any VMX nested-guest instruction boundary actions. */
9489 uint64_t fCpu = pVCpu->fLocalForcedActions;
9490 if (!(fCpu & ( VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9491 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)))
9492 { /* likely */ }
9493 else
9494 {
9495 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9496 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9497 fCpu = pVCpu->fLocalForcedActions;
9498 else
9499 {
9500 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9501 break;
9502 }
9503 }
9504#endif
9505 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
9506 {
9507#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
9508 uint64_t fCpu = pVCpu->fLocalForcedActions;
9509#endif
9510 fCpu &= VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
9511 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
9512 | VMCPU_FF_TLB_FLUSH
9513 | VMCPU_FF_UNHALT );
9514 if (RT_LIKELY( ( ( !fCpu
9515 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
9516 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF))
9517 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) )
9518 || pStats->cInstructions < cMinInstructions))
9519 {
9520 if (pStats->cInstructions < cMaxInstructions)
9521 {
9522 if (cInstructionSinceLastExit <= cMaxInstructionsWithoutExits)
9523 {
9524#ifdef IN_RING0
9525 if ( !fCheckPreemptionPending
9526 || !RTThreadPreemptIsPending(NIL_RTTHREAD))
9527#endif
9528 {
9529 Assert(pVCpu->iem.s.cActiveMappings == 0);
9530 iemReInitDecoder(pVCpu);
9531 continue;
9532 }
9533#ifdef IN_RING0
9534 rcStrict = VINF_EM_RAW_INTERRUPT;
9535 break;
9536#endif
9537 }
9538 }
9539 }
9540 Assert(!(fCpu & VMCPU_FF_IEM));
9541 }
9542 Assert(pVCpu->iem.s.cActiveMappings == 0);
9543 }
9544 else if (pVCpu->iem.s.cActiveMappings > 0)
9545 iemMemRollback(pVCpu);
9546 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9547 break;
9548 }
9549 }
9550#ifdef IEM_WITH_SETJMP
9551 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
9552 {
9553 if (pVCpu->iem.s.cActiveMappings > 0)
9554 iemMemRollback(pVCpu);
9555 pVCpu->iem.s.cLongJumps++;
9556 }
9557 IEM_CATCH_LONGJMP_END(pVCpu);
9558#endif
9559
9560 /*
9561 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
9562 */
9563 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
9564 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
9565 }
9566 else
9567 {
9568 if (pVCpu->iem.s.cActiveMappings > 0)
9569 iemMemRollback(pVCpu);
9570
9571#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9572 /*
         * When a nested-guest causes an exception intercept (e.g. #PF) while fetching
         * code as part of instruction execution, we need this to fix up VINF_SVM_VMEXIT.
9575 */
9576 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9577#endif
9578 }
9579
9580 /*
9581 * Maybe re-enter raw-mode and log.
9582 */
9583 if (rcStrict != VINF_SUCCESS)
9584 LogFlow(("IEMExecForExits: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc; ins=%u exits=%u maxdist=%u\n",
9585 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp,
9586 pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict), pStats->cInstructions, pStats->cExits, pStats->cMaxExitDistance));
9587 return rcStrict;
9588}
9589
9590
9591/**
9592 * Injects a trap, fault, abort, software interrupt or external interrupt.
9593 *
9594 * The parameter list matches TRPMQueryTrapAll pretty closely.
9595 *
9596 * @returns Strict VBox status code.
9597 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9598 * @param u8TrapNo The trap number.
9599 * @param enmType What type is it (trap/fault/abort), software
9600 * interrupt or hardware interrupt.
9601 * @param uErrCode The error code if applicable.
9602 * @param uCr2 The CR2 value if applicable.
9603 * @param cbInstr The instruction length (only relevant for
9604 * software interrupts).
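 *
 * A minimal usage sketch, illustrative only (the vector is made up):
 * @code
 *     // Inject external interrupt vector 0x20; uErrCode, uCr2 and cbInstr are
 *     // ignored for TRPM_HARDWARE_INT.
 *     VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, 0x20, TRPM_HARDWARE_INT, 0, 0, 0);
 * @endcode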
9605 */
9606VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPUCC pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
9607 uint8_t cbInstr)
9608{
9609 iemInitDecoder(pVCpu, 0 /*fExecOpts*/); /** @todo wrong init function! */
9610#ifdef DBGFTRACE_ENABLED
9611 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
9612 u8TrapNo, enmType, uErrCode, uCr2);
9613#endif
9614
9615 uint32_t fFlags;
9616 switch (enmType)
9617 {
9618 case TRPM_HARDWARE_INT:
9619 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
9620 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
9621 uErrCode = uCr2 = 0;
9622 break;
9623
9624 case TRPM_SOFTWARE_INT:
9625 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
9626 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
9627 uErrCode = uCr2 = 0;
9628 break;
9629
9630 case TRPM_TRAP:
9631 case TRPM_NMI: /** @todo Distinguish NMI from exception 2. */
9632 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
9633 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
9634 if (u8TrapNo == X86_XCPT_PF)
9635 fFlags |= IEM_XCPT_FLAGS_CR2;
9636 switch (u8TrapNo)
9637 {
9638 case X86_XCPT_DF:
9639 case X86_XCPT_TS:
9640 case X86_XCPT_NP:
9641 case X86_XCPT_SS:
9642 case X86_XCPT_PF:
9643 case X86_XCPT_AC:
9644 case X86_XCPT_GP:
9645 fFlags |= IEM_XCPT_FLAGS_ERR;
9646 break;
9647 }
9648 break;
9649
9650 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9651 }
9652
9653 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
9654
9655 if (pVCpu->iem.s.cActiveMappings > 0)
9656 iemMemRollback(pVCpu);
9657
9658 return rcStrict;
9659}
9660
9661
9662/**
9663 * Injects the active TRPM event.
9664 *
9665 * @returns Strict VBox status code.
9666 * @param pVCpu The cross context virtual CPU structure.
9667 */
9668VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPUCC pVCpu)
9669{
9670#ifndef IEM_IMPLEMENTS_TASKSWITCH
9671 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
9672#else
9673 uint8_t u8TrapNo;
9674 TRPMEVENT enmType;
9675 uint32_t uErrCode;
9676 RTGCUINTPTR uCr2;
9677 uint8_t cbInstr;
9678 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr, NULL /* fIcebp */);
9679 if (RT_FAILURE(rc))
9680 return rc;
9681
9682 /** @todo r=ramshankar: Pass ICEBP info. to IEMInjectTrap() below and handle
9683 * ICEBP \#DB injection as a special case. */
9684 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
9685#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
9686 if (rcStrict == VINF_SVM_VMEXIT)
9687 rcStrict = VINF_SUCCESS;
9688#endif
9689#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9690 if (rcStrict == VINF_VMX_VMEXIT)
9691 rcStrict = VINF_SUCCESS;
9692#endif
9693 /** @todo Are there any other codes that imply the event was successfully
9694 * delivered to the guest? See @bugref{6607}. */
9695 if ( rcStrict == VINF_SUCCESS
9696 || rcStrict == VINF_IEM_RAISED_XCPT)
9697 TRPMResetTrap(pVCpu);
9698
9699 return rcStrict;
9700#endif
9701}
9702
9703
9704VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
9705{
9706 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
9707 return VERR_NOT_IMPLEMENTED;
9708}
9709
9710
9711VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
9712{
9713 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
9714 return VERR_NOT_IMPLEMENTED;
9715}
9716
9717
9718/**
9719 * Interface for HM and EM for executing string I/O OUT (write) instructions.
9720 *
9721 * This API ASSUMES that the caller has already verified that the guest code is
9722 * allowed to access the I/O port. (The I/O port is in the DX register in the
9723 * guest state.)
9724 *
9725 * @returns Strict VBox status code.
9726 * @param pVCpu The cross context virtual CPU structure.
9727 * @param cbValue The size of the I/O port access (1, 2, or 4).
9728 * @param enmAddrMode The addressing mode.
9729 * @param fRepPrefix Indicates whether a repeat prefix is used
9730 * (doesn't matter which for this instruction).
9731 * @param cbInstr The instruction length in bytes.
 * @param iEffSeg The effective segment register (X86_SREG_XXX index).
9733 * @param fIoChecked Whether the access to the I/O port has been
9734 * checked or not. It's typically checked in the
9735 * HM scenario.
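 *
 * A minimal usage sketch, illustrative only (the operand values are assumptions
 * about a hypothetical REP OUTSB exit):
 * @code
 *     // REP OUTSB, 16-bit addressing, DS segment, 2 byte instruction (F3 6E),
 *     // I/O permission already verified by the caller.
 *     VBOXSTRICTRC rcStrict = IEMExecStringIoWrite(pVCpu, 1, IEMMODE_16BIT,
 *                                                  true, 2, X86_SREG_DS, true);
 * @endcode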
9736 */
9737VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
9738 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
9739{
9740 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
9741 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
9742
9743 /*
9744 * State init.
9745 */
9746 iemInitExec(pVCpu, 0 /*fExecOpts*/);
9747
9748 /*
9749 * Switch orgy for getting to the right handler.
9750 */
9751 VBOXSTRICTRC rcStrict;
9752 if (fRepPrefix)
9753 {
9754 switch (enmAddrMode)
9755 {
9756 case IEMMODE_16BIT:
9757 switch (cbValue)
9758 {
9759 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9760 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9761 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9762 default:
9763 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
9764 }
9765 break;
9766
9767 case IEMMODE_32BIT:
9768 switch (cbValue)
9769 {
9770 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9771 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9772 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9773 default:
9774 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
9775 }
9776 break;
9777
9778 case IEMMODE_64BIT:
9779 switch (cbValue)
9780 {
9781 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9782 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9783 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9784 default:
9785 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
9786 }
9787 break;
9788
9789 default:
9790 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
9791 }
9792 }
9793 else
9794 {
9795 switch (enmAddrMode)
9796 {
9797 case IEMMODE_16BIT:
9798 switch (cbValue)
9799 {
9800 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9801 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9802 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9803 default:
9804 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
9805 }
9806 break;
9807
9808 case IEMMODE_32BIT:
9809 switch (cbValue)
9810 {
9811 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9812 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9813 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9814 default:
9815 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
9816 }
9817 break;
9818
9819 case IEMMODE_64BIT:
9820 switch (cbValue)
9821 {
9822 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9823 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9824 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9825 default:
9826 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
9827 }
9828 break;
9829
9830 default:
9831 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
9832 }
9833 }
9834
9835 if (pVCpu->iem.s.cActiveMappings)
9836 iemMemRollback(pVCpu);
9837
9838 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
9839}
9840
9841
9842/**
9843 * Interface for HM and EM for executing string I/O IN (read) instructions.
9844 *
9845 * This API ASSUMES that the caller has already verified that the guest code is
9846 * allowed to access the I/O port. (The I/O port is in the DX register in the
9847 * guest state.)
9848 *
9849 * @returns Strict VBox status code.
9850 * @param pVCpu The cross context virtual CPU structure.
9851 * @param cbValue The size of the I/O port access (1, 2, or 4).
9852 * @param enmAddrMode The addressing mode.
9853 * @param fRepPrefix Indicates whether a repeat prefix is used
9854 * (doesn't matter which for this instruction).
9855 * @param cbInstr The instruction length in bytes.
9856 * @param fIoChecked Whether the access to the I/O port has been
9857 * checked or not. It's typically checked in the
9858 * HM scenario.
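 *
 * Usage mirrors IEMExecStringIoWrite, minus the segment parameter (INS always
 * stores through ES). A minimal sketch with assumed operand values:
 * @code
 *     // REP INSD, 32-bit addressing, 2 byte instruction (F3 6D), I/O already checked.
 *     VBOXSTRICTRC rcStrict = IEMExecStringIoRead(pVCpu, 4, IEMMODE_32BIT, true, 2, true);
 * @endcode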
9859 */
9860VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
9861 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
9862{
9863 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
9864
9865 /*
9866 * State init.
9867 */
9868 iemInitExec(pVCpu, 0 /*fExecOpts*/);
9869
9870 /*
9871 * Switch orgy for getting to the right handler.
9872 */
9873 VBOXSTRICTRC rcStrict;
9874 if (fRepPrefix)
9875 {
9876 switch (enmAddrMode)
9877 {
9878 case IEMMODE_16BIT:
9879 switch (cbValue)
9880 {
9881 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
9882 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
9883 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
9884 default:
9885 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
9886 }
9887 break;
9888
9889 case IEMMODE_32BIT:
9890 switch (cbValue)
9891 {
9892 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
9893 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
9894 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
9895 default:
9896 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
9897 }
9898 break;
9899
9900 case IEMMODE_64BIT:
9901 switch (cbValue)
9902 {
9903 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
9904 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
9905 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
9906 default:
9907 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
9908 }
9909 break;
9910
9911 default:
9912 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
9913 }
9914 }
9915 else
9916 {
9917 switch (enmAddrMode)
9918 {
9919 case IEMMODE_16BIT:
9920 switch (cbValue)
9921 {
9922 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
9923 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
9924 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
9925 default:
9926 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
9927 }
9928 break;
9929
9930 case IEMMODE_32BIT:
9931 switch (cbValue)
9932 {
9933 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
9934 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
9935 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
9936 default:
9937 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
9938 }
9939 break;
9940
9941 case IEMMODE_64BIT:
9942 switch (cbValue)
9943 {
9944 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
9945 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
9946 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
9947 default:
9948 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
9949 }
9950 break;
9951
9952 default:
9953 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
9954 }
9955 }
9956
9957 if ( pVCpu->iem.s.cActiveMappings == 0
9958 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
9959 { /* likely */ }
9960 else
9961 {
9962 AssertMsg(!IOM_SUCCESS(rcStrict), ("%#x\n", VBOXSTRICTRC_VAL(rcStrict)));
9963 iemMemRollback(pVCpu);
9964 }
9965 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
9966}
9967
9968
9969/**
 * Interface for rawmode to execute an OUT (port write) instruction.
9971 *
9972 * @returns Strict VBox status code.
9973 * @param pVCpu The cross context virtual CPU structure.
9974 * @param cbInstr The instruction length in bytes.
 * @param u16Port The port to write to.
9976 * @param fImm Whether the port is specified using an immediate operand or
9977 * using the implicit DX register.
9978 * @param cbReg The register size.
9979 *
9980 * @remarks In ring-0 not all of the state needs to be synced in.
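 *
 * A minimal usage sketch, illustrative only (the operand values are assumptions):
 * @code
 *     // "out dx, al": single byte instruction, port from DX, 1 byte register.
 *     uint16_t const u16Port = (uint16_t)pVCpu->cpum.GstCtx.edx;
 *     VBOXSTRICTRC rcStrict = IEMExecDecodedOut(pVCpu, 1, u16Port, false, 1);
 * @endcode
 * IEMExecDecodedIn follows the same pattern for port reads.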
9981 */
9982VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
9983{
9984 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
9985 Assert(cbReg <= 4 && cbReg != 3);
9986
9987 iemInitExec(pVCpu, 0 /*fExecOpts*/);
9988 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_out, u16Port, cbReg,
9989 ((uint8_t)fImm << 7) | 0xf /** @todo never worked with intercepts */);
9990 Assert(!pVCpu->iem.s.cActiveMappings);
9991 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
9992}
9993
9994
9995/**
 * Interface for rawmode to execute an IN (port read) instruction.
9997 *
9998 * @returns Strict VBox status code.
9999 * @param pVCpu The cross context virtual CPU structure.
10000 * @param cbInstr The instruction length in bytes.
10001 * @param u16Port The port to read.
10002 * @param fImm Whether the port is specified using an immediate operand or
 * using the implicit DX register.
10004 * @param cbReg The register size.
10005 */
10006VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
10007{
10008 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10009 Assert(cbReg <= 4 && cbReg != 3);
10010
10011 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10012 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_in, u16Port, cbReg,
10013 ((uint8_t)fImm << 7) | 0xf /** @todo never worked with intercepts */);
10014 Assert(!pVCpu->iem.s.cActiveMappings);
10015 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10016}
10017
10018
10019/**
10020 * Interface for HM and EM to write to a CRx register.
10021 *
10022 * @returns Strict VBox status code.
10023 * @param pVCpu The cross context virtual CPU structure.
10024 * @param cbInstr The instruction length in bytes.
10025 * @param iCrReg The control register number (destination).
10026 * @param iGReg The general purpose register number (source).
10027 *
10028 * @remarks In ring-0 not all of the state needs to be synced in.
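 *
 * A minimal usage sketch, illustrative only (register numbers are assumptions
 * for a hypothetical "mov cr3, rax" exit):
 * @code
 *     // 0F 22 D8 is 3 bytes; iCrReg=3, source register is RAX.
 *     VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxWrite(pVCpu, 3, 3, X86_GREG_xAX);
 * @endcode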
10029 */
10030VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
10031{
10032 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10033 Assert(iCrReg < 16);
10034 Assert(iGReg < 16);
10035
10036 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10037 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
10038 Assert(!pVCpu->iem.s.cActiveMappings);
10039 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10040}
10041
10042
10043/**
10044 * Interface for HM and EM to read from a CRx register.
10045 *
10046 * @returns Strict VBox status code.
10047 * @param pVCpu The cross context virtual CPU structure.
10048 * @param cbInstr The instruction length in bytes.
10049 * @param iGReg The general purpose register number (destination).
10050 * @param iCrReg The control register number (source).
10051 *
10052 * @remarks In ring-0 not all of the state needs to be synced in.
10053 */
10054VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
10055{
10056 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10057 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4
10058 | CPUMCTX_EXTRN_APIC_TPR);
10059 Assert(iCrReg < 16);
10060 Assert(iGReg < 16);
10061
10062 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10063 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
10064 Assert(!pVCpu->iem.s.cActiveMappings);
10065 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10066}
10067
10068
10069/**
10070 * Interface for HM and EM to write to a DRx register.
10071 *
10072 * @returns Strict VBox status code.
10073 * @param pVCpu The cross context virtual CPU structure.
10074 * @param cbInstr The instruction length in bytes.
10075 * @param iDrReg The debug register number (destination).
10076 * @param iGReg The general purpose register number (source).
10077 *
10078 * @remarks In ring-0 not all of the state needs to be synced in.
10079 */
10080VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovDRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iDrReg, uint8_t iGReg)
10081{
10082 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10083 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_DR7);
10084 Assert(iDrReg < 8);
10085 Assert(iGReg < 16);
10086
10087 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10088 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Dd_Rd, iDrReg, iGReg);
10089 Assert(!pVCpu->iem.s.cActiveMappings);
10090 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10091}
10092
10093
10094/**
10095 * Interface for HM and EM to read from a DRx register.
10096 *
10097 * @returns Strict VBox status code.
10098 * @param pVCpu The cross context virtual CPU structure.
10099 * @param cbInstr The instruction length in bytes.
10100 * @param iGReg The general purpose register number (destination).
10101 * @param iDrReg The debug register number (source).
10102 *
10103 * @remarks In ring-0 not all of the state needs to be synced in.
10104 */
10105VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovDRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iDrReg)
10106{
10107 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10108 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_DR7);
10109 Assert(iDrReg < 8);
10110 Assert(iGReg < 16);
10111
10112 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10113 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Dd, iGReg, iDrReg);
10114 Assert(!pVCpu->iem.s.cActiveMappings);
10115 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10116}
10117
10118
10119/**
10120 * Interface for HM and EM to clear the CR0[TS] bit.
10121 *
10122 * @returns Strict VBox status code.
10123 * @param pVCpu The cross context virtual CPU structure.
10124 * @param cbInstr The instruction length in bytes.
10125 *
10126 * @remarks In ring-0 not all of the state needs to be synced in.
10127 */
10128VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPUCC pVCpu, uint8_t cbInstr)
10129{
10130 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10131
10132 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10133 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
10134 Assert(!pVCpu->iem.s.cActiveMappings);
10135 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10136}
10137
10138
10139/**
10140 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
10141 *
10142 * @returns Strict VBox status code.
10143 * @param pVCpu The cross context virtual CPU structure.
10144 * @param cbInstr The instruction length in bytes.
10145 * @param uValue The value to load into CR0.
10146 * @param GCPtrEffDst The guest-linear address if the LMSW instruction has a
10147 * memory operand. Otherwise pass NIL_RTGCPTR.
10148 *
10149 * @remarks In ring-0 not all of the state needs to be synced in.
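 *
 * A minimal usage sketch for the register form, illustrative only:
 * @code
 *     // "lmsw ax" (0F 01 F0, 3 bytes) has no memory operand, so NIL_RTGCPTR is passed.
 *     VBOXSTRICTRC rcStrict = IEMExecDecodedLmsw(pVCpu, 3, (uint16_t)pVCpu->cpum.GstCtx.eax,
 *                                                NIL_RTGCPTR);
 * @endcode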
10150 */
10151VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t uValue, RTGCPTR GCPtrEffDst)
10152{
10153 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10154
10155 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10156 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_lmsw, uValue, GCPtrEffDst);
10157 Assert(!pVCpu->iem.s.cActiveMappings);
10158 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10159}
10160
10161
10162/**
10163 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
10164 *
10165 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
10166 *
10167 * @returns Strict VBox status code.
10168 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10169 * @param cbInstr The instruction length in bytes.
10170 * @remarks In ring-0 not all of the state needs to be synced in.
10171 * @thread EMT(pVCpu)
10172 */
10173VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPUCC pVCpu, uint8_t cbInstr)
10174{
10175 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10176
10177 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10178 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
10179 Assert(!pVCpu->iem.s.cActiveMappings);
10180 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10181}
10182
10183
10184/**
10185 * Interface for HM and EM to emulate the WBINVD instruction.
10186 *
10187 * @returns Strict VBox status code.
10188 * @param pVCpu The cross context virtual CPU structure.
10189 * @param cbInstr The instruction length in bytes.
10190 *
10191 * @remarks In ring-0 not all of the state needs to be synced in.
10192 */
10193VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWbinvd(PVMCPUCC pVCpu, uint8_t cbInstr)
10194{
10195 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10196
10197 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10198 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wbinvd);
10199 Assert(!pVCpu->iem.s.cActiveMappings);
10200 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10201}
10202
10203
10204/**
10205 * Interface for HM and EM to emulate the INVD instruction.
10206 *
10207 * @returns Strict VBox status code.
10208 * @param pVCpu The cross context virtual CPU structure.
10209 * @param cbInstr The instruction length in bytes.
10210 *
10211 * @remarks In ring-0 not all of the state needs to be synced in.
10212 */
10213VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvd(PVMCPUCC pVCpu, uint8_t cbInstr)
10214{
10215 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10216
10217 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10218 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invd);
10219 Assert(!pVCpu->iem.s.cActiveMappings);
10220 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10221}
10222
10223
10224/**
10225 * Interface for HM and EM to emulate the INVLPG instruction.
10226 *
10227 * @returns Strict VBox status code.
10228 * @retval VINF_PGM_SYNC_CR3
10229 *
10230 * @param pVCpu The cross context virtual CPU structure.
10231 * @param cbInstr The instruction length in bytes.
10232 * @param GCPtrPage The effective address of the page to invalidate.
10233 *
10234 * @remarks In ring-0 not all of the state needs to be synced in.
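 *
 * A minimal usage sketch, illustrative only, assuming GCPtrPage holds the
 * guest-linear address taken from the exit:
 * @code
 *     VBOXSTRICTRC rcStrict = IEMExecDecodedInvlpg(pVCpu, 3, GCPtrPage);
 *     if (rcStrict == VINF_PGM_SYNC_CR3)
 *     {
 *         // The caller is expected to deal with the pending CR3 sync.
 *     }
 * @endcode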
10235 */
10236VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpg(PVMCPUCC pVCpu, uint8_t cbInstr, RTGCPTR GCPtrPage)
10237{
10238 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10239
10240 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10241 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_invlpg, GCPtrPage);
10242 Assert(!pVCpu->iem.s.cActiveMappings);
10243 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10244}
10245
10246
10247/**
10248 * Interface for HM and EM to emulate the INVPCID instruction.
10249 *
10250 * @returns Strict VBox status code.
10251 * @retval VINF_PGM_SYNC_CR3
10252 *
10253 * @param pVCpu The cross context virtual CPU structure.
10254 * @param cbInstr The instruction length in bytes.
10255 * @param iEffSeg The effective segment register.
10256 * @param GCPtrDesc The effective address of the INVPCID descriptor.
10257 * @param uType The invalidation type.
10258 *
10259 * @remarks In ring-0 not all of the state needs to be synced in.
10260 */
10261VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvpcid(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPTR GCPtrDesc,
10262 uint64_t uType)
10263{
10264 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 4);
10265
10266 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10267 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_invpcid, iEffSeg, GCPtrDesc, uType);
10268 Assert(!pVCpu->iem.s.cActiveMappings);
10269 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10270}
10271
10272
10273/**
10274 * Interface for HM and EM to emulate the CPUID instruction.
10275 *
10276 * @returns Strict VBox status code.
10277 *
10278 * @param pVCpu The cross context virtual CPU structure.
10279 * @param cbInstr The instruction length in bytes.
10280 *
 * @remarks Not all of the state needs to be synced in; the usual plus RAX and RCX.
10282 */
10283VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedCpuid(PVMCPUCC pVCpu, uint8_t cbInstr)
10284{
10285 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10286 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX);
10287
10288 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10289 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_cpuid);
10290 Assert(!pVCpu->iem.s.cActiveMappings);
10291 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10292}
10293
10294
10295/**
10296 * Interface for HM and EM to emulate the RDPMC instruction.
10297 *
10298 * @returns Strict VBox status code.
10299 *
10300 * @param pVCpu The cross context virtual CPU structure.
10301 * @param cbInstr The instruction length in bytes.
10302 *
10303 * @remarks Not all of the state needs to be synced in.
10304 */
10305VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdpmc(PVMCPUCC pVCpu, uint8_t cbInstr)
10306{
10307 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10308 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
10309
10310 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10311 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdpmc);
10312 Assert(!pVCpu->iem.s.cActiveMappings);
10313 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10314}
10315
10316
10317/**
10318 * Interface for HM and EM to emulate the RDTSC instruction.
10319 *
10320 * @returns Strict VBox status code.
10321 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
10322 *
10323 * @param pVCpu The cross context virtual CPU structure.
10324 * @param cbInstr The instruction length in bytes.
10325 *
10326 * @remarks Not all of the state needs to be synced in.
10327 */
10328VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtsc(PVMCPUCC pVCpu, uint8_t cbInstr)
10329{
10330 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10331 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
10332
10333 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10334 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtsc);
10335 Assert(!pVCpu->iem.s.cActiveMappings);
10336 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10337}
10338
10339
10340/**
10341 * Interface for HM and EM to emulate the RDTSCP instruction.
10342 *
10343 * @returns Strict VBox status code.
10344 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
10345 *
10346 * @param pVCpu The cross context virtual CPU structure.
10347 * @param cbInstr The instruction length in bytes.
10348 *
 * @remarks Not all of the state needs to be synced in. It is recommended
 * to include CPUMCTX_EXTRN_TSC_AUX to avoid an extra fetch call.
10351 */
10352VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtscp(PVMCPUCC pVCpu, uint8_t cbInstr)
10353{
10354 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10355 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_TSC_AUX);
10356
10357 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10358 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtscp);
10359 Assert(!pVCpu->iem.s.cActiveMappings);
10360 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10361}
10362
10363
10364/**
10365 * Interface for HM and EM to emulate the RDMSR instruction.
10366 *
10367 * @returns Strict VBox status code.
10368 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
10369 *
10370 * @param pVCpu The cross context virtual CPU structure.
10371 * @param cbInstr The instruction length in bytes.
10372 *
10373 * @remarks Not all of the state needs to be synced in. Requires RCX and
10374 * (currently) all MSRs.
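 *
 * A minimal usage sketch, illustrative only (ECX is assumed to hold the MSR
 * index in the synced guest context):
 * @code
 *     VBOXSTRICTRC rcStrict = IEMExecDecodedRdmsr(pVCpu, 2); // RDMSR is 0F 32, 2 bytes
 * @endcode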
10375 */
10376VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
10377{
10378 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10379 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_ALL_MSRS);
10380
10381 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10382 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdmsr);
10383 Assert(!pVCpu->iem.s.cActiveMappings);
10384 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10385}
10386
10387
10388/**
10389 * Interface for HM and EM to emulate the WRMSR instruction.
10390 *
10391 * @returns Strict VBox status code.
10392 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
10393 *
10394 * @param pVCpu The cross context virtual CPU structure.
10395 * @param cbInstr The instruction length in bytes.
10396 *
10397 * @remarks Not all of the state needs to be synced in. Requires RCX, RAX, RDX,
10398 * and (currently) all MSRs.
10399 */
10400VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWrmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
10401{
10402 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10403 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK
10404 | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_ALL_MSRS);
10405
10406 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10407 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wrmsr);
10408 Assert(!pVCpu->iem.s.cActiveMappings);
10409 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10410}
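
/*
 * Illustrative sketch, not part of the original file: the calling pattern an
 * HM/EM exit handler might use with the IEMExecDecodedXxx interfaces above,
 * shown here for WRMSR.  It assumes the caller has already imported the state
 * the interface asserts on (RCX, RAX, RDX and the MSRs).
 */
#if 0 /* example only, not compiled */
static VBOXSTRICTRC exampleHandleWrmsrExit(PVMCPUCC pVCpu, uint8_t cbInstr)
{
    VBOXSTRICTRC rcStrict = IEMExecDecodedWrmsr(pVCpu, cbInstr);
    if (rcStrict == VINF_IEM_RAISED_XCPT)
    {
        /* The raised exception is already pending in the guest context, so
           the emulation itself is treated as successful and the regular run
           loop will deliver the event. */
        rcStrict = VINF_SUCCESS;
    }
    return rcStrict;
}
#endif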
10411
10412
10413/**
10414 * Interface for HM and EM to emulate the MONITOR instruction.
10415 *
10416 * @returns Strict VBox status code.
10417 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
10418 *
10419 * @param pVCpu The cross context virtual CPU structure.
10420 * @param cbInstr The instruction length in bytes.
10421 *
10422 * @remarks Not all of the state needs to be synced in.
 10423 * @remarks ASSUMES the default DS segment is used and that no segment
 10424 * override prefixes are present.
10425 */
10426VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMonitor(PVMCPUCC pVCpu, uint8_t cbInstr)
10427{
10428 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10429 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);
10430
10431 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10432 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_monitor, X86_SREG_DS);
10433 Assert(!pVCpu->iem.s.cActiveMappings);
10434 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10435}
10436
10437
10438/**
10439 * Interface for HM and EM to emulate the MWAIT instruction.
10440 *
10441 * @returns Strict VBox status code.
10442 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
10443 *
10444 * @param pVCpu The cross context virtual CPU structure.
10445 * @param cbInstr The instruction length in bytes.
10446 *
10447 * @remarks Not all of the state needs to be synced in.
10448 */
10449VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMwait(PVMCPUCC pVCpu, uint8_t cbInstr)
10450{
10451 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10452 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX);
10453
10454 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10455 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_mwait);
10456 Assert(!pVCpu->iem.s.cActiveMappings);
10457 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10458}
10459
10460
10461/**
10462 * Interface for HM and EM to emulate the HLT instruction.
10463 *
10464 * @returns Strict VBox status code.
10465 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
10466 *
10467 * @param pVCpu The cross context virtual CPU structure.
10468 * @param cbInstr The instruction length in bytes.
10469 *
10470 * @remarks Not all of the state needs to be synced in.
10471 */
10472VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedHlt(PVMCPUCC pVCpu, uint8_t cbInstr)
10473{
10474 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10475
10476 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10477 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_hlt);
10478 Assert(!pVCpu->iem.s.cActiveMappings);
10479 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10480}
10481
10482
10483/**
10484 * Checks if IEM is in the process of delivering an event (interrupt or
10485 * exception).
10486 *
10487 * @returns true if we're in the process of raising an interrupt or exception,
10488 * false otherwise.
10489 * @param pVCpu The cross context virtual CPU structure.
10490 * @param puVector Where to store the vector associated with the
10491 * currently delivered event, optional.
 10492 * @param pfFlags Where to store the event delivery flags (see
10493 * IEM_XCPT_FLAGS_XXX), optional.
10494 * @param puErr Where to store the error code associated with the
10495 * event, optional.
10496 * @param puCr2 Where to store the CR2 associated with the event,
10497 * optional.
10498 * @remarks The caller should check the flags to determine if the error code and
10499 * CR2 are valid for the event.
10500 */
10501VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPUCC pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr, uint64_t *puCr2)
10502{
10503 bool const fRaisingXcpt = pVCpu->iem.s.cXcptRecursions > 0;
10504 if (fRaisingXcpt)
10505 {
10506 if (puVector)
10507 *puVector = pVCpu->iem.s.uCurXcpt;
10508 if (pfFlags)
10509 *pfFlags = pVCpu->iem.s.fCurXcpt;
10510 if (puErr)
10511 *puErr = pVCpu->iem.s.uCurXcptErr;
10512 if (puCr2)
10513 *puCr2 = pVCpu->iem.s.uCurXcptCr2;
10514 }
10515 return fRaisingXcpt;
10516}
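
/*
 * Illustrative sketch, not part of the original file: how a caller might use
 * IEMGetCurrentXcpt to find out whether IEM is in the middle of delivering an
 * event and, if so, which one.  The logging below is purely an example of the
 * query pattern and does not correspond to any existing caller.
 */
#if 0 /* example only, not compiled */
static void exampleLogPendingIemEvent(PVMCPUCC pVCpu)
{
    uint8_t  uVector = 0;
    uint32_t fFlags  = 0;
    uint32_t uErr    = 0;
    uint64_t uCr2    = 0;
    if (IEMGetCurrentXcpt(pVCpu, &uVector, &fFlags, &uErr, &uCr2))
    {
        Log(("IEM is delivering vector %#x (flags %#x)\n", uVector, fFlags));
        /* Only trust the error code and CR2 when the flags say they are valid. */
        if (fFlags & IEM_XCPT_FLAGS_ERR)
            Log(("  error code %#x\n", uErr));
        if (fFlags & IEM_XCPT_FLAGS_CR2)
            Log(("  CR2 %RX64\n", uCr2));
    }
}
#endif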
10517
10518#ifdef IN_RING3
10519
10520/**
10521 * Handles the unlikely and probably fatal merge cases.
10522 *
10523 * @returns Merged status code.
10524 * @param rcStrict Current EM status code.
10525 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
10526 * with @a rcStrict.
10527 * @param iMemMap The memory mapping index. For error reporting only.
10528 * @param pVCpu The cross context virtual CPU structure of the calling
10529 * thread, for error reporting only.
10530 */
10531DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
10532 unsigned iMemMap, PVMCPUCC pVCpu)
10533{
10534 if (RT_FAILURE_NP(rcStrict))
10535 return rcStrict;
10536
10537 if (RT_FAILURE_NP(rcStrictCommit))
10538 return rcStrictCommit;
10539
10540 if (rcStrict == rcStrictCommit)
10541 return rcStrictCommit;
10542
10543 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
10544 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
10545 pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
10546 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
10547 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
10548 return VERR_IOM_FF_STATUS_IPE;
10549}
10550
10551
10552/**
10553 * Helper for IOMR3ProcessForceFlag.
10554 *
10555 * @returns Merged status code.
10556 * @param rcStrict Current EM status code.
10557 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
10558 * with @a rcStrict.
10559 * @param iMemMap The memory mapping index. For error reporting only.
10560 * @param pVCpu The cross context virtual CPU structure of the calling
10561 * thread, for error reporting only.
10562 */
10563DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPUCC pVCpu)
10564{
10565 /* Simple. */
10566 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
10567 return rcStrictCommit;
10568
10569 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
10570 return rcStrict;
10571
10572 /* EM scheduling status codes. */
10573 if (RT_LIKELY( rcStrict >= VINF_EM_FIRST
10574 && rcStrict <= VINF_EM_LAST))
10575 {
10576 if (RT_LIKELY( rcStrictCommit >= VINF_EM_FIRST
10577 && rcStrictCommit <= VINF_EM_LAST))
10578 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
10579 }
10580
10581 /* Unlikely */
10582 return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
10583}
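
/*
 * Illustrative sketch, not part of the original file: a few concrete outcomes
 * of the merge rules above (the status codes are picked for the example; see
 * the function body for the authoritative logic).
 */
#if 0 /* example only, not compiled */
static void exampleMergeStatusOutcomes(PVMCPUCC pVCpu)
{
    /* A pending return-to-ring-3 is absorbed by the merge: the commit status wins. */
    Assert(iemR3MergeStatus(VINF_EM_RAW_TO_R3, VINF_SUCCESS, 0 /*iMemMap*/, pVCpu) == VINF_SUCCESS);

    /* A successful commit keeps whatever EM status was already pending. */
    Assert(iemR3MergeStatus(VINF_EM_RESCHEDULE, VINF_SUCCESS, 0 /*iMemMap*/, pVCpu) == VINF_EM_RESCHEDULE);

    /* When both sides are EM scheduling codes, the numerically smaller one
       (the higher priority one by EM convention) survives the merge. */
}
#endif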
10584
10585
10586/**
10587 * Called by force-flag handling code when VMCPU_FF_IEM is set.
10588 *
10589 * @returns Merge between @a rcStrict and what the commit operation returned.
10590 * @param pVM The cross context VM structure.
10591 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10592 * @param rcStrict The status code returned by ring-0 or raw-mode.
10593 */
10594VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
10595{
10596 /*
10597 * Reset the pending commit.
10598 */
10599 AssertMsg( (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
10600 & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
10601 ("%#x %#x %#x\n",
10602 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
10603 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
10604
10605 /*
10606 * Commit the pending bounce buffers (usually just one).
10607 */
10608 unsigned cBufs = 0;
10609 unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
10610 while (iMemMap-- > 0)
10611 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
10612 {
10613 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
10614 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
10615 Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
10616
10617 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
10618 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
10619 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
10620
10621 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
10622 {
10623 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
10624 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
10625 pbBuf,
10626 cbFirst,
10627 PGMACCESSORIGIN_IEM);
10628 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
10629 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
10630 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
10631 VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
10632 }
10633
10634 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
10635 {
10636 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
10637 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
10638 pbBuf + cbFirst,
10639 cbSecond,
10640 PGMACCESSORIGIN_IEM);
10641 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
10642 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
10643 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
10644 VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
10645 }
10646 cBufs++;
10647 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
10648 }
10649
10650 AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
10651 ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
10652 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
10653 pVCpu->iem.s.cActiveMappings = 0;
10654 return rcStrict;
10655}
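
/*
 * Illustrative sketch, not part of the original file: the ring-3 calling
 * pattern for IEMR3ProcessForceFlag, simplified for the example.  VMCPU_FF_IEM
 * is set when a bounce-buffered I/O or MMIO write could not be committed in
 * ring-0/raw-mode and has to be finished here in ring-3.
 */
#if 0 /* example only, not compiled */
static VBOXSTRICTRC exampleProcessIemForceFlag(PVM pVM, PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
{
    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
        rcStrict = IEMR3ProcessForceFlag(pVM, pVCpu, rcStrict);
    return rcStrict;
}
#endif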
10656
10657#endif /* IN_RING3 */
10658