VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp @ 102876

Last change on this file since 102876 was 102876, checked in by vboxsync, 14 months ago

VMM/IEM: Call different threaded functions for each branch in a conditional jump (jcc, loop, loopcc) so we can quit immediately when taking a different branch from what we did during compilation. bugref:10371

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 443.1 KB
1/* $Id: IEMAll.cpp 102876 2024-01-15 14:26:27Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/** @page pg_iem IEM - Interpreted Execution Manager
30 *
31 * The interpreted execution manager (IEM) is for executing short guest code
32 * sequences that are causing too many exits / virtualization traps. It will
33 * also be used to interpret single instructions, thus replacing the selective
34 * interpreters in EM and IOM.
35 *
36 * Design goals:
37 * - Relatively small footprint, although we favour speed and correctness
38 * over size.
39 * - Reasonably fast.
40 * - Correctly handle lock prefixed instructions.
41 * - Complete instruction set - eventually.
42 * - Refactorable into a recompiler, maybe.
43 * - Replace EMInterpret*.
44 *
45 * Using the existing disassembler has been considered; however, this is thought
46 * to conflict with speed as the disassembler chews things a bit too much while
47 * leaving us with a somewhat complicated state to interpret afterwards.
48 *
49 *
50 * The current code is very much work in progress. You've been warned!
51 *
52 *
53 * @section sec_iem_fpu_instr FPU Instructions
54 *
55 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
56 * same or equivalent instructions on the host FPU. To make life easy, we also
57 * let the FPU prioritize the unmasked exceptions for us. This, however, only
58 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
59 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
60 * can trigger spurious FPU exceptions.
61 *
62 * The guest FPU state is not loaded into the host CPU and kept there till we
63 * leave IEM because the calling conventions have declared an all year open
64 * season on much of the FPU state. For instance an innocent looking call to
65 * memcpy might end up using a whole bunch of XMM or MM registers if the
66 * particular implementation finds it worthwhile.
67 *
68 *
69 * @section sec_iem_logging Logging
70 *
71 * The IEM code uses the "IEM" log group for the main logging. The different
72 * logging levels/flags are generally used for the following purposes:
73 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
74 * - Flow (LogFlow) : Basic enter/exit IEM state info.
75 * - Level 2 (Log2) : ?
76 * - Level 3 (Log3) : More detailed enter/exit IEM state info.
77 * - Level 4 (Log4) : Decoding mnemonics w/ EIP.
78 * - Level 5 (Log5) : Decoding details.
79 * - Level 6 (Log6) : Enables/disables the lockstep comparison with REM.
80 * - Level 7 (Log7) : iret++ execution logging.
81 * - Level 8 (Log8) :
82 * - Level 9 (Log9) :
83 * - Level 10 (Log10): TLBs.
84 * - Level 11 (Log11): Unmasked FPU exceptions.
85 *
86 * The \"IEM_MEM\" log group covers most of memory related details logging,
87 * except for errors and exceptions:
88 * - Level 1 (Log) : Reads.
89 * - Level 2 (Log2) : Read fallbacks.
90 * - Level 3 (Log3) : MemMap read.
91 * - Level 4 (Log4) : MemMap read fallbacks.
92 * - Level 5 (Log5) : Writes.
93 * - Level 6 (Log6) : Write fallbacks.
94 * - Level 7 (Log7) : MemMap writes and read-writes.
95 * - Level 8 (Log8) : MemMap write and read-write fallbacks.
96 * - Level 9 (Log9) : Stack reads.
97 * - Level 10 (Log10): Stack read fallbacks.
98 * - Level 11 (Log11): Stack writes.
99 * - Level 12 (Log12): Stack write fallbacks.
100 * - Flow (LogFlow) :
101 *
102 * The SVM (AMD-V) and VMX (VT-x) code has the following assignments:
103 * - Level 1 (Log) : Errors and other major events.
104 * - Flow (LogFlow) : Misc flow stuff (cleanup?)
105 * - Level 2 (Log2) : VM exits.
106 *
107 * The syscall logging level assignments:
108 * - Level 1: DOS and BIOS.
109 * - Level 2: Windows 3.x
110 * - Level 3: Linux.
111 */
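/*
 * Illustrative sketch only (not part of the build): with the assignments above, code in this
 * file emits its traces through the usual VBox Log macros under LOG_GROUP_IEM.  The Log10 line
 * mirrors a real call site further down; the Log4 format string is merely an assumed example
 * of the "decoding mnemonics w/ EIP" level, not an actual call site.
 *
 *      Log4(("decode %04x:%08RX64: nop\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
 *      Log10(("IEMTlbInvalidateAll\n"));
 */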
112
113/* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
114#ifdef _MSC_VER
115# pragma warning(disable:4505)
116#endif
117
118
119/*********************************************************************************************************************************
120* Header Files *
121*********************************************************************************************************************************/
122#define LOG_GROUP LOG_GROUP_IEM
123#define VMCPU_INCL_CPUM_GST_CTX
124#include <VBox/vmm/iem.h>
125#include <VBox/vmm/cpum.h>
126#include <VBox/vmm/apic.h>
127#include <VBox/vmm/pdm.h>
128#include <VBox/vmm/pgm.h>
129#include <VBox/vmm/iom.h>
130#include <VBox/vmm/em.h>
131#include <VBox/vmm/hm.h>
132#include <VBox/vmm/nem.h>
133#include <VBox/vmm/gim.h>
134#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
135# include <VBox/vmm/em.h>
136# include <VBox/vmm/hm_svm.h>
137#endif
138#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
139# include <VBox/vmm/hmvmxinline.h>
140#endif
141#include <VBox/vmm/tm.h>
142#include <VBox/vmm/dbgf.h>
143#include <VBox/vmm/dbgftrace.h>
144#include "IEMInternal.h"
145#include <VBox/vmm/vmcc.h>
146#include <VBox/log.h>
147#include <VBox/err.h>
148#include <VBox/param.h>
149#include <VBox/dis.h>
150#include <iprt/asm-math.h>
151#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
152# include <iprt/asm-amd64-x86.h>
153#elif defined(RT_ARCH_ARM64) || defined(RT_ARCH_ARM32)
154# include <iprt/asm-arm.h>
155#endif
156#include <iprt/assert.h>
157#include <iprt/string.h>
158#include <iprt/x86.h>
159
160#include "IEMInline.h"
161
162
163/*********************************************************************************************************************************
164* Structures and Typedefs *
165*********************************************************************************************************************************/
166/**
167 * CPU exception classes.
168 */
169typedef enum IEMXCPTCLASS
170{
171 IEMXCPTCLASS_BENIGN,
172 IEMXCPTCLASS_CONTRIBUTORY,
173 IEMXCPTCLASS_PAGE_FAULT,
174 IEMXCPTCLASS_DOUBLE_FAULT
175} IEMXCPTCLASS;
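/* Note: these classes feed the usual x86 rules for combining an exception raised while another
   one is being delivered: benign pairings are delivered serially, a contributory exception on
   top of a contributory one escalates to #DF, and a page fault on top of another page fault or
   a contributory exception likewise escalates to #DF (a further fault during #DF delivery ends
   in triple fault / shutdown). */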
176
177
178/*********************************************************************************************************************************
179* Global Variables *
180*********************************************************************************************************************************/
181#if defined(IEM_LOG_MEMORY_WRITES)
182/** What IEM just wrote. */
183uint8_t g_abIemWrote[256];
184/** How much IEM just wrote. */
185size_t g_cbIemWrote;
186#endif
187
188
189/*********************************************************************************************************************************
190* Internal Functions *
191*********************************************************************************************************************************/
192static VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,
193 uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT;
194
195
196/**
197 * Calculates IEM_F_BRK_PENDING_XXX (IEM_F_PENDING_BRK_MASK) flags, slow code
198 * path.
199 *
200 * @returns IEM_F_BRK_PENDING_XXX or zero.
201 * @param pVCpu The cross context virtual CPU structure of the
202 * calling thread.
203 *
204 * @note Don't call directly, use iemCalcExecDbgFlags instead.
205 */
206uint32_t iemCalcExecDbgFlagsSlow(PVMCPUCC pVCpu)
207{
208 uint32_t fExec = 0;
209
210 /*
211 * Process guest breakpoints.
212 */
213#define PROCESS_ONE_BP(a_fDr7, a_iBp) do { \
214 if (a_fDr7 & X86_DR7_L_G(a_iBp)) \
215 { \
216 switch (X86_DR7_GET_RW(a_fDr7, a_iBp)) \
217 { \
218 case X86_DR7_RW_EO: \
219 fExec |= IEM_F_PENDING_BRK_INSTR; \
220 break; \
221 case X86_DR7_RW_WO: \
222 case X86_DR7_RW_RW: \
223 fExec |= IEM_F_PENDING_BRK_DATA; \
224 break; \
225 case X86_DR7_RW_IO: \
226 fExec |= IEM_F_PENDING_BRK_X86_IO; \
227 break; \
228 } \
229 } \
230 } while (0)
231
232 uint32_t const fGstDr7 = (uint32_t)pVCpu->cpum.GstCtx.dr[7];
233 if (fGstDr7 & X86_DR7_ENABLED_MASK)
234 {
235 PROCESS_ONE_BP(fGstDr7, 0);
236 PROCESS_ONE_BP(fGstDr7, 1);
237 PROCESS_ONE_BP(fGstDr7, 2);
238 PROCESS_ONE_BP(fGstDr7, 3);
239 }
240
241 /*
242 * Process hypervisor breakpoints.
243 */
244 uint32_t const fHyperDr7 = DBGFBpGetDR7(pVCpu->CTX_SUFF(pVM));
245 if (fHyperDr7 & X86_DR7_ENABLED_MASK)
246 {
247 PROCESS_ONE_BP(fHyperDr7, 0);
248 PROCESS_ONE_BP(fHyperDr7, 1);
249 PROCESS_ONE_BP(fHyperDr7, 2);
250 PROCESS_ONE_BP(fHyperDr7, 3);
251 }
252
253 return fExec;
254}
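/* Worked example: a guest DR7 of 0x00000401 has L0 set (plus the always-one bit 10) with
   R/W0=00, i.e. an execute-only breakpoint, so the code above yields IEM_F_PENDING_BRK_INSTR.
   A DR7 of 0x000f0401 instead has R/W0=11 and LEN0=11, i.e. a 4-byte read/write data
   breakpoint, and yields IEM_F_PENDING_BRK_DATA. */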
255
256
257/**
258 * Initializes the decoder state.
259 *
260 * iemReInitDecoder is mostly a copy of this function.
261 *
262 * @param pVCpu The cross context virtual CPU structure of the
263 * calling thread.
264 * @param fExecOpts Optional execution flags:
265 * - IEM_F_BYPASS_HANDLERS
266 * - IEM_F_X86_DISREGARD_LOCK
267 */
268DECLINLINE(void) iemInitDecoder(PVMCPUCC pVCpu, uint32_t fExecOpts)
269{
270 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
271 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
272 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
273 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
274 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
275 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
276 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
277 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
278 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
279 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
280
281 /* Execution state: */
282 uint32_t fExec;
283 pVCpu->iem.s.fExec = fExec = iemCalcExecFlags(pVCpu) | fExecOpts;
284
285 /* Decoder state: */
286 pVCpu->iem.s.enmDefAddrMode = fExec & IEM_F_MODE_CPUMODE_MASK; /** @todo check if this is correct... */
287 pVCpu->iem.s.enmEffAddrMode = fExec & IEM_F_MODE_CPUMODE_MASK;
288 if ((fExec & IEM_F_MODE_CPUMODE_MASK) != IEMMODE_64BIT)
289 {
290 pVCpu->iem.s.enmDefOpSize = fExec & IEM_F_MODE_CPUMODE_MASK; /** @todo check if this is correct... */
291 pVCpu->iem.s.enmEffOpSize = fExec & IEM_F_MODE_CPUMODE_MASK;
292 }
293 else
294 {
295 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
296 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
297 }
298 pVCpu->iem.s.fPrefixes = 0;
299 pVCpu->iem.s.uRexReg = 0;
300 pVCpu->iem.s.uRexB = 0;
301 pVCpu->iem.s.uRexIndex = 0;
302 pVCpu->iem.s.idxPrefix = 0;
303 pVCpu->iem.s.uVex3rdReg = 0;
304 pVCpu->iem.s.uVexLength = 0;
305 pVCpu->iem.s.fEvexStuff = 0;
306 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
307#ifdef IEM_WITH_CODE_TLB
308 pVCpu->iem.s.pbInstrBuf = NULL;
309 pVCpu->iem.s.offInstrNextByte = 0;
310 pVCpu->iem.s.offCurInstrStart = 0;
311# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
312 pVCpu->iem.s.offOpcode = 0;
313# endif
314# ifdef VBOX_STRICT
315 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
316 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
317 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
318 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
319# endif
320#else
321 pVCpu->iem.s.offOpcode = 0;
322 pVCpu->iem.s.cbOpcode = 0;
323#endif
324 pVCpu->iem.s.offModRm = 0;
325 pVCpu->iem.s.cActiveMappings = 0;
326 pVCpu->iem.s.iNextMapping = 0;
327 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
328
329#ifdef DBGFTRACE_ENABLED
330 switch (IEM_GET_CPU_MODE(pVCpu))
331 {
332 case IEMMODE_64BIT:
333 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.rip);
334 break;
335 case IEMMODE_32BIT:
336 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
337 break;
338 case IEMMODE_16BIT:
339 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
340 break;
341 }
342#endif
343}
344
345
346/**
347 * Reinitializes the decoder state 2nd+ loop of IEMExecLots.
348 *
349 * This is mostly a copy of iemInitDecoder.
350 *
351 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
352 */
353DECLINLINE(void) iemReInitDecoder(PVMCPUCC pVCpu)
354{
355 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
356 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
357 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
358 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
359 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
360 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
361 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
362 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
363 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
364
365 /* ASSUMES: Anyone changing CPU state affecting the fExec bits will update them! */
366 AssertMsg((pVCpu->iem.s.fExec & ~IEM_F_USER_OPTS) == iemCalcExecFlags(pVCpu),
367 ("fExec=%#x iemCalcExecModeFlags=%#x\n", pVCpu->iem.s.fExec, iemCalcExecFlags(pVCpu)));
368
369 IEMMODE const enmMode = IEM_GET_CPU_MODE(pVCpu);
370 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
371 pVCpu->iem.s.enmEffAddrMode = enmMode;
372 if (enmMode != IEMMODE_64BIT)
373 {
374 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
375 pVCpu->iem.s.enmEffOpSize = enmMode;
376 }
377 else
378 {
379 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
380 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
381 }
382 pVCpu->iem.s.fPrefixes = 0;
383 pVCpu->iem.s.uRexReg = 0;
384 pVCpu->iem.s.uRexB = 0;
385 pVCpu->iem.s.uRexIndex = 0;
386 pVCpu->iem.s.idxPrefix = 0;
387 pVCpu->iem.s.uVex3rdReg = 0;
388 pVCpu->iem.s.uVexLength = 0;
389 pVCpu->iem.s.fEvexStuff = 0;
390 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
391#ifdef IEM_WITH_CODE_TLB
392 if (pVCpu->iem.s.pbInstrBuf)
393 {
394 uint64_t off = (enmMode == IEMMODE_64BIT
395 ? pVCpu->cpum.GstCtx.rip
396 : pVCpu->cpum.GstCtx.eip + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base)
397 - pVCpu->iem.s.uInstrBufPc;
398 if (off < pVCpu->iem.s.cbInstrBufTotal)
399 {
400 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
401 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
402 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
403 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
404 else
405 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
406 }
407 else
408 {
409 pVCpu->iem.s.pbInstrBuf = NULL;
410 pVCpu->iem.s.offInstrNextByte = 0;
411 pVCpu->iem.s.offCurInstrStart = 0;
412 pVCpu->iem.s.cbInstrBuf = 0;
413 pVCpu->iem.s.cbInstrBufTotal = 0;
414 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
415 }
416 }
417 else
418 {
419 pVCpu->iem.s.offInstrNextByte = 0;
420 pVCpu->iem.s.offCurInstrStart = 0;
421 pVCpu->iem.s.cbInstrBuf = 0;
422 pVCpu->iem.s.cbInstrBufTotal = 0;
423# ifdef VBOX_STRICT
424 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
425# endif
426 }
427# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
428 pVCpu->iem.s.offOpcode = 0;
429# endif
430#else /* !IEM_WITH_CODE_TLB */
431 pVCpu->iem.s.cbOpcode = 0;
432 pVCpu->iem.s.offOpcode = 0;
433#endif /* !IEM_WITH_CODE_TLB */
434 pVCpu->iem.s.offModRm = 0;
435 Assert(pVCpu->iem.s.cActiveMappings == 0);
436 pVCpu->iem.s.iNextMapping = 0;
437 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
438 Assert(!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS));
439
440#ifdef DBGFTRACE_ENABLED
441 switch (enmMode)
442 {
443 case IEMMODE_64BIT:
444 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.rip);
445 break;
446 case IEMMODE_32BIT:
447 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
448 break;
449 case IEMMODE_16BIT:
450 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
451 break;
452 }
453#endif
454}
455
456
457
458/**
459 * Prefetches opcodes when starting execution for the first time.
460 *
461 * @returns Strict VBox status code.
462 * @param pVCpu The cross context virtual CPU structure of the
463 * calling thread.
464 * @param fExecOpts Optional execution flags:
465 * - IEM_F_BYPASS_HANDLERS
466 * - IEM_F_X86_DISREGARD_LOCK
467 */
468static VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPUCC pVCpu, uint32_t fExecOpts) RT_NOEXCEPT
469{
470 iemInitDecoder(pVCpu, fExecOpts);
471
472#ifndef IEM_WITH_CODE_TLB
473 /*
474 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
475 *
476 * First translate CS:rIP to a physical address.
477 *
478 * Note! The iemOpcodeFetchMoreBytes code depends on this here code to fetch
479 * all relevant bytes from the first page, as it ASSUMES it's only ever
480 * called for dealing with CS.LIM, page crossing and instructions that
481 * are too long.
482 */
483 uint32_t cbToTryRead;
484 RTGCPTR GCPtrPC;
485 if (IEM_IS_64BIT_CODE(pVCpu))
486 {
487 cbToTryRead = GUEST_PAGE_SIZE;
488 GCPtrPC = pVCpu->cpum.GstCtx.rip;
489 if (IEM_IS_CANONICAL(GCPtrPC))
490 cbToTryRead = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
491 else
492 return iemRaiseGeneralProtectionFault0(pVCpu);
493 }
494 else
495 {
496 uint32_t GCPtrPC32 = pVCpu->cpum.GstCtx.eip;
497 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu), ("%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
498 if (GCPtrPC32 <= pVCpu->cpum.GstCtx.cs.u32Limit)
499 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrPC32 + 1;
500 else
501 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
502 if (cbToTryRead) { /* likely */ }
503 else /* overflowed */
504 {
505 Assert(GCPtrPC32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
506 cbToTryRead = UINT32_MAX;
507 }
508 GCPtrPC = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrPC32;
509 Assert(GCPtrPC <= UINT32_MAX);
510 }
511
512 PGMPTWALK Walk;
513 int rc = PGMGstGetPage(pVCpu, GCPtrPC, &Walk);
514 if (RT_SUCCESS(rc))
515 Assert(Walk.fSucceeded); /* probable. */
516 else
517 {
518 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
519# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
520 if (Walk.fFailed & PGM_WALKFAIL_EPT)
521 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
522# endif
523 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, rc);
524 }
525 if ((Walk.fEffective & X86_PTE_US) || IEM_GET_CPL(pVCpu) != 3) { /* likely */ }
526 else
527 {
528 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
529# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
530 if (Walk.fFailed & PGM_WALKFAIL_EPT)
531 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
532# endif
533 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
534 }
535 if (!(Walk.fEffective & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
536 else
537 {
538 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
539# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
540 if (Walk.fFailed & PGM_WALKFAIL_EPT)
541 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
542# endif
543 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
544 }
545 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
546 /** @todo Check reserved bits and such stuff. PGM is better at doing
547 * that, so do it when implementing the guest virtual address
548 * TLB... */
549
550 /*
551 * Read the bytes at this address.
552 */
553 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
554 if (cbToTryRead > cbLeftOnPage)
555 cbToTryRead = cbLeftOnPage;
556 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
557 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
558
559 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
560 {
561 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
562 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
563 { /* likely */ }
564 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
565 {
566 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
567 GCPtrPC, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
568 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
569 }
570 else
571 {
572 Log((RT_SUCCESS(rcStrict)
573 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
574 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
575 GCPtrPC, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
576 return rcStrict;
577 }
578 }
579 else
580 {
581 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
582 if (RT_SUCCESS(rc))
583 { /* likely */ }
584 else
585 {
586 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
587 GCPtrPC, GCPhys, rc, cbToTryRead));
588 return rc;
589 }
590 }
591 pVCpu->iem.s.cbOpcode = cbToTryRead;
592#endif /* !IEM_WITH_CODE_TLB */
593 return VINF_SUCCESS;
594}
595
596
597/**
598 * Invalidates the IEM TLBs.
599 *
600 * This is called internally as well as by PGM when moving GC mappings.
601 *
602 * @param pVCpu The cross context virtual CPU structure of the calling
603 * thread.
604 */
605VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPUCC pVCpu)
606{
607#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
608 Log10(("IEMTlbInvalidateAll\n"));
609# ifdef IEM_WITH_CODE_TLB
610 pVCpu->iem.s.cbInstrBufTotal = 0;
611 pVCpu->iem.s.CodeTlb.uTlbRevision += IEMTLB_REVISION_INCR;
612 if (pVCpu->iem.s.CodeTlb.uTlbRevision != 0)
613 { /* very likely */ }
614 else
615 {
616 pVCpu->iem.s.CodeTlb.uTlbRevision = IEMTLB_REVISION_INCR;
617 unsigned i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
618 while (i-- > 0)
619 pVCpu->iem.s.CodeTlb.aEntries[i].uTag = 0;
620 }
621# endif
622
623# ifdef IEM_WITH_DATA_TLB
624 pVCpu->iem.s.DataTlb.uTlbRevision += IEMTLB_REVISION_INCR;
625 if (pVCpu->iem.s.DataTlb.uTlbRevision != 0)
626 { /* very likely */ }
627 else
628 {
629 pVCpu->iem.s.DataTlb.uTlbRevision = IEMTLB_REVISION_INCR;
630 unsigned i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
631 while (i-- > 0)
632 pVCpu->iem.s.DataTlb.aEntries[i].uTag = 0;
633 }
634# endif
635#else
636 RT_NOREF(pVCpu);
637#endif
638}
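/* How the O(1) flush above works: entries are tagged with (page tag | uTlbRevision) and a
   lookup only accepts an entry whose uTag carries the *current* revision (see
   IEMTlbInvalidatePage below and the code TLB fetch path).  Bumping uTlbRevision therefore
   invalidates every entry without touching the array; only on the rare revision wrap-around
   do we pay for explicitly clearing all the tags. */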
639
640
641/**
642 * Invalidates a page in the TLBs.
643 *
644 * @param pVCpu The cross context virtual CPU structure of the calling
645 * thread.
646 * @param GCPtr The address of the page to invalidate
647 * @thread EMT(pVCpu)
648 */
649VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPUCC pVCpu, RTGCPTR GCPtr)
650{
651#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
652 Log10(("IEMTlbInvalidatePage: GCPtr=%RGv\n", GCPtr));
653 GCPtr = IEMTLB_CALC_TAG_NO_REV(GCPtr);
654 Assert(!(GCPtr >> (48 - X86_PAGE_SHIFT)));
655 uintptr_t const idx = IEMTLB_TAG_TO_INDEX(GCPtr);
656
657# ifdef IEM_WITH_CODE_TLB
658 if (pVCpu->iem.s.CodeTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.CodeTlb.uTlbRevision))
659 {
660 pVCpu->iem.s.CodeTlb.aEntries[idx].uTag = 0;
661 if (GCPtr == IEMTLB_CALC_TAG_NO_REV(pVCpu->iem.s.uInstrBufPc))
662 pVCpu->iem.s.cbInstrBufTotal = 0;
663 }
664# endif
665
666# ifdef IEM_WITH_DATA_TLB
667 if (pVCpu->iem.s.DataTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.DataTlb.uTlbRevision))
668 pVCpu->iem.s.DataTlb.aEntries[idx].uTag = 0;
669# endif
670#else
671 NOREF(pVCpu); NOREF(GCPtr);
672#endif
673}
674
675
676#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
677/**
678 * Invalidates both TLBs, the slow way, following a rollover.
679 *
680 * Worker for IEMTlbInvalidateAllPhysical,
681 * IEMTlbInvalidateAllPhysicalAllCpus, iemOpcodeFetchBytesJmp, iemMemMap,
682 * iemMemMapJmp and others.
683 *
684 * @thread EMT(pVCpu)
685 */
686static void IEMTlbInvalidateAllPhysicalSlow(PVMCPUCC pVCpu)
687{
688 Log10(("IEMTlbInvalidateAllPhysicalSlow\n"));
689 ASMAtomicWriteU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
690 ASMAtomicWriteU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
691
692 unsigned i;
693# ifdef IEM_WITH_CODE_TLB
694 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
695 while (i-- > 0)
696 {
697 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
698 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~( IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
699 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
700 }
701# endif
702# ifdef IEM_WITH_DATA_TLB
703 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
704 while (i-- > 0)
705 {
706 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
707 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~( IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
708 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
709 }
710# endif
711
712}
713#endif
714
715
716/**
717 * Invalidates the host physical aspects of the IEM TLBs.
718 *
719 * This is called internally as well as by PGM when moving GC mappings.
720 *
721 * @param pVCpu The cross context virtual CPU structure of the calling
722 * thread.
723 * @note Currently not used.
724 */
725VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPUCC pVCpu)
726{
727#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
728 /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
729 Log10(("IEMTlbInvalidateAllPhysical\n"));
730
731# ifdef IEM_WITH_CODE_TLB
732 pVCpu->iem.s.cbInstrBufTotal = 0;
733# endif
734 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
735 if (RT_LIKELY(uTlbPhysRev > IEMTLB_PHYS_REV_INCR * 2))
736 {
737 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
738 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
739 }
740 else
741 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
742#else
743 NOREF(pVCpu);
744#endif
745}
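/* Same trick as the virtual-tag revision, but for the host-physical side: each entry records
   the physical revision it was resolved against in fFlagsAndPhysRev (IEMTLBE_F_PHYS_REV), and
   the fetch code redoes the PGM physical lookup whenever that value no longer matches
   uTlbPhysRev.  Bumping uTlbPhysRev thus drops all cached pbMappingR3 pointers and access
   flags in one go; IEMTlbInvalidateAllPhysicalSlow above handles the wrap-around case by
   scrubbing every entry. */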
746
747
748/**
749 * Invalidates the host physical aspects of the IEM TLBs.
750 *
751 * This is called internally as well as by PGM when moving GC mappings.
752 *
753 * @param pVM The cross context VM structure.
754 * @param idCpuCaller The ID of the calling EMT if available to the caller,
755 * otherwise NIL_VMCPUID.
756 * @param enmReason The reason we're called.
757 *
758 * @remarks Caller holds the PGM lock.
759 */
760VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVMCC pVM, VMCPUID idCpuCaller, IEMTLBPHYSFLUSHREASON enmReason)
761{
762#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
763 PVMCPUCC const pVCpuCaller = idCpuCaller >= pVM->cCpus ? VMMGetCpu(pVM) : VMMGetCpuById(pVM, idCpuCaller);
764 if (pVCpuCaller)
765 VMCPU_ASSERT_EMT(pVCpuCaller);
766 Log10(("IEMTlbInvalidateAllPhysicalAllCpus: %d\n", enmReason)); RT_NOREF(enmReason);
767
768 VMCC_FOR_EACH_VMCPU(pVM)
769 {
770# ifdef IEM_WITH_CODE_TLB
771 if (pVCpuCaller == pVCpu)
772 pVCpu->iem.s.cbInstrBufTotal = 0;
773# endif
774
775 uint64_t const uTlbPhysRevPrev = ASMAtomicUoReadU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev);
776 uint64_t uTlbPhysRevNew = uTlbPhysRevPrev + IEMTLB_PHYS_REV_INCR;
777 if (RT_LIKELY(uTlbPhysRevNew > IEMTLB_PHYS_REV_INCR * 2))
778 { /* likely */}
779 else if (pVCpuCaller != pVCpu)
780 uTlbPhysRevNew = IEMTLB_PHYS_REV_INCR;
781 else
782 {
783 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
784 continue;
785 }
786 ASMAtomicCmpXchgU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev);
787 ASMAtomicCmpXchgU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev);
788 }
789 VMCC_FOR_EACH_VMCPU_END(pVM);
790
791#else
792 RT_NOREF(pVM, idCpuCaller, enmReason);
793#endif
794}
795
796
797/**
798 * Flushes the prefetch buffer, light version.
799 */
800void iemOpcodeFlushLight(PVMCPUCC pVCpu, uint8_t cbInstr)
801{
802#ifndef IEM_WITH_CODE_TLB
803 pVCpu->iem.s.cbOpcode = cbInstr;
804#else
805 RT_NOREF(pVCpu, cbInstr);
806#endif
807}
808
809
810/**
811 * Flushes the prefetch buffer, heavy version.
812 */
813void iemOpcodeFlushHeavy(PVMCPUCC pVCpu, uint8_t cbInstr)
814{
815#ifndef IEM_WITH_CODE_TLB
816 pVCpu->iem.s.cbOpcode = cbInstr; /* Note! SVM and VT-x may set this to zero on exit, rather than the instruction length. */
817#elif 1
818 pVCpu->iem.s.cbInstrBufTotal = 0;
819 RT_NOREF(cbInstr);
820#else
821 RT_NOREF(pVCpu, cbInstr);
822#endif
823}
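/* The difference between the two flush flavours: the light one merely trims the legacy opcode
   buffer (a no-op in code-TLB builds), while the heavy one also zeroes cbInstrBufTotal so the
   next opcode fetch falls back into iemOpcodeFetchBytesJmp and re-validates the code TLB entry
   and its mapping. */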
824
825
826
827#ifdef IEM_WITH_CODE_TLB
828
829/**
830 * Tries to fetch @a cbDst opcode bytes, raises the appropriate exception on
831 * failure and jumps.
832 *
833 * We end up here for a number of reasons:
834 * - pbInstrBuf isn't yet initialized.
835 * - Advancing beyond the buffer boundary (e.g. cross page).
836 * - Advancing beyond the CS segment limit.
837 * - Fetching from non-mappable page (e.g. MMIO).
838 *
839 * @param pVCpu The cross context virtual CPU structure of the
840 * calling thread.
841 * @param pvDst Where to return the bytes.
842 * @param cbDst Number of bytes to read. A value of zero is
843 * allowed for initializing pbInstrBuf (the
844 * recompiler does this). In this case it is best
845 * to set pbInstrBuf to NULL prior to the call.
846 */
847void iemOpcodeFetchBytesJmp(PVMCPUCC pVCpu, size_t cbDst, void *pvDst) IEM_NOEXCEPT_MAY_LONGJMP
848{
849# ifdef IN_RING3
850 for (;;)
851 {
852 Assert(cbDst <= 8);
853 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
854
855 /*
856 * We might have a partial buffer match, deal with that first to make the
857 * rest simpler. This is the first part of the cross page/buffer case.
858 */
859 if (pVCpu->iem.s.pbInstrBuf != NULL)
860 {
861 if (offBuf < pVCpu->iem.s.cbInstrBuf)
862 {
863 Assert(offBuf + cbDst > pVCpu->iem.s.cbInstrBuf);
864 uint32_t const cbCopy = pVCpu->iem.s.cbInstrBuf - pVCpu->iem.s.offInstrNextByte;
865 memcpy(pvDst, &pVCpu->iem.s.pbInstrBuf[offBuf], cbCopy);
866
867 cbDst -= cbCopy;
868 pvDst = (uint8_t *)pvDst + cbCopy;
869 offBuf += cbCopy;
870 pVCpu->iem.s.offInstrNextByte += offBuf;
871 }
872 }
873
874 /*
875 * Check segment limit, figuring how much we're allowed to access at this point.
876 *
877 * We will fault immediately if RIP is past the segment limit / in non-canonical
878 * territory. If we do continue, there are one or more bytes to read before we
879 * end up in trouble and we need to do that first before faulting.
880 */
881 RTGCPTR GCPtrFirst;
882 uint32_t cbMaxRead;
883 if (IEM_IS_64BIT_CODE(pVCpu))
884 {
885 GCPtrFirst = pVCpu->cpum.GstCtx.rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
886 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
887 { /* likely */ }
888 else
889 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
890 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
891 }
892 else
893 {
894 GCPtrFirst = pVCpu->cpum.GstCtx.eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
895 /* Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu)); - this is allowed */
896 if (RT_LIKELY((uint32_t)GCPtrFirst <= pVCpu->cpum.GstCtx.cs.u32Limit))
897 { /* likely */ }
898 else /** @todo For CPUs older than the 386, we should not necessarily generate \#GP here but wrap around! */
899 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
900 cbMaxRead = pVCpu->cpum.GstCtx.cs.u32Limit - (uint32_t)GCPtrFirst + 1;
901 if (cbMaxRead != 0)
902 { /* likely */ }
903 else
904 {
905 /* Overflowed because address is 0 and limit is max. */
906 Assert(GCPtrFirst == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
907 cbMaxRead = X86_PAGE_SIZE;
908 }
909 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base;
910 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
911 if (cbMaxRead2 < cbMaxRead)
912 cbMaxRead = cbMaxRead2;
913 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
914 }
915
916 /*
917 * Get the TLB entry for this piece of code.
918 */
919 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.CodeTlb, GCPtrFirst);
920 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.CodeTlb, uTag);
921 if (pTlbe->uTag == uTag)
922 {
923 /* likely when executing lots of code, otherwise unlikely */
924# ifdef VBOX_WITH_STATISTICS
925 pVCpu->iem.s.CodeTlb.cTlbHits++;
926# endif
927 }
928 else
929 {
930 pVCpu->iem.s.CodeTlb.cTlbMisses++;
931 PGMPTWALK Walk;
932 int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &Walk);
933 if (RT_FAILURE(rc))
934 {
935#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
936 /** @todo Nested VMX: Need to handle EPT violation/misconfig here? */
937 Assert(!(Walk.fFailed & PGM_WALKFAIL_EPT));
938#endif
939 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
940 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, rc);
941 }
942
943 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
944 Assert(Walk.fSucceeded);
945 pTlbe->uTag = uTag;
946 pTlbe->fFlagsAndPhysRev = (~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A))
947 | (Walk.fEffective >> X86_PTE_PAE_BIT_NX);
948 pTlbe->GCPhys = Walk.GCPhys;
949 pTlbe->pbMappingR3 = NULL;
950 }
951
952 /*
953 * Check TLB page table level access flags.
954 */
955 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
956 {
957 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && IEM_GET_CPL(pVCpu) == 3)
958 {
959 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
960 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
961 }
962 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
963 {
964 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrFirst));
965 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
966 }
967 }
968
969 /*
970 * Set the accessed flags.
971 * ASSUMES this is set when the address is translated rather than on commit...
972 */
973 /** @todo testcase: check when the A bit is actually set by the CPU for code. */
974 if (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_ACCESSED)
975 {
976 int rc2 = PGMGstModifyPage(pVCpu, GCPtrFirst, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
977 AssertRC(rc2);
978 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
979 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
980 pTlbe->fFlagsAndPhysRev &= ~IEMTLBE_F_PT_NO_ACCESSED;
981 }
982
983 /*
984 * Look up the physical page info if necessary.
985 */
986 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
987 { /* not necessary */ }
988 else
989 {
990 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
991 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
992 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
993 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
994 AssertCompile(PGMIEMGCPHYS2PTR_F_CODE_PAGE == IEMTLBE_F_PG_CODE_PAGE);
995 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
996 { /* likely */ }
997 else
998 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
999 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
1000 | IEMTLBE_F_NO_MAPPINGR3
1001 | IEMTLBE_F_PG_NO_READ
1002 | IEMTLBE_F_PG_NO_WRITE
1003 | IEMTLBE_F_PG_UNASSIGNED
1004 | IEMTLBE_F_PG_CODE_PAGE);
1005 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
1006 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
1007 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
1008 }
1009
1010# if defined(IN_RING3) || defined(IN_RING0) /** @todo fixme */
1011 /*
1012 * Try do a direct read using the pbMappingR3 pointer.
1013 */
1014 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
1015 == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1016 {
1017 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1018 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
1019 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
1020 {
1021 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
1022 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
1023 }
1024 else
1025 {
1026 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1027 if (cbInstr + (uint32_t)cbDst <= 15)
1028 {
1029 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
1030 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1031 }
1032 else
1033 {
1034 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0)\n",
1035 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, cbDst));
1036 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1037 }
1038 }
1039 if (cbDst <= cbMaxRead)
1040 {
1041 pVCpu->iem.s.fTbCrossedPage |= offPg == 0 || pVCpu->iem.s.fTbBranched != 0; /** @todo Spurious load effect on branch handling? */
1042 pVCpu->iem.s.GCPhysInstrBufPrev = pVCpu->iem.s.GCPhysInstrBuf;
1043
1044 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
1045 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1046 pVCpu->iem.s.GCPhysInstrBuf = pTlbe->GCPhys;
1047 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
1048 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
1049 return;
1050 }
1051 pVCpu->iem.s.pbInstrBuf = NULL;
1052
1053 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
1054 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
1055 }
1056# else
1057# error "refactor as needed"
1058 /*
1059 * There is no special read handling, so we can read a bit more and
1060 * put it in the prefetch buffer.
1061 */
1062 if ( cbDst < cbMaxRead
1063 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1064 {
1065 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
1066 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
1067 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1068 { /* likely */ }
1069 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1070 {
1071 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1072 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1073 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1074 AssertStmt(rcStrict == VINF_SUCCESS, IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict)));
1075 }
1076 else
1077 {
1078 Log((RT_SUCCESS(rcStrict)
1079 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1080 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1081 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1082 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1083 }
1084 }
1085# endif
1086 /*
1087 * Special read handling, so only read exactly what's needed.
1088 * This is a highly unlikely scenario.
1089 */
1090 else
1091 {
1092 pVCpu->iem.s.CodeTlb.cTlbSlowReadPath++;
1093
1094 /* Check instruction length. */
1095 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1096 if (RT_LIKELY(cbInstr + cbDst <= 15))
1097 { /* likely */ }
1098 else
1099 {
1100 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0) [slow]\n",
1101 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, cbDst));
1102 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1103 }
1104
1105 /* Do the reading. */
1106 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
1107 if (cbToRead > 0)
1108 {
1109 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
1110 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
1111 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1112 { /* likely */ }
1113 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1114 {
1115 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1116 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), VBOXSTRICTRC_VAL(rcStrict), cbToRead));
1117 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1118 AssertStmt(rcStrict == VINF_SUCCESS, IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict)));
1119 }
1120 else
1121 {
1122 Log((RT_SUCCESS(rcStrict)
1123 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1124 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1125 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), VBOXSTRICTRC_VAL(rcStrict), cbToRead));
1126 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1127 }
1128 }
1129
1130 /* Update the state and probably return. */
1131 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1132 pVCpu->iem.s.fTbCrossedPage |= offPg == 0 || pVCpu->iem.s.fTbBranched != 0;
1133 pVCpu->iem.s.GCPhysInstrBufPrev = pVCpu->iem.s.GCPhysInstrBuf;
1134
1135 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1136 pVCpu->iem.s.offInstrNextByte = offPg + cbInstr + cbToRead;
1137 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead + cbInstr) - cbToRead - cbInstr;
1138 pVCpu->iem.s.cbInstrBufTotal = X86_PAGE_SIZE; /** @todo ??? */
1139 pVCpu->iem.s.GCPhysInstrBuf = pTlbe->GCPhys;
1140 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1141 pVCpu->iem.s.pbInstrBuf = NULL;
1142 if (cbToRead == cbDst)
1143 return;
1144 }
1145
1146 /*
1147 * More to read, loop.
1148 */
1149 cbDst -= cbMaxRead;
1150 pvDst = (uint8_t *)pvDst + cbMaxRead;
1151 }
1152# else /* !IN_RING3 */
1153 RT_NOREF(pvDst, cbDst);
1154 if (pvDst || cbDst)
1155 IEM_DO_LONGJMP(pVCpu, VERR_INTERNAL_ERROR);
1156# endif /* !IN_RING3 */
1157}
1158
1159#else /* !IEM_WITH_CODE_TLB */
1160
1161/**
1162 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
1163 * exception if it fails.
1164 *
1165 * @returns Strict VBox status code.
1166 * @param pVCpu The cross context virtual CPU structure of the
1167 * calling thread.
1168 * @param cbMin The minimum number of bytes relative to offOpcode
1169 * that must be read.
1170 */
1171VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPUCC pVCpu, size_t cbMin) RT_NOEXCEPT
1172{
1173 /*
1174 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1175 *
1176 * First translate CS:rIP to a physical address.
1177 */
1178 uint8_t const cbOpcode = pVCpu->iem.s.cbOpcode;
1179 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
1180 uint8_t const cbLeft = cbOpcode - offOpcode;
1181 Assert(cbLeft < cbMin);
1182 Assert(cbOpcode <= sizeof(pVCpu->iem.s.abOpcode));
1183
1184 uint32_t cbToTryRead;
1185 RTGCPTR GCPtrNext;
1186 if (IEM_IS_64BIT_CODE(pVCpu))
1187 {
1188 GCPtrNext = pVCpu->cpum.GstCtx.rip + cbOpcode;
1189 if (!IEM_IS_CANONICAL(GCPtrNext))
1190 return iemRaiseGeneralProtectionFault0(pVCpu);
1191 cbToTryRead = GUEST_PAGE_SIZE - (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1192 }
1193 else
1194 {
1195 uint32_t GCPtrNext32 = pVCpu->cpum.GstCtx.eip;
1196 /* Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu)); - this is allowed */
1197 GCPtrNext32 += cbOpcode;
1198 if (GCPtrNext32 > pVCpu->cpum.GstCtx.cs.u32Limit)
1199 /** @todo For CPUs older than the 386, we should not generate \#GP here but wrap around! */
1200 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1201 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrNext32 + 1;
1202 if (!cbToTryRead) /* overflowed */
1203 {
1204 Assert(GCPtrNext32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1205 cbToTryRead = UINT32_MAX;
1206 /** @todo check out wrapping around the code segment. */
1207 }
1208 if (cbToTryRead < cbMin - cbLeft)
1209 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1210 GCPtrNext = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrNext32;
1211
1212 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1213 if (cbToTryRead > cbLeftOnPage)
1214 cbToTryRead = cbLeftOnPage;
1215 }
1216
1217 /* Restrict to opcode buffer space.
1218
1219 We're making ASSUMPTIONS here based on work done previously in
1220 iemInitDecoderAndPrefetchOpcodes, where bytes from the first page will
1221 be fetched in case of an instruction crossing two pages. */
1222 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - cbOpcode)
1223 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - cbOpcode;
1224 if (RT_LIKELY(cbToTryRead + cbLeft >= cbMin))
1225 { /* likely */ }
1226 else
1227 {
1228 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0)\n",
1229 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, offOpcode, cbMin));
1230 return iemRaiseGeneralProtectionFault0(pVCpu);
1231 }
1232
1233 PGMPTWALK Walk;
1234 int rc = PGMGstGetPage(pVCpu, GCPtrNext, &Walk);
1235 if (RT_FAILURE(rc))
1236 {
1237 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
1238#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1239 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1240 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
1241#endif
1242 return iemRaisePageFault(pVCpu, GCPtrNext, 1, IEM_ACCESS_INSTRUCTION, rc);
1243 }
1244 if (!(Walk.fEffective & X86_PTE_US) && IEM_GET_CPL(pVCpu) == 3)
1245 {
1246 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
1247#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1248 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1249 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
1250#endif
1251 return iemRaisePageFault(pVCpu, GCPtrNext, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1252 }
1253 if ((Walk.fEffective & X86_PTE_PAE_NX) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
1254 {
1255 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
1256#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1257 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1258 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
1259#endif
1260 return iemRaisePageFault(pVCpu, GCPtrNext, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1261 }
1262 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1263 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, cbOpcode));
1264 /** @todo Check reserved bits and such stuff. PGM is better at doing
1265 * that, so do it when implementing the guest virtual address
1266 * TLB... */
1267
1268 /*
1269 * Read the bytes at this address.
1270 *
1271 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
1272 * and since PATM should only patch the start of an instruction there
1273 * should be no need to check again here.
1274 */
1275 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
1276 {
1277 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[cbOpcode],
1278 cbToTryRead, PGMACCESSORIGIN_IEM);
1279 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1280 { /* likely */ }
1281 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1282 {
1283 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1284 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1285 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1286 }
1287 else
1288 {
1289 Log((RT_SUCCESS(rcStrict)
1290 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1291 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1292 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1293 return rcStrict;
1294 }
1295 }
1296 else
1297 {
1298 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[cbOpcode], GCPhys, cbToTryRead);
1299 if (RT_SUCCESS(rc))
1300 { /* likely */ }
1301 else
1302 {
1303 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
1304 return rc;
1305 }
1306 }
1307 pVCpu->iem.s.cbOpcode = cbOpcode + cbToTryRead;
1308 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
1309
1310 return VINF_SUCCESS;
1311}
1312
1313#endif /* !IEM_WITH_CODE_TLB */
1314#ifndef IEM_WITH_SETJMP
1315
1316/**
1317 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
1318 *
1319 * @returns Strict VBox status code.
1320 * @param pVCpu The cross context virtual CPU structure of the
1321 * calling thread.
1322 * @param pb Where to return the opcode byte.
1323 */
1324VBOXSTRICTRC iemOpcodeGetNextU8Slow(PVMCPUCC pVCpu, uint8_t *pb) RT_NOEXCEPT
1325{
1326 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1327 if (rcStrict == VINF_SUCCESS)
1328 {
1329 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1330 *pb = pVCpu->iem.s.abOpcode[offOpcode];
1331 pVCpu->iem.s.offOpcode = offOpcode + 1;
1332 }
1333 else
1334 *pb = 0;
1335 return rcStrict;
1336}
1337
1338#else /* IEM_WITH_SETJMP */
1339
1340/**
1341 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
1342 *
1343 * @returns The opcode byte.
1344 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1345 */
1346uint8_t iemOpcodeGetNextU8SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1347{
1348# ifdef IEM_WITH_CODE_TLB
1349 uint8_t u8;
1350 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
1351 return u8;
1352# else
1353 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1354 if (rcStrict == VINF_SUCCESS)
1355 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
1356 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1357# endif
1358}
1359
1360#endif /* IEM_WITH_SETJMP */
1361
1362#ifndef IEM_WITH_SETJMP
1363
1364/**
1365 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
1366 *
1367 * @returns Strict VBox status code.
1368 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1369 * @param pu16 Where to return the opcode word (sign-extended byte).
1370 */
1371VBOXSTRICTRC iemOpcodeGetNextS8SxU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
1372{
1373 uint8_t u8;
1374 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1375 if (rcStrict == VINF_SUCCESS)
1376 *pu16 = (int8_t)u8;
1377 return rcStrict;
1378}
1379
1380
1381/**
1382 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
1383 *
1384 * @returns Strict VBox status code.
1385 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1386 * @param pu32 Where to return the opcode dword.
1387 */
1388VBOXSTRICTRC iemOpcodeGetNextS8SxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1389{
1390 uint8_t u8;
1391 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1392 if (rcStrict == VINF_SUCCESS)
1393 *pu32 = (int8_t)u8;
1394 return rcStrict;
1395}
1396
1397
1398/**
1399 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
1400 *
1401 * @returns Strict VBox status code.
1402 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1403 * @param pu64 Where to return the opcode qword.
1404 */
1405VBOXSTRICTRC iemOpcodeGetNextS8SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1406{
1407 uint8_t u8;
1408 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1409 if (rcStrict == VINF_SUCCESS)
1410 *pu64 = (int8_t)u8;
1411 return rcStrict;
1412}
1413
1414#endif /* !IEM_WITH_SETJMP */
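/* Worked example for the three sign-extending helpers above: an opcode byte of 0xf0 is first
   read as (int8_t)-16 and then widened, ending up as 0xfff0, 0xfffffff0 or
   0xfffffffffffffff0 respectively; this is exactly what relative jumps and other
   byte-displacement encodings rely on. */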
1415
1416
1417#ifndef IEM_WITH_SETJMP
1418
1419/**
1420 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
1421 *
1422 * @returns Strict VBox status code.
1423 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1424 * @param pu16 Where to return the opcode word.
1425 */
1426VBOXSTRICTRC iemOpcodeGetNextU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
1427{
1428 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1429 if (rcStrict == VINF_SUCCESS)
1430 {
1431 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1432# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1433 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1434# else
1435 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1436# endif
1437 pVCpu->iem.s.offOpcode = offOpcode + 2;
1438 }
1439 else
1440 *pu16 = 0;
1441 return rcStrict;
1442}
1443
1444#else /* IEM_WITH_SETJMP */
1445
1446/**
1447 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error
1448 *
1449 * @returns The opcode word.
1450 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1451 */
1452uint16_t iemOpcodeGetNextU16SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1453{
1454# ifdef IEM_WITH_CODE_TLB
1455 uint16_t u16;
1456 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
1457 return u16;
1458# else
1459 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1460 if (rcStrict == VINF_SUCCESS)
1461 {
1462 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1463 pVCpu->iem.s.offOpcode += 2;
1464# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1465 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1466# else
1467 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1468# endif
1469 }
1470 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1471# endif
1472}
1473
1474#endif /* IEM_WITH_SETJMP */
1475
1476#ifndef IEM_WITH_SETJMP
1477
1478/**
1479 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
1480 *
1481 * @returns Strict VBox status code.
1482 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1483 * @param pu32 Where to return the opcode double word.
1484 */
1485VBOXSTRICTRC iemOpcodeGetNextU16ZxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1486{
1487 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1488 if (rcStrict == VINF_SUCCESS)
1489 {
1490 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1491 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1492 pVCpu->iem.s.offOpcode = offOpcode + 2;
1493 }
1494 else
1495 *pu32 = 0;
1496 return rcStrict;
1497}
1498
1499
1500/**
1501 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
1502 *
1503 * @returns Strict VBox status code.
1504 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1505 * @param pu64 Where to return the opcode quad word.
1506 */
1507VBOXSTRICTRC iemOpcodeGetNextU16ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1508{
1509 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1510 if (rcStrict == VINF_SUCCESS)
1511 {
1512 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1513 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1514 pVCpu->iem.s.offOpcode = offOpcode + 2;
1515 }
1516 else
1517 *pu64 = 0;
1518 return rcStrict;
1519}
1520
1521#endif /* !IEM_WITH_SETJMP */
1522
1523#ifndef IEM_WITH_SETJMP
1524
1525/**
1526 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
1527 *
1528 * @returns Strict VBox status code.
1529 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1530 * @param pu32 Where to return the opcode dword.
1531 */
1532VBOXSTRICTRC iemOpcodeGetNextU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1533{
1534 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1535 if (rcStrict == VINF_SUCCESS)
1536 {
1537 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1538# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1539 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1540# else
1541 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1542 pVCpu->iem.s.abOpcode[offOpcode + 1],
1543 pVCpu->iem.s.abOpcode[offOpcode + 2],
1544 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1545# endif
1546 pVCpu->iem.s.offOpcode = offOpcode + 4;
1547 }
1548 else
1549 *pu32 = 0;
1550 return rcStrict;
1551}
1552
1553#else /* IEM_WITH_SETJMP */
1554
1555/**
1556 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
1557 *
1558 * @returns The opcode dword.
1559 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1560 */
1561uint32_t iemOpcodeGetNextU32SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1562{
1563# ifdef IEM_WITH_CODE_TLB
1564 uint32_t u32;
1565 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
1566 return u32;
1567# else
1568 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1569 if (rcStrict == VINF_SUCCESS)
1570 {
1571 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1572 pVCpu->iem.s.offOpcode = offOpcode + 4;
1573# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1574 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1575# else
1576 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1577 pVCpu->iem.s.abOpcode[offOpcode + 1],
1578 pVCpu->iem.s.abOpcode[offOpcode + 2],
1579 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1580# endif
1581 }
1582 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1583# endif
1584}
1585
1586#endif /* IEM_WITH_SETJMP */
1587
1588#ifndef IEM_WITH_SETJMP
1589
1590/**
1591 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
1592 *
1593 * @returns Strict VBox status code.
1594 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1595 * @param pu64 Where to return the opcode dword, zero extended to a qword.
1596 */
1597VBOXSTRICTRC iemOpcodeGetNextU32ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1598{
1599 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1600 if (rcStrict == VINF_SUCCESS)
1601 {
1602 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1603 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1604 pVCpu->iem.s.abOpcode[offOpcode + 1],
1605 pVCpu->iem.s.abOpcode[offOpcode + 2],
1606 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1607 pVCpu->iem.s.offOpcode = offOpcode + 4;
1608 }
1609 else
1610 *pu64 = 0;
1611 return rcStrict;
1612}
1613
1614
1615/**
1616 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
1617 *
1618 * @returns Strict VBox status code.
1619 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1620 * @param pu64 Where to return the opcode qword.
1621 */
1622VBOXSTRICTRC iemOpcodeGetNextS32SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1623{
1624 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1625 if (rcStrict == VINF_SUCCESS)
1626 {
1627 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1628 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1629 pVCpu->iem.s.abOpcode[offOpcode + 1],
1630 pVCpu->iem.s.abOpcode[offOpcode + 2],
1631 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1632 pVCpu->iem.s.offOpcode = offOpcode + 4;
1633 }
1634 else
1635 *pu64 = 0;
1636 return rcStrict;
1637}
1638
1639#endif /* !IEM_WITH_SETJMP */
1640
1641#ifndef IEM_WITH_SETJMP
1642
1643/**
1644 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
1645 *
1646 * @returns Strict VBox status code.
1647 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1648 * @param pu64 Where to return the opcode qword.
1649 */
1650VBOXSTRICTRC iemOpcodeGetNextU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1651{
1652 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
1653 if (rcStrict == VINF_SUCCESS)
1654 {
1655 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1656# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1657 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1658# else
1659 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1660 pVCpu->iem.s.abOpcode[offOpcode + 1],
1661 pVCpu->iem.s.abOpcode[offOpcode + 2],
1662 pVCpu->iem.s.abOpcode[offOpcode + 3],
1663 pVCpu->iem.s.abOpcode[offOpcode + 4],
1664 pVCpu->iem.s.abOpcode[offOpcode + 5],
1665 pVCpu->iem.s.abOpcode[offOpcode + 6],
1666 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1667# endif
1668 pVCpu->iem.s.offOpcode = offOpcode + 8;
1669 }
1670 else
1671 *pu64 = 0;
1672 return rcStrict;
1673}
1674
1675#else /* IEM_WITH_SETJMP */
1676
1677/**
1678 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
1679 *
1680 * @returns The opcode qword.
1681 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1682 */
1683uint64_t iemOpcodeGetNextU64SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1684{
1685# ifdef IEM_WITH_CODE_TLB
1686 uint64_t u64;
1687 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
1688 return u64;
1689# else
1690 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
1691 if (rcStrict == VINF_SUCCESS)
1692 {
1693 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1694 pVCpu->iem.s.offOpcode = offOpcode + 8;
1695# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1696 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1697# else
1698 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1699 pVCpu->iem.s.abOpcode[offOpcode + 1],
1700 pVCpu->iem.s.abOpcode[offOpcode + 2],
1701 pVCpu->iem.s.abOpcode[offOpcode + 3],
1702 pVCpu->iem.s.abOpcode[offOpcode + 4],
1703 pVCpu->iem.s.abOpcode[offOpcode + 5],
1704 pVCpu->iem.s.abOpcode[offOpcode + 6],
1705 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1706# endif
1707 }
1708 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1709# endif
1710}
1711
1712#endif /* IEM_WITH_SETJMP */
1713
1714
1715
1716/** @name Misc Worker Functions.
1717 * @{
1718 */
1719
1720/**
1721 * Gets the exception class for the specified exception vector.
1722 *
1723 * @returns The class of the specified exception.
1724 * @param uVector The exception vector.
1725 */
1726static IEMXCPTCLASS iemGetXcptClass(uint8_t uVector) RT_NOEXCEPT
1727{
1728 Assert(uVector <= X86_XCPT_LAST);
1729 switch (uVector)
1730 {
1731 case X86_XCPT_DE:
1732 case X86_XCPT_TS:
1733 case X86_XCPT_NP:
1734 case X86_XCPT_SS:
1735 case X86_XCPT_GP:
1736 case X86_XCPT_SX: /* AMD only */
1737 return IEMXCPTCLASS_CONTRIBUTORY;
1738
1739 case X86_XCPT_PF:
1740 case X86_XCPT_VE: /* Intel only */
1741 return IEMXCPTCLASS_PAGE_FAULT;
1742
1743 case X86_XCPT_DF:
1744 return IEMXCPTCLASS_DOUBLE_FAULT;
1745 }
1746 return IEMXCPTCLASS_BENIGN;
1747}
1748
1749
1750/**
1751 * Evaluates how to handle an exception caused during delivery of another event
1752 * (exception / interrupt).
1753 *
1754 * @returns How to handle the recursive exception.
1755 * @param pVCpu The cross context virtual CPU structure of the
1756 * calling thread.
1757 * @param fPrevFlags The flags of the previous event.
1758 * @param uPrevVector The vector of the previous event.
1759 * @param fCurFlags The flags of the current exception.
1760 * @param uCurVector The vector of the current exception.
1761 * @param pfXcptRaiseInfo Where to store additional information about the
1762 * exception condition. Optional.
1763 */
1764VMM_INT_DECL(IEMXCPTRAISE) IEMEvaluateRecursiveXcpt(PVMCPUCC pVCpu, uint32_t fPrevFlags, uint8_t uPrevVector, uint32_t fCurFlags,
1765 uint8_t uCurVector, PIEMXCPTRAISEINFO pfXcptRaiseInfo)
1766{
1767 /*
1768 * Only CPU exceptions can be raised while delivering other events; software interrupt
1769 * (INTn/INT3/INTO/ICEBP) generated exceptions cannot occur as the current (second) exception.
1770 */
1771 AssertReturn(fCurFlags & IEM_XCPT_FLAGS_T_CPU_XCPT, IEMXCPTRAISE_INVALID);
1772 Assert(pVCpu); RT_NOREF(pVCpu);
1773 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x\n", uPrevVector, uCurVector));
1774
1775 IEMXCPTRAISE enmRaise = IEMXCPTRAISE_CURRENT_XCPT;
1776 IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
1777 if (fPrevFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
1778 {
1779 IEMXCPTCLASS enmPrevXcptClass = iemGetXcptClass(uPrevVector);
1780 if (enmPrevXcptClass != IEMXCPTCLASS_BENIGN)
1781 {
1782 IEMXCPTCLASS enmCurXcptClass = iemGetXcptClass(uCurVector);
1783 if ( enmPrevXcptClass == IEMXCPTCLASS_PAGE_FAULT
1784 && ( enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT
1785 || enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY))
1786 {
1787 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
1788 fRaiseInfo = enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT ? IEMXCPTRAISEINFO_PF_PF
1789 : IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT;
1790 Log2(("IEMEvaluateRecursiveXcpt: Vectoring page fault. uPrevVector=%#x uCurVector=%#x uCr2=%#RX64\n", uPrevVector,
1791 uCurVector, pVCpu->cpum.GstCtx.cr2));
1792 }
1793 else if ( enmPrevXcptClass == IEMXCPTCLASS_CONTRIBUTORY
1794 && enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY)
1795 {
1796 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
1797 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x -> #DF\n", uPrevVector, uCurVector));
1798 }
1799 else if ( enmPrevXcptClass == IEMXCPTCLASS_DOUBLE_FAULT
1800 && ( enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY
1801 || enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT))
1802 {
1803 enmRaise = IEMXCPTRAISE_TRIPLE_FAULT;
1804 Log2(("IEMEvaluateRecursiveXcpt: #DF handler raised a %#x exception -> triple fault\n", uCurVector));
1805 }
1806 }
1807 else
1808 {
1809 if (uPrevVector == X86_XCPT_NMI)
1810 {
1811 fRaiseInfo = IEMXCPTRAISEINFO_NMI_XCPT;
1812 if (uCurVector == X86_XCPT_PF)
1813 {
1814 fRaiseInfo |= IEMXCPTRAISEINFO_NMI_PF;
1815 Log2(("IEMEvaluateRecursiveXcpt: NMI delivery caused a page fault\n"));
1816 }
1817 }
1818 else if ( uPrevVector == X86_XCPT_AC
1819 && uCurVector == X86_XCPT_AC)
1820 {
1821 enmRaise = IEMXCPTRAISE_CPU_HANG;
1822 fRaiseInfo = IEMXCPTRAISEINFO_AC_AC;
1823 Log2(("IEMEvaluateRecursiveXcpt: Recursive #AC - Bad guest\n"));
1824 }
1825 }
1826 }
1827 else if (fPrevFlags & IEM_XCPT_FLAGS_T_EXT_INT)
1828 {
1829 fRaiseInfo = IEMXCPTRAISEINFO_EXT_INT_XCPT;
1830 if (uCurVector == X86_XCPT_PF)
1831 fRaiseInfo |= IEMXCPTRAISEINFO_EXT_INT_PF;
1832 }
1833 else
1834 {
1835 Assert(fPrevFlags & IEM_XCPT_FLAGS_T_SOFT_INT);
1836 fRaiseInfo = IEMXCPTRAISEINFO_SOFT_INT_XCPT;
1837 }
1838
1839 if (pfXcptRaiseInfo)
1840 *pfXcptRaiseInfo = fRaiseInfo;
1841 return enmRaise;
1842}
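/* Illustrative sketch, not part of the build, of how the classification above
 * plays out. A #GP raised while delivering a #PF is a contributory exception
 * on top of a page fault and thus escalates to a double fault (the local
 * variable names below are made up, the constants are the ones used above):
 *
 *      IEMXCPTRAISEINFO fInfo = IEMXCPTRAISEINFO_NONE;
 *      IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu,
 *                                                       IEM_XCPT_FLAGS_T_CPU_XCPT, X86_XCPT_PF,
 *                                                       IEM_XCPT_FLAGS_T_CPU_XCPT, X86_XCPT_GP,
 *                                                       &fInfo);
 *      Assert(enmRaise == IEMXCPTRAISE_DOUBLE_FAULT);
 *      Assert(fInfo & IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT);
 *
 * The reverse order (a #PF raised while delivering a #GP) is not a double
 * fault and yields IEMXCPTRAISE_CURRENT_XCPT instead.
 */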
1843
1844
1845/**
1846 * Enters the CPU shutdown state initiated by a triple fault or other
1847 * unrecoverable conditions.
1848 *
1849 * @returns Strict VBox status code.
1850 * @param pVCpu The cross context virtual CPU structure of the
1851 * calling thread.
1852 */
1853static VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPUCC pVCpu) RT_NOEXCEPT
1854{
1855 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
1856 IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(pVCpu, VMX_EXIT_TRIPLE_FAULT, 0 /* u64ExitQual */);
1857
1858 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
1859 {
1860 Log2(("shutdown: Guest intercept -> #VMEXIT\n"));
1861 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
1862 }
1863
1864 RT_NOREF(pVCpu);
1865 return VINF_EM_TRIPLE_FAULT;
1866}
1867
1868
1869/**
1870 * Validates a new SS segment.
1871 *
1872 * @returns VBox strict status code.
1873 * @param pVCpu The cross context virtual CPU structure of the
1874 * calling thread.
1875 * @param NewSS The new SS selector.
1876 * @param uCpl The CPL to load the stack for.
1877 * @param pDesc Where to return the descriptor.
1878 */
1879static VBOXSTRICTRC iemMiscValidateNewSS(PVMCPUCC pVCpu, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc) RT_NOEXCEPT
1880{
1881 /* Null selectors are not allowed (we're not called for dispatching
1882 interrupts with SS=0 in long mode). */
1883 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
1884 {
1885 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
1886 return iemRaiseTaskSwitchFault0(pVCpu);
1887 }
1888
1889 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
1890 if ((NewSS & X86_SEL_RPL) != uCpl)
1891 {
1892 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl));
1893 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1894 }
1895
1896 /*
1897 * Read the descriptor.
1898 */
1899 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
1900 if (rcStrict != VINF_SUCCESS)
1901 return rcStrict;
1902
1903 /*
1904 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
1905 */
1906 if (!pDesc->Legacy.Gen.u1DescType)
1907 {
1908 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
1909 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1910 }
1911
1912 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
1913 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
1914 {
1915 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
1916 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1917 }
1918 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
1919 {
1920 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
1921 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1922 }
1923
1924 /* Is it there? */
1925 /** @todo testcase: Is this checked before the canonical / limit check below? */
1926 if (!pDesc->Legacy.Gen.u1Present)
1927 {
1928 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
1929 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
1930 }
1931
1932 return VINF_SUCCESS;
1933}
1934
1935/** @} */
1936
1937
1938/** @name Raising Exceptions.
1939 *
1940 * @{
1941 */
1942
1943
1944/**
1945 * Loads the specified stack far pointer from the TSS.
1946 *
1947 * @returns VBox strict status code.
1948 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1949 * @param uCpl The CPL to load the stack for.
1950 * @param pSelSS Where to return the new stack segment.
1951 * @param puEsp Where to return the new stack pointer.
1952 */
1953static VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPUCC pVCpu, uint8_t uCpl, PRTSEL pSelSS, uint32_t *puEsp) RT_NOEXCEPT
1954{
1955 VBOXSTRICTRC rcStrict;
1956 Assert(uCpl < 4);
1957
1958 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
1959 switch (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type)
1960 {
1961 /*
1962 * 16-bit TSS (X86TSS16).
1963 */
1964 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
1965 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
1966 {
1967 uint32_t off = uCpl * 4 + 2;
1968 if (off + 4 <= pVCpu->cpum.GstCtx.tr.u32Limit)
1969 {
1970 /** @todo check actual access pattern here. */
1971 uint32_t u32Tmp = 0; /* gcc maybe... */
1972 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
1973 if (rcStrict == VINF_SUCCESS)
1974 {
1975 *puEsp = RT_LOWORD(u32Tmp);
1976 *pSelSS = RT_HIWORD(u32Tmp);
1977 return VINF_SUCCESS;
1978 }
1979 }
1980 else
1981 {
1982 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
1983 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
1984 }
1985 break;
1986 }
1987
1988 /*
1989 * 32-bit TSS (X86TSS32).
1990 */
1991 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
1992 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
1993 {
1994 uint32_t off = uCpl * 8 + 4;
1995 if (off + 7 <= pVCpu->cpum.GstCtx.tr.u32Limit)
1996 {
1997/** @todo check actual access pattern here. */
1998 uint64_t u64Tmp;
1999 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
2000 if (rcStrict == VINF_SUCCESS)
2001 {
2002 *puEsp = u64Tmp & UINT32_MAX;
2003 *pSelSS = (RTSEL)(u64Tmp >> 32);
2004 return VINF_SUCCESS;
2005 }
2006 }
2007 else
2008 {
2009 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
2010 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
2011 }
2012 break;
2013 }
2014
2015 default:
2016 AssertFailed();
2017 rcStrict = VERR_IEM_IPE_4;
2018 break;
2019 }
2020
2021 *puEsp = 0; /* make gcc happy */
2022 *pSelSS = 0; /* make gcc happy */
2023 return rcStrict;
2024}
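/* Worked example, illustrative only: with the X86TSS16/X86TSS32 layouts the
 * offset math above lands on the per-privilege stack fields, e.g. for uCpl=1
 * the 16-bit path reads 4 bytes at 1*4 + 2 = 6 (sp1:ss1) and the 32-bit path
 * reads 8 bytes at 1*8 + 4 = 12 (esp1:ss1). Restated with structure offsets
 * (not compiled, as this is only a comment):
 *
 *      AssertCompile(RT_UOFFSETOF(X86TSS16, sp1)  == 1 * 4 + 2);
 *      AssertCompile(RT_UOFFSETOF(X86TSS32, esp1) == 1 * 8 + 4);
 */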
2025
2026
2027/**
2028 * Loads the specified stack pointer from the 64-bit TSS.
2029 *
2030 * @returns VBox strict status code.
2031 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2032 * @param uCpl The CPL to load the stack for.
2033 * @param uIst The interrupt stack table index, or 0 to use uCpl.
2034 * @param puRsp Where to return the new stack pointer.
2035 */
2036static VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPUCC pVCpu, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp) RT_NOEXCEPT
2037{
2038 Assert(uCpl < 4);
2039 Assert(uIst < 8);
2040 *puRsp = 0; /* make gcc happy */
2041
2042 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
2043 AssertReturn(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
2044
2045 uint32_t off;
2046 if (uIst)
2047 off = (uIst - 1) * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, ist1);
2048 else
2049 off = uCpl * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, rsp0);
2050 if (off + sizeof(uint64_t) > pVCpu->cpum.GstCtx.tr.u32Limit)
2051 {
2052 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pVCpu->cpum.GstCtx.tr.u32Limit));
2053 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
2054 }
2055
2056 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
2057}
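/* Worked example, illustrative only: with the X86TSS64 layout used above,
 * uIst=0 and uCpl=2 select the 8 bytes at RT_UOFFSETOF(X86TSS64, rsp0) + 2*8,
 * i.e. rsp2, while uIst=3 selects RT_UOFFSETOF(X86TSS64, ist1) + 2*8, i.e. ist3.
 */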
2058
2059
2060/**
2061 * Adjust the CPU state according to the exception being raised.
2062 *
2063 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2064 * @param u8Vector The exception that has been raised.
2065 */
2066DECLINLINE(void) iemRaiseXcptAdjustState(PVMCPUCC pVCpu, uint8_t u8Vector)
2067{
2068 switch (u8Vector)
2069 {
2070 case X86_XCPT_DB:
2071 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);
2072 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
2073 break;
2074 /** @todo Read the AMD and Intel exception reference... */
2075 }
2076}
2077
2078
2079/**
2080 * Implements exceptions and interrupts for real mode.
2081 *
2082 * @returns VBox strict status code.
2083 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2084 * @param cbInstr The number of bytes to offset rIP by in the return
2085 * address.
2086 * @param u8Vector The interrupt / exception vector number.
2087 * @param fFlags The flags.
2088 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2089 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2090 */
2091static VBOXSTRICTRC
2092iemRaiseXcptOrIntInRealMode(PVMCPUCC pVCpu,
2093 uint8_t cbInstr,
2094 uint8_t u8Vector,
2095 uint32_t fFlags,
2096 uint16_t uErr,
2097 uint64_t uCr2) RT_NOEXCEPT
2098{
2099 NOREF(uErr); NOREF(uCr2);
2100 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2101
2102 /*
2103 * Read the IDT entry.
2104 */
2105 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
2106 {
2107 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
2108 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2109 }
2110 RTFAR16 Idte;
2111 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(4) * u8Vector);
2112 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2113 {
2114 Log(("iemRaiseXcptOrIntInRealMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
2115 return rcStrict;
2116 }
2117
2118#ifdef LOG_ENABLED
2119 /* If software interrupt, try to decode it if logging is enabled and such. */
2120 if ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
2121 && LogIsItEnabled(RTLOGGRPFLAGS_ENABLED, LOG_GROUP_IEM_SYSCALL))
2122 iemLogSyscallRealModeInt(pVCpu, u8Vector, cbInstr);
2123#endif
2124
2125 /*
2126 * Push the stack frame.
2127 */
2128 uint8_t bUnmapInfo;
2129 uint16_t *pu16Frame;
2130 uint64_t uNewRsp;
2131 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, 3, (void **)&pu16Frame, &bUnmapInfo, &uNewRsp);
2132 if (rcStrict != VINF_SUCCESS)
2133 return rcStrict;
2134
2135 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
2136#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
2137 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
2138 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
2139 fEfl |= UINT16_C(0xf000);
2140#endif
2141 pu16Frame[2] = (uint16_t)fEfl;
2142 pu16Frame[1] = (uint16_t)pVCpu->cpum.GstCtx.cs.Sel;
2143 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
2144 rcStrict = iemMemStackPushCommitSpecial(pVCpu, bUnmapInfo, uNewRsp);
2145 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2146 return rcStrict;
2147
2148 /*
2149 * Load the vector address into cs:ip and make exception specific state
2150 * adjustments.
2151 */
2152 pVCpu->cpum.GstCtx.cs.Sel = Idte.sel;
2153 pVCpu->cpum.GstCtx.cs.ValidSel = Idte.sel;
2154 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
2155 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)Idte.sel << 4;
2156 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
2157 pVCpu->cpum.GstCtx.rip = Idte.off;
2158 fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC);
2159 IEMMISC_SET_EFL(pVCpu, fEfl);
2160
2161 /** @todo do we actually do this in real mode? */
2162 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
2163 iemRaiseXcptAdjustState(pVCpu, u8Vector);
2164
2165 /* The IEM_F_MODE_XXX and IEM_F_X86_CPL_MASK bits don't really change here,
2166 so best leave them alone in case we're in a weird kind of real mode... */
2167
2168 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
2169}
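/* Illustrative walk-through, not part of the build, of the code above for a
 * software INT 21h executed at 0700:1234 with a 2 byte encoding (all numbers
 * made up for the example, and assuming the usual real-mode IDTR base of
 * zero): the IP:CS vector is fetched from linear address 4 * 0x21 = 0x84,
 * three words are pushed (FLAGS, CS=0x0700, return IP=0x1236), IF/TF/AC are
 * cleared, and CS:IP is loaded from the vector with CS.base = Idte.sel << 4.
 */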
2170
2171
2172/**
2173 * Loads a NULL data selector into a segment register when coming from V8086 mode.
2174 *
2175 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2176 * @param pSReg Pointer to the segment register.
2177 */
2178DECLINLINE(void) iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPUCC pVCpu, PCPUMSELREG pSReg)
2179{
2180 pSReg->Sel = 0;
2181 pSReg->ValidSel = 0;
2182 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2183 {
2184 /* VT-x (Intel 3960x) doesn't change the base and limit, clears and sets the following attributes */
2185 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
2186 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
2187 }
2188 else
2189 {
2190 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2191 /** @todo check this on AMD-V */
2192 pSReg->u64Base = 0;
2193 pSReg->u32Limit = 0;
2194 }
2195}
2196
2197
2198/**
2199 * Loads a segment selector during a task switch in V8086 mode.
2200 *
2201 * @param pSReg Pointer to the segment register.
2202 * @param uSel The selector value to load.
2203 */
2204DECLINLINE(void) iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
2205{
2206 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
2207 pSReg->Sel = uSel;
2208 pSReg->ValidSel = uSel;
2209 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2210 pSReg->u64Base = uSel << 4;
2211 pSReg->u32Limit = 0xffff;
2212 pSReg->Attr.u = 0xf3;
2213}
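/* Illustrative example, not part of the build: loading uSel=0x1234 in V8086
 * mode yields the real-mode style mapping below, where 0xf3 decodes to a
 * present, DPL=3, accessed read/write data segment.
 *
 *      iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ds, 0x1234);
 *      Assert(pVCpu->cpum.GstCtx.ds.u64Base  == UINT64_C(0x12340));
 *      Assert(pVCpu->cpum.GstCtx.ds.u32Limit == 0xffff);
 *      Assert(pVCpu->cpum.GstCtx.ds.Attr.u   == 0xf3);
 */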
2214
2215
2216/**
2217 * Loads a segment selector during a task switch in protected mode.
2218 *
2219 * In this task switch scenario, we would throw \#TS exceptions rather than
2220 * \#GPs.
2221 *
2222 * @returns VBox strict status code.
2223 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2224 * @param pSReg Pointer to the segment register.
2225 * @param uSel The new selector value.
2226 *
2227 * @remarks This does _not_ handle CS or SS.
2228 * @remarks This expects IEM_GET_CPL(pVCpu) to return an up to date value.
2229 */
2230static VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPUCC pVCpu, PCPUMSELREG pSReg, uint16_t uSel) RT_NOEXCEPT
2231{
2232 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2233
2234 /* Null data selector. */
2235 if (!(uSel & X86_SEL_MASK_OFF_RPL))
2236 {
2237 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
2238 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
2239 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2240 return VINF_SUCCESS;
2241 }
2242
2243 /* Fetch the descriptor. */
2244 IEMSELDESC Desc;
2245 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
2246 if (rcStrict != VINF_SUCCESS)
2247 {
2248 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
2249 VBOXSTRICTRC_VAL(rcStrict)));
2250 return rcStrict;
2251 }
2252
2253 /* Must be a data segment or readable code segment. */
2254 if ( !Desc.Legacy.Gen.u1DescType
2255 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
2256 {
2257 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
2258 Desc.Legacy.Gen.u4Type));
2259 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2260 }
2261
2262 /* Check privileges for data segments and non-conforming code segments. */
2263 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2264 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2265 {
2266 /* The RPL and the new CPL must be less than or equal to the DPL. */
2267 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
2268 || (IEM_GET_CPL(pVCpu) > Desc.Legacy.Gen.u2Dpl))
2269 {
2270 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
2271 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
2272 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2273 }
2274 }
2275
2276 /* Is it there? */
2277 if (!Desc.Legacy.Gen.u1Present)
2278 {
2279 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
2280 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2281 }
2282
2283 /* The base and limit. */
2284 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
2285 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
2286
2287 /*
2288 * Ok, everything checked out fine. Now set the accessed bit before
2289 * committing the result into the registers.
2290 */
2291 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2292 {
2293 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
2294 if (rcStrict != VINF_SUCCESS)
2295 return rcStrict;
2296 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2297 }
2298
2299 /* Commit */
2300 pSReg->Sel = uSel;
2301 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
2302 pSReg->u32Limit = cbLimit;
2303 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
2304 pSReg->ValidSel = uSel;
2305 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2306 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2307 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
2308
2309 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
2310 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2311 return VINF_SUCCESS;
2312}
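/* Illustrative example, not part of the build: during a switch to a CPL=3
 * task, loading DS with an RPL=3 selector that references a DPL=0 data
 * segment fails the privilege check above and raises #TS, whereas the same
 * selector referencing a DPL=3 descriptor is accepted and (if needed) gets
 * its accessed bit set before being committed.
 */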
2313
2314
2315/**
2316 * Performs a task switch.
2317 *
2318 * If the task switch is the result of a JMP, CALL or IRET instruction, the
2319 * caller is responsible for performing the necessary checks (like DPL, TSS
2320 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
2321 * reference for JMP, CALL, IRET.
2322 *
2323 * If the task switch is due to a software interrupt or hardware exception,
2324 * the caller is responsible for validating the TSS selector and descriptor. See
2325 * Intel Instruction reference for INT n.
2326 *
2327 * @returns VBox strict status code.
2328 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2329 * @param enmTaskSwitch The cause of the task switch.
2330 * @param uNextEip The EIP effective after the task switch.
2331 * @param fFlags The flags, see IEM_XCPT_FLAGS_XXX.
2332 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2333 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2334 * @param SelTss The TSS selector of the new task.
2335 * @param pNewDescTss Pointer to the new TSS descriptor.
2336 */
2337VBOXSTRICTRC
2338iemTaskSwitch(PVMCPUCC pVCpu,
2339 IEMTASKSWITCH enmTaskSwitch,
2340 uint32_t uNextEip,
2341 uint32_t fFlags,
2342 uint16_t uErr,
2343 uint64_t uCr2,
2344 RTSEL SelTss,
2345 PIEMSELDESC pNewDescTss) RT_NOEXCEPT
2346{
2347 Assert(!IEM_IS_REAL_MODE(pVCpu));
2348 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2349 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2350
2351 uint32_t const uNewTssType = pNewDescTss->Legacy.Gate.u4Type;
2352 Assert( uNewTssType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2353 || uNewTssType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2354 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2355 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2356
2357 bool const fIsNewTss386 = ( uNewTssType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2358 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2359
2360 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTss=%#x fIsNewTss386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTss,
2361 fIsNewTss386, pVCpu->cpum.GstCtx.eip, uNextEip));
2362
2363 /* Update CR2 in case it's a page-fault. */
2364 /** @todo This should probably be done much earlier in IEM/PGM. See
2365 * @bugref{5653#c49}. */
2366 if (fFlags & IEM_XCPT_FLAGS_CR2)
2367 pVCpu->cpum.GstCtx.cr2 = uCr2;
2368
2369 /*
2370 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
2371 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
2372 */
2373 uint32_t const uNewTssLimit = pNewDescTss->Legacy.Gen.u16LimitLow | (pNewDescTss->Legacy.Gen.u4LimitHigh << 16);
2374 uint32_t const uNewTssLimitMin = fIsNewTss386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
2375 if (uNewTssLimit < uNewTssLimitMin)
2376 {
2377 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTssLimit=%#x uNewTssLimitMin=%#x -> #TS\n",
2378 enmTaskSwitch, uNewTssLimit, uNewTssLimitMin));
2379 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTss & X86_SEL_MASK_OFF_RPL);
2380 }
2381
2382 /*
2383 * Task switches in VMX non-root mode always cause task-switch VM-exits.
2384 * The new TSS must have been read and validated (DPL, limits etc.) before a
2385 * task-switch VM-exit commences.
2386 *
2387 * See Intel spec. 25.4.2 "Treatment of Task Switches".
2388 */
2389 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
2390 {
2391 Log(("iemTaskSwitch: Guest intercept (source=%u, sel=%#x) -> VM-exit.\n", enmTaskSwitch, SelTss));
2392 IEM_VMX_VMEXIT_TASK_SWITCH_RET(pVCpu, enmTaskSwitch, SelTss, uNextEip - pVCpu->cpum.GstCtx.eip);
2393 }
2394
2395 /*
2396 * The SVM nested-guest intercept for task-switch takes priority over all exceptions
2397 * after validating the incoming (new) TSS, see AMD spec. 15.14.1 "Task Switch Intercept".
2398 */
2399 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TASK_SWITCH))
2400 {
2401 uint32_t const uExitInfo1 = SelTss;
2402 uint32_t uExitInfo2 = uErr;
2403 switch (enmTaskSwitch)
2404 {
2405 case IEMTASKSWITCH_JUMP: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_JUMP; break;
2406 case IEMTASKSWITCH_IRET: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_IRET; break;
2407 default: break;
2408 }
2409 if (fFlags & IEM_XCPT_FLAGS_ERR)
2410 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_HAS_ERROR_CODE;
2411 if (pVCpu->cpum.GstCtx.eflags.Bits.u1RF)
2412 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_EFLAGS_RF;
2413
2414 Log(("iemTaskSwitch: Guest intercept -> #VMEXIT. uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uExitInfo1, uExitInfo2));
2415 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_TASK_SWITCH, uExitInfo1, uExitInfo2);
2416 RT_NOREF2(uExitInfo1, uExitInfo2);
2417 }
2418
2419 /*
2420 * Check the current TSS limit. The last bytes written to the current TSS during the
2421 * task switch are the 2 bytes at offset 0x5C (32-bit) and the 2 bytes at offset 0x28 (16-bit).
2422 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2423 *
2424 * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
2425 * end up with smaller than "legal" TSS limits.
2426 */
2427 uint32_t const uCurTssLimit = pVCpu->cpum.GstCtx.tr.u32Limit;
2428 uint32_t const uCurTssLimitMin = fIsNewTss386 ? 0x5F : 0x29;
2429 if (uCurTssLimit < uCurTssLimitMin)
2430 {
2431 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTssLimit=%#x uCurTssLimitMin=%#x -> #TS\n",
2432 enmTaskSwitch, uCurTssLimit, uCurTssLimitMin));
2433 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTss & X86_SEL_MASK_OFF_RPL);
2434 }
2435
2436 /*
2437 * Verify that the new TSS can be accessed and map it. Map only the required contents
2438 * and not the entire TSS.
2439 */
2440 uint8_t bUnmapInfoNewTss;
2441 void *pvNewTss;
2442 uint32_t const cbNewTss = uNewTssLimitMin + 1;
2443 RTGCPTR const GCPtrNewTss = X86DESC_BASE(&pNewDescTss->Legacy);
2444 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
2445 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
2446 * not perform correct translation if this happens. See Intel spec. 7.2.1
2447 * "Task-State Segment". */
2448 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTss, &bUnmapInfoNewTss, cbNewTss, UINT8_MAX, GCPtrNewTss, IEM_ACCESS_SYS_RW, 0);
2449/** @todo Not cleaning up bUnmapInfoNewTss mapping in any early exits here.
2450 * Consider wrapping the remainder into a function for simpler cleanup. */
2451 if (rcStrict != VINF_SUCCESS)
2452 {
2453 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTss=%u uNewTssLimit=%u rc=%Rrc\n", enmTaskSwitch,
2454 cbNewTss, uNewTssLimit, VBOXSTRICTRC_VAL(rcStrict)));
2455 return rcStrict;
2456 }
2457
2458 /*
2459 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
2460 */
2461 uint32_t fEFlags = pVCpu->cpum.GstCtx.eflags.u;
2462 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
2463 || enmTaskSwitch == IEMTASKSWITCH_IRET)
2464 {
2465 uint8_t bUnmapInfoDescCurTss;
2466 PX86DESC pDescCurTss;
2467 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTss, &bUnmapInfoDescCurTss, sizeof(*pDescCurTss), UINT8_MAX,
2468 pVCpu->cpum.GstCtx.gdtr.pGdt + (pVCpu->cpum.GstCtx.tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0);
2469 if (rcStrict != VINF_SUCCESS)
2470 {
2471 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2472 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2473 return rcStrict;
2474 }
2475
2476 pDescCurTss->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2477 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoDescCurTss);
2478 if (rcStrict != VINF_SUCCESS)
2479 {
2480 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2481 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2482 return rcStrict;
2483 }
2484
2485 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
2486 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
2487 {
2488 Assert( uNewTssType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2489 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2490 fEFlags &= ~X86_EFL_NT;
2491 }
2492 }
2493
2494 /*
2495 * Save the CPU state into the current TSS.
2496 */
2497 RTGCPTR const GCPtrCurTss = pVCpu->cpum.GstCtx.tr.u64Base;
2498 if (GCPtrNewTss == GCPtrCurTss)
2499 {
2500 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTss));
2501 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
2502 pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.u, pVCpu->cpum.GstCtx.eax,
2503 pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel,
2504 pVCpu->cpum.GstCtx.ldtr.Sel));
2505 }
2506 if (fIsNewTss386)
2507 {
2508 /*
2509 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
2510 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2511 */
2512 uint8_t bUnmapInfoCurTss32;
2513 void *pvCurTss32;
2514 uint32_t const offCurTss = RT_UOFFSETOF(X86TSS32, eip);
2515 uint32_t const cbCurTss = RT_UOFFSETOF(X86TSS32, selLdt) - RT_UOFFSETOF(X86TSS32, eip);
2516 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
2517 rcStrict = iemMemMap(pVCpu, &pvCurTss32, &bUnmapInfoCurTss32, cbCurTss, UINT8_MAX,
2518 GCPtrCurTss + offCurTss, IEM_ACCESS_SYS_RW, 0);
2519 if (rcStrict != VINF_SUCCESS)
2520 {
2521 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTss=%#RGv cb=%u rc=%Rrc\n",
2522 enmTaskSwitch, GCPtrCurTss, cbCurTss, VBOXSTRICTRC_VAL(rcStrict)));
2523 return rcStrict;
2524 }
2525
2526 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTss..offCurTss + cbCurTss). */
2527 PX86TSS32 pCurTss32 = (PX86TSS32)((uintptr_t)pvCurTss32 - offCurTss);
2528 pCurTss32->eip = uNextEip;
2529 pCurTss32->eflags = fEFlags;
2530 pCurTss32->eax = pVCpu->cpum.GstCtx.eax;
2531 pCurTss32->ecx = pVCpu->cpum.GstCtx.ecx;
2532 pCurTss32->edx = pVCpu->cpum.GstCtx.edx;
2533 pCurTss32->ebx = pVCpu->cpum.GstCtx.ebx;
2534 pCurTss32->esp = pVCpu->cpum.GstCtx.esp;
2535 pCurTss32->ebp = pVCpu->cpum.GstCtx.ebp;
2536 pCurTss32->esi = pVCpu->cpum.GstCtx.esi;
2537 pCurTss32->edi = pVCpu->cpum.GstCtx.edi;
2538 pCurTss32->es = pVCpu->cpum.GstCtx.es.Sel;
2539 pCurTss32->cs = pVCpu->cpum.GstCtx.cs.Sel;
2540 pCurTss32->ss = pVCpu->cpum.GstCtx.ss.Sel;
2541 pCurTss32->ds = pVCpu->cpum.GstCtx.ds.Sel;
2542 pCurTss32->fs = pVCpu->cpum.GstCtx.fs.Sel;
2543 pCurTss32->gs = pVCpu->cpum.GstCtx.gs.Sel;
2544
2545 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoCurTss32);
2546 if (rcStrict != VINF_SUCCESS)
2547 {
2548 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2549 VBOXSTRICTRC_VAL(rcStrict)));
2550 return rcStrict;
2551 }
2552 }
2553 else
2554 {
2555 /*
2556 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
2557 */
2558 uint8_t bUnmapInfoCurTss16;
2559 void *pvCurTss16;
2560 uint32_t const offCurTss = RT_UOFFSETOF(X86TSS16, ip);
2561 uint32_t const cbCurTss = RT_UOFFSETOF(X86TSS16, selLdt) - RT_UOFFSETOF(X86TSS16, ip);
2562 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
2563 rcStrict = iemMemMap(pVCpu, &pvCurTss16, &bUnmapInfoCurTss16, cbCurTss, UINT8_MAX,
2564 GCPtrCurTss + offCurTss, IEM_ACCESS_SYS_RW, 0);
2565 if (rcStrict != VINF_SUCCESS)
2566 {
2567 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTss=%#RGv cb=%u rc=%Rrc\n",
2568 enmTaskSwitch, GCPtrCurTss, cbCurTss, VBOXSTRICTRC_VAL(rcStrict)));
2569 return rcStrict;
2570 }
2571
2572 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTss..offCurTss + cbCurTss). */
2573 PX86TSS16 pCurTss16 = (PX86TSS16)((uintptr_t)pvCurTss16 - offCurTss);
2574 pCurTss16->ip = uNextEip;
2575 pCurTss16->flags = (uint16_t)fEFlags;
2576 pCurTss16->ax = pVCpu->cpum.GstCtx.ax;
2577 pCurTss16->cx = pVCpu->cpum.GstCtx.cx;
2578 pCurTss16->dx = pVCpu->cpum.GstCtx.dx;
2579 pCurTss16->bx = pVCpu->cpum.GstCtx.bx;
2580 pCurTss16->sp = pVCpu->cpum.GstCtx.sp;
2581 pCurTss16->bp = pVCpu->cpum.GstCtx.bp;
2582 pCurTss16->si = pVCpu->cpum.GstCtx.si;
2583 pCurTss16->di = pVCpu->cpum.GstCtx.di;
2584 pCurTss16->es = pVCpu->cpum.GstCtx.es.Sel;
2585 pCurTss16->cs = pVCpu->cpum.GstCtx.cs.Sel;
2586 pCurTss16->ss = pVCpu->cpum.GstCtx.ss.Sel;
2587 pCurTss16->ds = pVCpu->cpum.GstCtx.ds.Sel;
2588
2589 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoCurTss16);
2590 if (rcStrict != VINF_SUCCESS)
2591 {
2592 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2593 VBOXSTRICTRC_VAL(rcStrict)));
2594 return rcStrict;
2595 }
2596 }
2597
2598 /*
2599 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
2600 */
2601 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2602 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2603 {
2604 /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */
2605 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTss;
2606 pNewTSS->selPrev = pVCpu->cpum.GstCtx.tr.Sel;
2607 }
2608
2609 /*
2610 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky,
2611 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
2612 */
2613 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
2614 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
2615 bool fNewDebugTrap;
2616 if (fIsNewTss386)
2617 {
2618 PCX86TSS32 pNewTss32 = (PCX86TSS32)pvNewTss;
2619 uNewCr3 = (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG) ? pNewTss32->cr3 : 0;
2620 uNewEip = pNewTss32->eip;
2621 uNewEflags = pNewTss32->eflags;
2622 uNewEax = pNewTss32->eax;
2623 uNewEcx = pNewTss32->ecx;
2624 uNewEdx = pNewTss32->edx;
2625 uNewEbx = pNewTss32->ebx;
2626 uNewEsp = pNewTss32->esp;
2627 uNewEbp = pNewTss32->ebp;
2628 uNewEsi = pNewTss32->esi;
2629 uNewEdi = pNewTss32->edi;
2630 uNewES = pNewTss32->es;
2631 uNewCS = pNewTss32->cs;
2632 uNewSS = pNewTss32->ss;
2633 uNewDS = pNewTss32->ds;
2634 uNewFS = pNewTss32->fs;
2635 uNewGS = pNewTss32->gs;
2636 uNewLdt = pNewTss32->selLdt;
2637 fNewDebugTrap = RT_BOOL(pNewTss32->fDebugTrap);
2638 }
2639 else
2640 {
2641 PCX86TSS16 pNewTss16 = (PCX86TSS16)pvNewTss;
2642 uNewCr3 = 0;
2643 uNewEip = pNewTss16->ip;
2644 uNewEflags = pNewTss16->flags;
2645 uNewEax = UINT32_C(0xffff0000) | pNewTss16->ax;
2646 uNewEcx = UINT32_C(0xffff0000) | pNewTss16->cx;
2647 uNewEdx = UINT32_C(0xffff0000) | pNewTss16->dx;
2648 uNewEbx = UINT32_C(0xffff0000) | pNewTss16->bx;
2649 uNewEsp = UINT32_C(0xffff0000) | pNewTss16->sp;
2650 uNewEbp = UINT32_C(0xffff0000) | pNewTss16->bp;
2651 uNewEsi = UINT32_C(0xffff0000) | pNewTss16->si;
2652 uNewEdi = UINT32_C(0xffff0000) | pNewTss16->di;
2653 uNewES = pNewTss16->es;
2654 uNewCS = pNewTss16->cs;
2655 uNewSS = pNewTss16->ss;
2656 uNewDS = pNewTss16->ds;
2657 uNewFS = 0;
2658 uNewGS = 0;
2659 uNewLdt = pNewTss16->selLdt;
2660 fNewDebugTrap = false;
2661 }
2662
2663 if (GCPtrNewTss == GCPtrCurTss)
2664 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
2665 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
2666
2667 /*
2668 * We're done accessing the new TSS.
2669 */
2670 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoNewTss);
2671 if (rcStrict != VINF_SUCCESS)
2672 {
2673 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
2674 return rcStrict;
2675 }
2676
2677 /*
2678 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
2679 */
2680 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
2681 {
2682 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTss, &bUnmapInfoNewTss, sizeof(*pNewDescTss), UINT8_MAX,
2683 pVCpu->cpum.GstCtx.gdtr.pGdt + (SelTss & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0);
2684 if (rcStrict != VINF_SUCCESS)
2685 {
2686 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2687 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2688 return rcStrict;
2689 }
2690
2691 /* Check that the descriptor indicates the new TSS is available (not busy). */
2692 AssertMsg( pNewDescTss->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2693 || pNewDescTss->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
2694 ("Invalid TSS descriptor type=%#x", pNewDescTss->Legacy.Gate.u4Type));
2695
2696 pNewDescTss->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2697 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoNewTss);
2698 if (rcStrict != VINF_SUCCESS)
2699 {
2700 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2701 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2702 return rcStrict;
2703 }
2704 }
2705
2706 /*
2707 * From this point on, we're technically in the new task. Exceptions are deferred until
2708 * the task switch has completed, but are raised before executing any instructions in the new task.
2709 */
2710 pVCpu->cpum.GstCtx.tr.Sel = SelTss;
2711 pVCpu->cpum.GstCtx.tr.ValidSel = SelTss;
2712 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
2713 pVCpu->cpum.GstCtx.tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTss->Legacy);
2714 pVCpu->cpum.GstCtx.tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTss->Legacy);
2715 pVCpu->cpum.GstCtx.tr.u64Base = X86DESC_BASE(&pNewDescTss->Legacy);
2716 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
2717
2718 /* Set the busy bit in TR. */
2719 pVCpu->cpum.GstCtx.tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2720
2721 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
2722 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2723 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2724 {
2725 uNewEflags |= X86_EFL_NT;
2726 }
2727
2728 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
2729 pVCpu->cpum.GstCtx.cr0 |= X86_CR0_TS;
2730 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
2731
2732 pVCpu->cpum.GstCtx.eip = uNewEip;
2733 pVCpu->cpum.GstCtx.eax = uNewEax;
2734 pVCpu->cpum.GstCtx.ecx = uNewEcx;
2735 pVCpu->cpum.GstCtx.edx = uNewEdx;
2736 pVCpu->cpum.GstCtx.ebx = uNewEbx;
2737 pVCpu->cpum.GstCtx.esp = uNewEsp;
2738 pVCpu->cpum.GstCtx.ebp = uNewEbp;
2739 pVCpu->cpum.GstCtx.esi = uNewEsi;
2740 pVCpu->cpum.GstCtx.edi = uNewEdi;
2741
2742 uNewEflags &= X86_EFL_LIVE_MASK;
2743 uNewEflags |= X86_EFL_RA1_MASK;
2744 IEMMISC_SET_EFL(pVCpu, uNewEflags);
2745
2746 /*
2747 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
2748 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
2749 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
2750 */
2751 pVCpu->cpum.GstCtx.es.Sel = uNewES;
2752 pVCpu->cpum.GstCtx.es.Attr.u &= ~X86DESCATTR_P;
2753
2754 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
2755 pVCpu->cpum.GstCtx.cs.Attr.u &= ~X86DESCATTR_P;
2756
2757 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
2758 pVCpu->cpum.GstCtx.ss.Attr.u &= ~X86DESCATTR_P;
2759
2760 pVCpu->cpum.GstCtx.ds.Sel = uNewDS;
2761 pVCpu->cpum.GstCtx.ds.Attr.u &= ~X86DESCATTR_P;
2762
2763 pVCpu->cpum.GstCtx.fs.Sel = uNewFS;
2764 pVCpu->cpum.GstCtx.fs.Attr.u &= ~X86DESCATTR_P;
2765
2766 pVCpu->cpum.GstCtx.gs.Sel = uNewGS;
2767 pVCpu->cpum.GstCtx.gs.Attr.u &= ~X86DESCATTR_P;
2768 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2769
2770 pVCpu->cpum.GstCtx.ldtr.Sel = uNewLdt;
2771 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
2772 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_P;
2773 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
2774
2775 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2776 {
2777 pVCpu->cpum.GstCtx.es.Attr.u |= X86DESCATTR_UNUSABLE;
2778 pVCpu->cpum.GstCtx.cs.Attr.u |= X86DESCATTR_UNUSABLE;
2779 pVCpu->cpum.GstCtx.ss.Attr.u |= X86DESCATTR_UNUSABLE;
2780 pVCpu->cpum.GstCtx.ds.Attr.u |= X86DESCATTR_UNUSABLE;
2781 pVCpu->cpum.GstCtx.fs.Attr.u |= X86DESCATTR_UNUSABLE;
2782 pVCpu->cpum.GstCtx.gs.Attr.u |= X86DESCATTR_UNUSABLE;
2783 pVCpu->cpum.GstCtx.ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
2784 }
2785
2786 /*
2787 * Switch CR3 for the new task.
2788 */
2789 if ( fIsNewTss386
2790 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG))
2791 {
2792 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
2793 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
2794 AssertRCSuccessReturn(rc, rc);
2795
2796 /* Inform PGM. */
2797 /** @todo Should we raise \#GP(0) here when PAE PDPEs are invalid? */
2798 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PGE));
2799 AssertRCReturn(rc, rc);
2800 /* ignore informational status codes */
2801
2802 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
2803 }
2804
2805 /*
2806 * Switch LDTR for the new task.
2807 */
2808 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
2809 iemHlpLoadNullDataSelectorProt(pVCpu, &pVCpu->cpum.GstCtx.ldtr, uNewLdt);
2810 else
2811 {
2812 Assert(!pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
2813
2814 IEMSELDESC DescNewLdt;
2815 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
2816 if (rcStrict != VINF_SUCCESS)
2817 {
2818 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
2819 uNewLdt, pVCpu->cpum.GstCtx.gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
2820 return rcStrict;
2821 }
2822 if ( !DescNewLdt.Legacy.Gen.u1Present
2823 || DescNewLdt.Legacy.Gen.u1DescType
2824 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
2825 {
2826 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
2827 uNewLdt, DescNewLdt.Legacy.u));
2828 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
2829 }
2830
2831 pVCpu->cpum.GstCtx.ldtr.ValidSel = uNewLdt;
2832 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
2833 pVCpu->cpum.GstCtx.ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
2834 pVCpu->cpum.GstCtx.ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
2835 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
2836 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2837 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
2838 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
2839 }
2840
2841 IEMSELDESC DescSS;
2842 if (IEM_IS_V86_MODE(pVCpu))
2843 {
2844 IEM_SET_CPL(pVCpu, 3);
2845 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.es, uNewES);
2846 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.cs, uNewCS);
2847 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ss, uNewSS);
2848 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ds, uNewDS);
2849 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.fs, uNewFS);
2850 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.gs, uNewGS);
2851
2852 /* Quick fix: fake DescSS. */ /** @todo fix the code further down? */
2853 DescSS.Legacy.u = 0;
2854 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u32Limit;
2855 DescSS.Legacy.Gen.u4LimitHigh = pVCpu->cpum.GstCtx.ss.u32Limit >> 16;
2856 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u64Base;
2857 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 16);
2858 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 24);
2859 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
2860 DescSS.Legacy.Gen.u2Dpl = 3;
2861 }
2862 else
2863 {
2864 uint8_t const uNewCpl = (uNewCS & X86_SEL_RPL);
2865
2866 /*
2867 * Load the stack segment for the new task.
2868 */
2869 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
2870 {
2871 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
2872 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2873 }
2874
2875 /* Fetch the descriptor. */
2876 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
2877 if (rcStrict != VINF_SUCCESS)
2878 {
2879 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
2880 VBOXSTRICTRC_VAL(rcStrict)));
2881 return rcStrict;
2882 }
2883
2884 /* SS must be a data segment and writable. */
2885 if ( !DescSS.Legacy.Gen.u1DescType
2886 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
2887 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
2888 {
2889 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
2890 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
2891 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2892 }
2893
2894 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
2895 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
2896 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
2897 {
2898 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
2899 uNewCpl));
2900 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2901 }
2902
2903 /* Is it there? */
2904 if (!DescSS.Legacy.Gen.u1Present)
2905 {
2906 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
2907 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2908 }
2909
2910 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
2911 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
2912
2913 /* Set the accessed bit before committing the result into SS. */
2914 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2915 {
2916 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
2917 if (rcStrict != VINF_SUCCESS)
2918 return rcStrict;
2919 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2920 }
2921
2922 /* Commit SS. */
2923 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
2924 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSS;
2925 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
2926 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimit;
2927 pVCpu->cpum.GstCtx.ss.u64Base = u64Base;
2928 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
2929 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
2930
2931 /* CPL has changed, update IEM before loading rest of segments. */
2932 IEM_SET_CPL(pVCpu, uNewCpl);
2933
2934 /*
2935 * Load the data segments for the new task.
2936 */
2937 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.es, uNewES);
2938 if (rcStrict != VINF_SUCCESS)
2939 return rcStrict;
2940 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.ds, uNewDS);
2941 if (rcStrict != VINF_SUCCESS)
2942 return rcStrict;
2943 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.fs, uNewFS);
2944 if (rcStrict != VINF_SUCCESS)
2945 return rcStrict;
2946 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.gs, uNewGS);
2947 if (rcStrict != VINF_SUCCESS)
2948 return rcStrict;
2949
2950 /*
2951 * Load the code segment for the new task.
2952 */
2953 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
2954 {
2955 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
2956 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2957 }
2958
2959 /* Fetch the descriptor. */
2960 IEMSELDESC DescCS;
2961 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
2962 if (rcStrict != VINF_SUCCESS)
2963 {
2964 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
2965 return rcStrict;
2966 }
2967
2968 /* CS must be a code segment. */
2969 if ( !DescCS.Legacy.Gen.u1DescType
2970 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
2971 {
2972 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
2973 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
2974 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2975 }
2976
2977 /* For conforming CS, DPL must be less than or equal to the RPL. */
2978 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2979 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
2980 {
2981 Log(("iemTaskSwitch: conforming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
2982 DescCS.Legacy.Gen.u2Dpl));
2983 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2984 }
2985
2986 /* For non-conforming CS, DPL must match RPL. */
2987 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2988 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
2989 {
2990 Log(("iemTaskSwitch: non-conforming CS DPL RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
2991 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
2992 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2993 }
2994
2995 /* Is it there? */
2996 if (!DescCS.Legacy.Gen.u1Present)
2997 {
2998 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
2999 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3000 }
3001
3002 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
3003 u64Base = X86DESC_BASE(&DescCS.Legacy);
3004
3005 /* Set the accessed bit before committing the result into CS. */
3006 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3007 {
3008 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
3009 if (rcStrict != VINF_SUCCESS)
3010 return rcStrict;
3011 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3012 }
3013
3014 /* Commit CS. */
3015 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
3016 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCS;
3017 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3018 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
3019 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
3020 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3021 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
3022 }
3023
3024 /* Make sure the CPU mode is correct. */
3025 uint32_t const fExecNew = iemCalcExecFlags(pVCpu) | (pVCpu->iem.s.fExec & IEM_F_USER_OPTS);
3026 if (fExecNew != pVCpu->iem.s.fExec)
3027 Log(("iemTaskSwitch: fExec %#x -> %#x (xor %#x)\n", pVCpu->iem.s.fExec, fExecNew, pVCpu->iem.s.fExec ^ fExecNew));
3028 pVCpu->iem.s.fExec = fExecNew;
3029
3030 /** @todo Debug trap. */
3031 if (fIsNewTss386 && fNewDebugTrap)
3032 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
3033
3034 /*
3035 * Construct the error code masks based on what caused this task switch.
3036 * See Intel Instruction reference for INT.
3037 */
3038 uint16_t uExt;
3039 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
3040 && ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3041 || (fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)))
3042 uExt = 1;
3043 else
3044 uExt = 0;
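            /* (Informal note: uExt becomes bit 0 -- the 'external event' bit -- of the error
               code for any fault raised below while finishing the task switch.) */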
3045
3046 /*
3047 * Push any error code on to the new stack.
3048 */
3049 if (fFlags & IEM_XCPT_FLAGS_ERR)
3050 {
3051 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
3052 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
3053 uint8_t const cbStackFrame = fIsNewTss386 ? 4 : 2;
3054
3055 /* Check that there is sufficient space on the stack. */
3056 /** @todo Factor out segment limit checking for normal/expand down segments
3057 * into a separate function. */
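            /* (Informal note: for a normal, expand-up stack segment the valid offsets are
               [0, limit], so the push must fit entirely below the limit.  For an expand-down
               segment the valid range is (limit, 0xffff] or (limit, 0xffffffff] depending on
               the D/B bit, which is what the else-branch below checks.) */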
3058 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3059 {
3060 if ( pVCpu->cpum.GstCtx.esp - 1 > cbLimitSS
3061 || pVCpu->cpum.GstCtx.esp < cbStackFrame)
3062 {
3063 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
3064 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
3065 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
3066 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
3067 }
3068 }
3069 else
3070 {
3071 if ( pVCpu->cpum.GstCtx.esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
3072 || pVCpu->cpum.GstCtx.esp - cbStackFrame < cbLimitSS + UINT32_C(1))
3073 {
3074 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
3075 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
3076 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
3077 }
3078 }
3079
3080
3081 if (fIsNewTss386)
3082 rcStrict = iemMemStackPushU32(pVCpu, uErr);
3083 else
3084 rcStrict = iemMemStackPushU16(pVCpu, uErr);
3085 if (rcStrict != VINF_SUCCESS)
3086 {
3087 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
3088 fIsNewTss386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
3089 return rcStrict;
3090 }
3091 }
3092
3093 /* Check the new EIP against the new CS limit. */
3094 if (pVCpu->cpum.GstCtx.eip > pVCpu->cpum.GstCtx.cs.u32Limit)
3095 {
3096 Log(("iemTaskSwitch: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
3097 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.cs.u32Limit));
3098 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
3099 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
3100 }
3101
3102 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip,
3103 pVCpu->cpum.GstCtx.ss.Sel));
3104 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3105}
3106
3107
3108/**
3109 * Implements exceptions and interrupts for protected mode.
3110 *
3111 * @returns VBox strict status code.
3112 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3113 * @param cbInstr The number of bytes to offset rIP by in the return
3114 * address.
3115 * @param u8Vector The interrupt / exception vector number.
3116 * @param fFlags The flags.
3117 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3118 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3119 */
3120static VBOXSTRICTRC
3121iemRaiseXcptOrIntInProtMode(PVMCPUCC pVCpu,
3122 uint8_t cbInstr,
3123 uint8_t u8Vector,
3124 uint32_t fFlags,
3125 uint16_t uErr,
3126 uint64_t uCr2) RT_NOEXCEPT
3127{
3128 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3129
3130 /*
3131 * Read the IDT entry.
3132 */
3133 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
3134 {
3135 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3136 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3137 }
3138 X86DESC Idte;
3139 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
3140 pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(8) * u8Vector);
3141 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3142 {
3143 Log(("iemRaiseXcptOrIntInProtMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3144 return rcStrict;
3145 }
3146 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x - from %04x:%08RX64 efl=%#x depth=%d\n",
3147 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3148 Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow,
3149 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.eflags.u, pVCpu->iem.s.cXcptRecursions));
3150
3151 /*
3152 * Check the descriptor type, DPL and such.
3153 * ASSUMES this is done in the same order as described for call-gate calls.
3154 */
3155 if (Idte.Gate.u1DescType)
3156 {
3157 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3158 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3159 }
3160 bool fTaskGate = false;
3161 uint8_t f32BitGate = true;
3162 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
3163 switch (Idte.Gate.u4Type)
3164 {
3165 case X86_SEL_TYPE_SYS_UNDEFINED:
3166 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
3167 case X86_SEL_TYPE_SYS_LDT:
3168 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3169 case X86_SEL_TYPE_SYS_286_CALL_GATE:
3170 case X86_SEL_TYPE_SYS_UNDEFINED2:
3171 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
3172 case X86_SEL_TYPE_SYS_UNDEFINED3:
3173 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3174 case X86_SEL_TYPE_SYS_386_CALL_GATE:
3175 case X86_SEL_TYPE_SYS_UNDEFINED4:
3176 {
3177 /** @todo check what actually happens when the type is wrong...
3178 * esp. call gates. */
3179 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3180 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3181 }
3182
3183 case X86_SEL_TYPE_SYS_286_INT_GATE:
3184 f32BitGate = false;
3185 RT_FALL_THRU();
3186 case X86_SEL_TYPE_SYS_386_INT_GATE:
3187 fEflToClear |= X86_EFL_IF;
3188 break;
3189
3190 case X86_SEL_TYPE_SYS_TASK_GATE:
3191 fTaskGate = true;
3192#ifndef IEM_IMPLEMENTS_TASKSWITCH
3193 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
3194#endif
3195 break;
3196
3197 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
3198 f32BitGate = false;
     RT_FALL_THRU();
3199 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
3200 break;
3201
3202 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3203 }
3204
3205 /* Check DPL against CPL if applicable. */
3206 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
3207 {
3208 if (IEM_GET_CPL(pVCpu) > Idte.Gate.u2Dpl)
3209 {
3210 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, IEM_GET_CPL(pVCpu), Idte.Gate.u2Dpl));
3211 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3212 }
3213 }
3214
3215 /* Is it there? */
3216 if (!Idte.Gate.u1Present)
3217 {
3218 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
3219 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3220 }
3221
3222 /* Is it a task-gate? */
3223 if (fTaskGate)
3224 {
3225 /*
3226 * Construct the error code masks based on what caused this task switch.
3227 * See Intel Instruction reference for INT.
3228 */
3229 uint16_t const uExt = ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3230 && !(fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)) ? 0 : 1;
3231 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
3232 RTSEL SelTss = Idte.Gate.u16Sel;
3233
3234 /*
3235 * Fetch the TSS descriptor in the GDT.
3236 */
3237 IEMSELDESC DescTSS;
3238 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTss, X86_XCPT_GP, (SelTss & uSelMask) | uExt);
3239 if (rcStrict != VINF_SUCCESS)
3240 {
3241 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTss,
3242 VBOXSTRICTRC_VAL(rcStrict)));
3243 return rcStrict;
3244 }
3245
3246 /* The TSS descriptor must be a system segment and be available (not busy). */
3247 if ( DescTSS.Legacy.Gen.u1DescType
3248 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
3249 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
3250 {
3251 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
3252 u8Vector, SelTss, DescTSS.Legacy.au64));
3253 return iemRaiseGeneralProtectionFault(pVCpu, (SelTss & uSelMask) | uExt);
3254 }
3255
3256 /* The TSS must be present. */
3257 if (!DescTSS.Legacy.Gen.u1Present)
3258 {
3259 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTss, DescTSS.Legacy.au64));
3260 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTss & uSelMask) | uExt);
3261 }
3262
3263 /* Do the actual task switch. */
3264 return iemTaskSwitch(pVCpu, IEMTASKSWITCH_INT_XCPT,
3265 (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip,
3266 fFlags, uErr, uCr2, SelTss, &DescTSS);
3267 }
3268
3269 /* A null CS is bad. */
3270 RTSEL NewCS = Idte.Gate.u16Sel;
3271 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3272 {
3273 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3274 return iemRaiseGeneralProtectionFault0(pVCpu);
3275 }
3276
3277 /* Fetch the descriptor for the new CS. */
3278 IEMSELDESC DescCS;
3279 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
3280 if (rcStrict != VINF_SUCCESS)
3281 {
3282 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3283 return rcStrict;
3284 }
3285
3286 /* Must be a code segment. */
3287 if (!DescCS.Legacy.Gen.u1DescType)
3288 {
3289 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3290 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3291 }
3292 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3293 {
3294 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3295 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3296 }
3297
3298 /* Don't allow lowering the privilege level. */
3299 /** @todo Does the lowering of privileges apply to software interrupts
3300 * only? This has bearings on the more-privileged or
3301 * same-privilege stack behavior further down. A testcase would
3302 * be nice. */
3303 if (DescCS.Legacy.Gen.u2Dpl > IEM_GET_CPL(pVCpu))
3304 {
3305 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3306 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
3307 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3308 }
3309
3310 /* Make sure the selector is present. */
3311 if (!DescCS.Legacy.Gen.u1Present)
3312 {
3313 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3314 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
3315 }
3316
3317#ifdef LOG_ENABLED
3318 /* If software interrupt, try to decode it if logging is enabled and such. */
3319 if ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3320 && LogIsItEnabled(RTLOGGRPFLAGS_ENABLED, LOG_GROUP_IEM_SYSCALL))
3321 iemLogSyscallProtModeInt(pVCpu, u8Vector, cbInstr);
3322#endif
3323
3324 /* Check the new EIP against the new CS limit. */
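            /* (Informal note: 286 interrupt/trap gates only carry a 16-bit offset, so the
               high word of the entry point is ignored for those gate types.) */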
3325 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
3326 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
3327 ? Idte.Gate.u16OffsetLow
3328 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
3329 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
3330 if (uNewEip > cbLimitCS)
3331 {
3332 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
3333 u8Vector, uNewEip, cbLimitCS, NewCS));
3334 return iemRaiseGeneralProtectionFault(pVCpu, 0);
3335 }
3336 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
3337
3338 /* Calc the flag image to push. */
3339 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3340 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3341 fEfl &= ~X86_EFL_RF;
3342 else
3343 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3344
3345 /* From V8086 mode only go to CPL 0. */
3346 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3347 ? IEM_GET_CPL(pVCpu) : DescCS.Legacy.Gen.u2Dpl;
3348 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
3349 {
3350 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
3351 return iemRaiseGeneralProtectionFault(pVCpu, 0);
3352 }
3353
3354 /*
3355 * If the privilege level changes, we need to get a new stack from the TSS.
3356 * This in turns means validating the new SS and ESP...
3357 */
3358 if (uNewCpl != IEM_GET_CPL(pVCpu))
3359 {
3360 RTSEL NewSS;
3361 uint32_t uNewEsp;
3362 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, uNewCpl, &NewSS, &uNewEsp);
3363 if (rcStrict != VINF_SUCCESS)
3364 return rcStrict;
3365
3366 IEMSELDESC DescSS;
3367 rcStrict = iemMiscValidateNewSS(pVCpu, NewSS, uNewCpl, &DescSS);
3368 if (rcStrict != VINF_SUCCESS)
3369 return rcStrict;
3370 /* If the new SS is 16-bit, we are only going to use SP, not ESP. */
3371 if (!DescSS.Legacy.Gen.u1DefBig)
3372 {
3373 Log(("iemRaiseXcptOrIntInProtMode: Forcing ESP=%#x to 16 bits\n", uNewEsp));
3374 uNewEsp = (uint16_t)uNewEsp;
3375 }
3376
3377 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
3378
3379 /* Check that there is sufficient space for the stack frame. */
3380 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
3381 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
3382 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
3383 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
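            /* (Informal note on the sizes above: the basic frame is EIP, CS, EFLAGS, ESP, SS
               = 5 slots, plus one more when an error code is pushed; an interrupt taken from
               V8086 mode additionally pushes ES, DS, FS and GS, i.e. 9 or 10 slots.  Each
               slot is 2 bytes for a 16-bit gate and 4 bytes for a 32-bit one, hence the
               shift by f32BitGate.) */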
3384
3385 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3386 {
3387 if ( uNewEsp - 1 > cbLimitSS
3388 || uNewEsp < cbStackFrame)
3389 {
3390 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
3391 u8Vector, NewSS, uNewEsp, cbStackFrame));
3392 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
3393 }
3394 }
3395 else
3396 {
3397 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT16_MAX)
3398 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
3399 {
3400 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
3401 u8Vector, NewSS, uNewEsp, cbStackFrame));
3402 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
3403 }
3404 }
3405
3406 /*
3407 * Start making changes.
3408 */
3409
3410 /* Set the new CPL so that stack accesses use it. */
3411 uint8_t const uOldCpl = IEM_GET_CPL(pVCpu);
3412 IEM_SET_CPL(pVCpu, uNewCpl);
3413
3414 /* Create the stack frame. */
3415 uint8_t bUnmapInfoStackFrame;
3416 RTPTRUNION uStackFrame;
3417 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, &bUnmapInfoStackFrame, cbStackFrame, UINT8_MAX,
3418 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy),
3419 IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */
3420 if (rcStrict != VINF_SUCCESS)
3421 return rcStrict;
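            /* (Informal note: the frame is written lowest address first, so the layout ends
               up as [error code,] EIP, CS, EFLAGS, ESP, SS -- with ES, DS, FS, GS appended
               when interrupting V8086 code -- matching what the CPU would push from the top
               of the new stack downwards.) */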
3422 if (f32BitGate)
3423 {
3424 if (fFlags & IEM_XCPT_FLAGS_ERR)
3425 *uStackFrame.pu32++ = uErr;
3426 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3427 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
3428 uStackFrame.pu32[2] = fEfl;
3429 uStackFrame.pu32[3] = pVCpu->cpum.GstCtx.esp;
3430 uStackFrame.pu32[4] = pVCpu->cpum.GstCtx.ss.Sel;
3431 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
3432 if (fEfl & X86_EFL_VM)
3433 {
3434 uStackFrame.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel;
3435 uStackFrame.pu32[5] = pVCpu->cpum.GstCtx.es.Sel;
3436 uStackFrame.pu32[6] = pVCpu->cpum.GstCtx.ds.Sel;
3437 uStackFrame.pu32[7] = pVCpu->cpum.GstCtx.fs.Sel;
3438 uStackFrame.pu32[8] = pVCpu->cpum.GstCtx.gs.Sel;
3439 }
3440 }
3441 else
3442 {
3443 if (fFlags & IEM_XCPT_FLAGS_ERR)
3444 *uStackFrame.pu16++ = uErr;
3445 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
3446 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
3447 uStackFrame.pu16[2] = fEfl;
3448 uStackFrame.pu16[3] = pVCpu->cpum.GstCtx.sp;
3449 uStackFrame.pu16[4] = pVCpu->cpum.GstCtx.ss.Sel;
3450 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.sp));
3451 if (fEfl & X86_EFL_VM)
3452 {
3453 uStackFrame.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
3454 uStackFrame.pu16[5] = pVCpu->cpum.GstCtx.es.Sel;
3455 uStackFrame.pu16[6] = pVCpu->cpum.GstCtx.ds.Sel;
3456 uStackFrame.pu16[7] = pVCpu->cpum.GstCtx.fs.Sel;
3457 uStackFrame.pu16[8] = pVCpu->cpum.GstCtx.gs.Sel;
3458 }
3459 }
3460 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoStackFrame);
3461 if (rcStrict != VINF_SUCCESS)
3462 return rcStrict;
3463
3464 /* Mark the selectors 'accessed' (hope this is the correct time). */
3465 /** @todo testcase: exactly _when_ are the accessed bits set - before or
3466 * after pushing the stack frame? (Write protect the gdt + stack to
3467 * find out.) */
3468 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3469 {
3470 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3471 if (rcStrict != VINF_SUCCESS)
3472 return rcStrict;
3473 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3474 }
3475
3476 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3477 {
3478 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
3479 if (rcStrict != VINF_SUCCESS)
3480 return rcStrict;
3481 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3482 }
3483
3484 /*
3485 * Start committing the register changes (joins with the DPL=CPL branch).
3486 */
3487 pVCpu->cpum.GstCtx.ss.Sel = NewSS;
3488 pVCpu->cpum.GstCtx.ss.ValidSel = NewSS;
3489 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3490 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimitSS;
3491 pVCpu->cpum.GstCtx.ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
3492 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3493 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
3494 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
3495 * SP is loaded).
3496 * Need to check the other combinations too:
3497 * - 16-bit TSS, 32-bit handler
3498 * - 32-bit TSS, 16-bit handler */
3499 if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
3500 pVCpu->cpum.GstCtx.sp = (uint16_t)(uNewEsp - cbStackFrame);
3501 else
3502 pVCpu->cpum.GstCtx.rsp = uNewEsp - cbStackFrame;
3503
3504 if (fEfl & X86_EFL_VM)
3505 {
3506 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.gs);
3507 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.fs);
3508 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.es);
3509 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.ds);
3510 }
3511 }
3512 /*
3513 * Same privilege, no stack change and smaller stack frame.
3514 */
3515 else
3516 {
3517 uint64_t uNewRsp;
3518 uint8_t bUnmapInfoStackFrame;
3519 RTPTRUNION uStackFrame;
3520 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
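            /* (Informal note: without a privilege change only EIP, CS and EFLAGS -- plus an
               optional error code -- are pushed, i.e. 3 or 4 slots of 2 or 4 bytes each.) */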
3521 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, f32BitGate ? 3 : 1,
3522 &uStackFrame.pv, &bUnmapInfoStackFrame, &uNewRsp);
3523 if (rcStrict != VINF_SUCCESS)
3524 return rcStrict;
3525
3526 if (f32BitGate)
3527 {
3528 if (fFlags & IEM_XCPT_FLAGS_ERR)
3529 *uStackFrame.pu32++ = uErr;
3530 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3531 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | IEM_GET_CPL(pVCpu);
3532 uStackFrame.pu32[2] = fEfl;
3533 }
3534 else
3535 {
3536 if (fFlags & IEM_XCPT_FLAGS_ERR)
3537 *uStackFrame.pu16++ = uErr;
3538 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3539 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | IEM_GET_CPL(pVCpu);
3540 uStackFrame.pu16[2] = fEfl;
3541 }
3542 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoStackFrame); /* don't use the commit here */
3543 if (rcStrict != VINF_SUCCESS)
3544 return rcStrict;
3545
3546 /* Mark the CS selector as 'accessed'. */
3547 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3548 {
3549 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3550 if (rcStrict != VINF_SUCCESS)
3551 return rcStrict;
3552 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3553 }
3554
3555 /*
3556 * Start committing the register changes (joins with the other branch).
3557 */
3558 pVCpu->cpum.GstCtx.rsp = uNewRsp;
3559 }
3560
3561 /* ... register committing continues. */
3562 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3563 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3564 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3565 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCS;
3566 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3567 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3568
3569 pVCpu->cpum.GstCtx.rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
3570 fEfl &= ~fEflToClear;
3571 IEMMISC_SET_EFL(pVCpu, fEfl);
3572
3573 if (fFlags & IEM_XCPT_FLAGS_CR2)
3574 pVCpu->cpum.GstCtx.cr2 = uCr2;
3575
3576 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3577 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3578
3579 /* Make sure the execution flags are correct. */
3580 uint32_t const fExecNew = iemCalcExecFlags(pVCpu) | (pVCpu->iem.s.fExec & IEM_F_USER_OPTS);
3581 if (fExecNew != pVCpu->iem.s.fExec)
3582 Log(("iemRaiseXcptOrIntInProtMode: fExec %#x -> %#x (xor %#x)\n",
3583 pVCpu->iem.s.fExec, fExecNew, pVCpu->iem.s.fExec ^ fExecNew));
3584 pVCpu->iem.s.fExec = fExecNew;
3585 Assert(IEM_GET_CPL(pVCpu) == uNewCpl);
3586
3587 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3588}
3589
3590
3591/**
3592 * Implements exceptions and interrupts for long mode.
3593 *
3594 * @returns VBox strict status code.
3595 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3596 * @param cbInstr The number of bytes to offset rIP by in the return
3597 * address.
3598 * @param u8Vector The interrupt / exception vector number.
3599 * @param fFlags The flags.
3600 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3601 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3602 */
3603static VBOXSTRICTRC
3604iemRaiseXcptOrIntInLongMode(PVMCPUCC pVCpu,
3605 uint8_t cbInstr,
3606 uint8_t u8Vector,
3607 uint32_t fFlags,
3608 uint16_t uErr,
3609 uint64_t uCr2) RT_NOEXCEPT
3610{
3611 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3612
3613 /*
3614 * Read the IDT entry.
3615 */
3616 uint16_t offIdt = (uint16_t)u8Vector << 4;
3617 if (pVCpu->cpum.GstCtx.idtr.cbIdt < offIdt + 7)
3618 {
3619 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3620 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3621 }
3622 X86DESC64 Idte;
3623#ifdef _MSC_VER /* Shut up silly compiler warning. */
3624 Idte.au64[0] = 0;
3625 Idte.au64[1] = 0;
3626#endif
3627 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt);
3628 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3629 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt + 8);
3630 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3631 {
3632 Log(("iemRaiseXcptOrIntInLongMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3633 return rcStrict;
3634 }
3635 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
3636 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3637 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
3638
3639 /*
3640 * Check the descriptor type, DPL and such.
3641 * ASSUMES this is done in the same order as described for call-gate calls.
3642 */
3643 if (Idte.Gate.u1DescType)
3644 {
3645 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3646 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3647 }
3648 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
3649 switch (Idte.Gate.u4Type)
3650 {
3651 case AMD64_SEL_TYPE_SYS_INT_GATE:
3652 fEflToClear |= X86_EFL_IF;
3653 break;
3654 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
3655 break;
3656
3657 default:
3658 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3659 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3660 }
3661
3662 /* Check DPL against CPL if applicable. */
3663 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
3664 {
3665 if (IEM_GET_CPL(pVCpu) > Idte.Gate.u2Dpl)
3666 {
3667 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, IEM_GET_CPL(pVCpu), Idte.Gate.u2Dpl));
3668 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3669 }
3670 }
3671
3672 /* Is it there? */
3673 if (!Idte.Gate.u1Present)
3674 {
3675 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
3676 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3677 }
3678
3679 /* A null CS is bad. */
3680 RTSEL NewCS = Idte.Gate.u16Sel;
3681 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3682 {
3683 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3684 return iemRaiseGeneralProtectionFault0(pVCpu);
3685 }
3686
3687 /* Fetch the descriptor for the new CS. */
3688 IEMSELDESC DescCS;
3689 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
3690 if (rcStrict != VINF_SUCCESS)
3691 {
3692 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3693 return rcStrict;
3694 }
3695
3696 /* Must be a 64-bit code segment. */
3697 if (!DescCS.Long.Gen.u1DescType)
3698 {
3699 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3700 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3701 }
3702 if ( !DescCS.Long.Gen.u1Long
3703 || DescCS.Long.Gen.u1DefBig
3704 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
3705 {
3706 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
3707 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
3708 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3709 }
3710
3711 /* Don't allow lowering the privilege level. For non-conforming CS
3712 selectors, the CS.DPL sets the privilege level the trap/interrupt
3713 handler runs at. For conforming CS selectors, the CPL remains
3714 unchanged, but the CS.DPL must be <= CPL. */
3715 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
3716 * when CPU in Ring-0. Result \#GP? */
3717 if (DescCS.Legacy.Gen.u2Dpl > IEM_GET_CPL(pVCpu))
3718 {
3719 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3720 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
3721 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3722 }
3723
3724
3725 /* Make sure the selector is present. */
3726 if (!DescCS.Legacy.Gen.u1Present)
3727 {
3728 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3729 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
3730 }
3731
3732 /* Check that the new RIP is canonical. */
3733 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
3734 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
3735 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
3736 if (!IEM_IS_CANONICAL(uNewRip))
3737 {
3738 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
3739 return iemRaiseGeneralProtectionFault0(pVCpu);
3740 }
3741
3742 /*
3743 * If the privilege level changes or if the IST isn't zero, we need to get
3744 * a new stack from the TSS.
3745 */
3746 uint64_t uNewRsp;
3747 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3748 ? IEM_GET_CPL(pVCpu) : DescCS.Legacy.Gen.u2Dpl;
3749 if ( uNewCpl != IEM_GET_CPL(pVCpu)
3750 || Idte.Gate.u3IST != 0)
3751 {
3752 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
3753 if (rcStrict != VINF_SUCCESS)
3754 return rcStrict;
3755 }
3756 else
3757 uNewRsp = pVCpu->cpum.GstCtx.rsp;
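    /* (Informal note: in 64-bit mode the CPU aligns the stack pointer down to a 16-byte
       boundary before pushing the interrupt frame, which the masking below emulates.) */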
3758 uNewRsp &= ~(uint64_t)0xf;
3759
3760 /*
3761 * Calc the flag image to push.
3762 */
3763 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3764 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3765 fEfl &= ~X86_EFL_RF;
3766 else
3767 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3768
3769 /*
3770 * Start making changes.
3771 */
3772 /* Set the new CPL so that stack accesses use it. */
3773 uint8_t const uOldCpl = IEM_GET_CPL(pVCpu);
3774 IEM_SET_CPL(pVCpu, uNewCpl);
3775/** @todo Setting CPL this early seems wrong as it would affect any errors we
3776 * raise accessing the stack and (?) GDT/LDT... */
3777
3778 /* Create the stack frame. */
3779 uint8_t bUnmapInfoStackFrame;
3780 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
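    /* (Informal note: the long mode frame is always five qwords -- RIP, CS, RFLAGS, RSP,
       SS -- plus an optional error code qword, regardless of the interrupted code's
       bitness.) */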
3781 RTPTRUNION uStackFrame;
3782 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, &bUnmapInfoStackFrame, cbStackFrame, UINT8_MAX,
3783 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */
3784 if (rcStrict != VINF_SUCCESS)
3785 return rcStrict;
3786
3787 if (fFlags & IEM_XCPT_FLAGS_ERR)
3788 *uStackFrame.pu64++ = uErr;
3789 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.rip + cbInstr : pVCpu->cpum.GstCtx.rip;
3790 uStackFrame.pu64[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
3791 uStackFrame.pu64[2] = fEfl;
3792 uStackFrame.pu64[3] = pVCpu->cpum.GstCtx.rsp;
3793 uStackFrame.pu64[4] = pVCpu->cpum.GstCtx.ss.Sel;
3794 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoStackFrame);
3795 if (rcStrict != VINF_SUCCESS)
3796 return rcStrict;
3797
3798 /* Mark the CS selector 'accessed' (hope this is the correct time). */
3799 /** @todo testcase: exactly _when_ are the accessed bits set - before or
3800 * after pushing the stack frame? (Write protect the gdt + stack to
3801 * find out.) */
3802 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3803 {
3804 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3805 if (rcStrict != VINF_SUCCESS)
3806 return rcStrict;
3807 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3808 }
3809
3810 /*
3811 * Start committing the register changes.
3812 */
3813 /** @todo research/testcase: Figure out what VT-x and AMD-V loads into the
3814 * hidden registers when interrupting 32-bit or 16-bit code! */
3815 if (uNewCpl != uOldCpl)
3816 {
3817 pVCpu->cpum.GstCtx.ss.Sel = 0 | uNewCpl;
3818 pVCpu->cpum.GstCtx.ss.ValidSel = 0 | uNewCpl;
3819 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3820 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
3821 pVCpu->cpum.GstCtx.ss.u64Base = 0;
3822 pVCpu->cpum.GstCtx.ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
3823 }
3824 pVCpu->cpum.GstCtx.rsp = uNewRsp - cbStackFrame;
3825 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3826 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3827 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3828 pVCpu->cpum.GstCtx.cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
3829 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3830 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3831 pVCpu->cpum.GstCtx.rip = uNewRip;
3832
3833 fEfl &= ~fEflToClear;
3834 IEMMISC_SET_EFL(pVCpu, fEfl);
3835
3836 if (fFlags & IEM_XCPT_FLAGS_CR2)
3837 pVCpu->cpum.GstCtx.cr2 = uCr2;
3838
3839 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3840 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3841
3842 iemRecalcExecModeAndCplFlags(pVCpu);
3843
3844 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3845}
3846
3847
3848/**
3849 * Implements exceptions and interrupts.
3850 *
3851 * All exceptions and interrupts go through this function!
3852 *
3853 * @returns VBox strict status code.
3854 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3855 * @param cbInstr The number of bytes to offset rIP by in the return
3856 * address.
3857 * @param u8Vector The interrupt / exception vector number.
3858 * @param fFlags The flags.
3859 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3860 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3861 */
3862VBOXSTRICTRC
3863iemRaiseXcptOrInt(PVMCPUCC pVCpu,
3864 uint8_t cbInstr,
3865 uint8_t u8Vector,
3866 uint32_t fFlags,
3867 uint16_t uErr,
3868 uint64_t uCr2) RT_NOEXCEPT
3869{
3870 /*
3871 * Get all the state that we might need here.
3872 */
3873 IEM_CTX_IMPORT_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3874 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3875
3876#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
3877 /*
3878 * Flush prefetch buffer
3879 */
3880 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
3881#endif
3882
3883 /*
3884 * Perform the V8086 IOPL check and upgrade the fault without nesting.
3885 */
3886 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1VM
3887 && pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL != 3
3888 && (fFlags & ( IEM_XCPT_FLAGS_T_SOFT_INT
3889 | IEM_XCPT_FLAGS_BP_INSTR
3890 | IEM_XCPT_FLAGS_ICEBP_INSTR
3891 | IEM_XCPT_FLAGS_OF_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
3892 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) )
3893 {
3894 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
3895 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
3896 u8Vector = X86_XCPT_GP;
3897 uErr = 0;
3898 }
3899
3900 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
3901#ifdef DBGFTRACE_ENABLED
3902 RTTraceBufAddMsgF(pVM->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
3903 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
3904 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp);
3905#endif
3906
3907 /*
3908 * Check if DBGF wants to intercept the exception.
3909 */
3910 if ( (fFlags & (IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_T_SOFT_INT))
3911 || !DBGF_IS_EVENT_ENABLED(pVM, (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + u8Vector)) )
3912 { /* likely */ }
3913 else
3914 {
3915 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + u8Vector),
3916 DBGFEVENTCTX_INVALID, 1, (uint64_t)uErr);
3917 if (rcStrict != VINF_SUCCESS)
3918 return rcStrict;
3919 }
3920
3921 /*
3922 * Evaluate whether NMI blocking should be in effect.
3923 * Normally, NMI blocking is in effect whenever we inject an NMI.
3924 */
3925 bool fBlockNmi = u8Vector == X86_XCPT_NMI
3926 && (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT);
3927
3928#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3929 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3930 {
3931 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEvent(pVCpu, u8Vector, fFlags, uErr, uCr2, cbInstr);
3932 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
3933 return rcStrict0;
3934
3935 /* If virtual-NMI blocking is in effect for the nested-guest, guest NMIs are not blocked. */
3936 if (pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking)
3937 {
3938 Assert(CPUMIsGuestVmxPinCtlsSet(&pVCpu->cpum.GstCtx, VMX_PIN_CTLS_VIRT_NMI));
3939 fBlockNmi = false;
3940 }
3941 }
3942#endif
3943
3944#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
3945 if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
3946 {
3947 /*
3948 * If the event is being injected as part of VMRUN, it isn't subject to event
3949 * intercepts in the nested-guest. However, secondary exceptions that occur
3950 * during injection of any event -are- subject to exception intercepts.
3951 *
3952 * See AMD spec. 15.20 "Event Injection".
3953 */
3954 if (!pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents)
3955 pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents = true;
3956 else
3957 {
3958 /*
3959 * Check and handle if the event being raised is intercepted.
3960 */
3961 VBOXSTRICTRC rcStrict0 = iemHandleSvmEventIntercept(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
3962 if (rcStrict0 != VINF_SVM_INTERCEPT_NOT_ACTIVE)
3963 return rcStrict0;
3964 }
3965 }
3966#endif
3967
3968 /*
3969 * Set NMI blocking if necessary.
3970 */
3971 if (fBlockNmi)
3972 CPUMSetInterruptInhibitingByNmi(&pVCpu->cpum.GstCtx);
3973
3974 /*
3975 * Do recursion accounting.
3976 */
3977 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
3978 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
3979 if (pVCpu->iem.s.cXcptRecursions == 0)
3980 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
3981 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2));
3982 else
3983 {
3984 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
3985 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt,
3986 pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
3987
3988 if (pVCpu->iem.s.cXcptRecursions >= 4)
3989 {
3990#ifdef DEBUG_bird
3991 AssertFailed();
3992#endif
3993 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
3994 }
3995
3996 /*
3997 * Evaluate the sequence of recurring events.
3998 */
3999 IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevXcpt, uPrevXcpt, fFlags, u8Vector,
4000 NULL /* pXcptRaiseInfo */);
4001 if (enmRaise == IEMXCPTRAISE_CURRENT_XCPT)
4002 { /* likely */ }
4003 else if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
4004 {
4005 Log2(("iemRaiseXcptOrInt: Raising double fault. uPrevXcpt=%#x\n", uPrevXcpt));
4006 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
4007 u8Vector = X86_XCPT_DF;
4008 uErr = 0;
4009#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4010 /* VMX nested-guest #DF intercept needs to be checked here. */
4011 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
4012 {
4013 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEventDoubleFault(pVCpu);
4014 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
4015 return rcStrict0;
4016 }
4017#endif
4018 /* SVM nested-guest #DF intercepts need to be checked now. See AMD spec. 15.12 "Exception Intercepts". */
4019 if (IEM_SVM_IS_XCPT_INTERCEPT_SET(pVCpu, X86_XCPT_DF))
4020 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
4021 }
4022 else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)
4023 {
4024 Log2(("iemRaiseXcptOrInt: Raising triple fault. uPrevXcpt=%#x\n", uPrevXcpt));
4025 return iemInitiateCpuShutdown(pVCpu);
4026 }
4027 else if (enmRaise == IEMXCPTRAISE_CPU_HANG)
4028 {
4029 /* If a nested-guest enters an endless CPU loop condition, we'll emulate it; otherwise guru. */
4030 Log2(("iemRaiseXcptOrInt: CPU hang condition detected\n"));
4031 if ( !CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu))
4032 && !CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
4033 return VERR_EM_GUEST_CPU_HANG;
4034 }
4035 else
4036 {
4037 AssertMsgFailed(("Unexpected condition! enmRaise=%#x uPrevXcpt=%#x fPrevXcpt=%#x, u8Vector=%#x fFlags=%#x\n",
4038 enmRaise, uPrevXcpt, fPrevXcpt, u8Vector, fFlags));
4039 return VERR_IEM_IPE_9;
4040 }
4041
4042 /*
4043 * The 'EXT' bit is set when an exception occurs during delivery of an external
4044 * event (such as an interrupt or an earlier exception)[1]. The privileged software
4045 * exception (INT1) also sets the EXT bit[2]. For exceptions generated by software
4046 * interrupts and the INTO and INT3 instructions, the 'EXT' bit is not set[3].
4047 *
4048 * [1] - Intel spec. 6.13 "Error Code"
4049 * [2] - Intel spec. 26.5.1.1 "Details of Vectored-Event Injection".
4050 * [3] - Intel Instruction reference for INT n.
4051 */
4052 if ( (fPrevXcpt & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR))
4053 && (fFlags & IEM_XCPT_FLAGS_ERR)
4054 && u8Vector != X86_XCPT_PF
4055 && u8Vector != X86_XCPT_DF)
4056 {
4057 uErr |= X86_TRAP_ERR_EXTERNAL;
4058 }
4059 }
4060
4061 pVCpu->iem.s.cXcptRecursions++;
4062 pVCpu->iem.s.uCurXcpt = u8Vector;
4063 pVCpu->iem.s.fCurXcpt = fFlags;
4064 pVCpu->iem.s.uCurXcptErr = uErr;
4065 pVCpu->iem.s.uCurXcptCr2 = uCr2;
4066
4067 /*
4068 * Extensive logging.
4069 */
4070#if defined(LOG_ENABLED) && defined(IN_RING3)
4071 if (LogIs3Enabled())
4072 {
4073 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR_MASK);
4074 char szRegs[4096];
4075 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
4076 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
4077 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
4078 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
4079 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
4080 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
4081 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
4082 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
4083 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
4084 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
4085 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
4086 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4087 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4088 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4089 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4090 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4091 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4092 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4093 " efer=%016VR{efer}\n"
4094 " pat=%016VR{pat}\n"
4095 " sf_mask=%016VR{sf_mask}\n"
4096 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4097 " lstar=%016VR{lstar}\n"
4098 " star=%016VR{star} cstar=%016VR{cstar}\n"
4099 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4100 );
4101
4102 char szInstr[256];
4103 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4104 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4105 szInstr, sizeof(szInstr), NULL);
4106 Log3(("%s%s\n", szRegs, szInstr));
4107 }
4108#endif /* LOG_ENABLED */
4109
4110 /*
4111 * Stats.
4112 */
4113 if (!(fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT))
4114 STAM_REL_STATS({ pVCpu->iem.s.aStatInts[u8Vector] += 1; });
4115 else if (u8Vector <= X86_XCPT_LAST)
4116 {
4117 STAM_REL_COUNTER_INC(&pVCpu->iem.s.aStatXcpts[u8Vector]);
4118 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_XCPT, u8Vector),
4119 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base, ASMReadTSC());
4120 }
4121
4122 /*
4123 * A #PF implies an INVLPG for the CR2 value (see 4.10.1.1 in Intel SDM Vol 3)
4124 * to ensure that a stale TLB or paging cache entry will only cause one
4125 * spurious #PF.
4126 */
4127 if ( u8Vector == X86_XCPT_PF
4128 && (fFlags & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_CR2)) == (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_CR2))
4129 IEMTlbInvalidatePage(pVCpu, uCr2);
4130
4131 /*
4132 * Call the mode specific worker function.
4133 */
4134 VBOXSTRICTRC rcStrict;
4135 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
4136 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4137 else if (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LMA)
4138 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4139 else
4140 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4141
4142 /* Flush the prefetch buffer. */
4143 iemOpcodeFlushHeavy(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
4144
4145 /*
4146 * Unwind.
4147 */
4148 pVCpu->iem.s.cXcptRecursions--;
4149 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
4150 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
4151 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u depth=%d\n",
4152 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel,
4153 pVCpu->cpum.GstCtx.esp, IEM_GET_CPL(pVCpu), pVCpu->iem.s.cXcptRecursions + 1));
4154 return rcStrict;
4155}
4156
4157#ifdef IEM_WITH_SETJMP
4158/**
4159 * See iemRaiseXcptOrInt. Will not return.
4160 */
4161DECL_NO_RETURN(void)
4162iemRaiseXcptOrIntJmp(PVMCPUCC pVCpu,
4163 uint8_t cbInstr,
4164 uint8_t u8Vector,
4165 uint32_t fFlags,
4166 uint16_t uErr,
4167 uint64_t uCr2) IEM_NOEXCEPT_MAY_LONGJMP
4168{
4169 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4170 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
4171}
4172#endif
4173
4174
4175/** \#DE - 00. */
4176VBOXSTRICTRC iemRaiseDivideError(PVMCPUCC pVCpu) RT_NOEXCEPT
4177{
4178 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4179}
4180
4181
4182/** \#DB - 01.
4183 * @note This automatically clears DR7.GD. */
4184VBOXSTRICTRC iemRaiseDebugException(PVMCPUCC pVCpu) RT_NOEXCEPT
4185{
4186 /* This always clears RF (via IEM_XCPT_FLAGS_DRx_INSTR_BP). */
4187 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
4188 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_DRx_INSTR_BP, 0, 0);
4189}
4190
4191
4192/** \#BR - 05. */
4193VBOXSTRICTRC iemRaiseBoundRangeExceeded(PVMCPUCC pVCpu) RT_NOEXCEPT
4194{
4195 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_BR, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4196}
4197
4198
4199/** \#UD - 06. */
4200VBOXSTRICTRC iemRaiseUndefinedOpcode(PVMCPUCC pVCpu) RT_NOEXCEPT
4201{
4202 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4203}
4204
4205
4206/** \#NM - 07. */
4207VBOXSTRICTRC iemRaiseDeviceNotAvailable(PVMCPUCC pVCpu) RT_NOEXCEPT
4208{
4209 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4210}
4211
4212
4213/** \#TS(err) - 0a. */
4214VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4215{
4216 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4217}
4218
4219
4220/** \#TS(tr) - 0a. */
4221VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPUCC pVCpu) RT_NOEXCEPT
4222{
4223 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4224 pVCpu->cpum.GstCtx.tr.Sel, 0);
4225}
4226
4227
4228/** \#TS(0) - 0a. */
4229VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPUCC pVCpu) RT_NOEXCEPT
4230{
4231 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4232 0, 0);
4233}
4234
4235
4236/** \#TS(err) - 0a. */
4237VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4238{
4239 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4240 uSel & X86_SEL_MASK_OFF_RPL, 0);
4241}
4242
4243
4244/** \#NP(err) - 0b. */
4245VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4246{
4247 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4248}
4249
4250
4251/** \#NP(sel) - 0b. */
4252VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4253{
4254 Log(("iemRaiseSelectorNotPresentBySelector: cs:rip=%04x:%RX64 uSel=%#x\n",
4255 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uSel));
4256 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4257 uSel & ~X86_SEL_RPL, 0);
4258}
4259
4260
4261/** \#SS(seg) - 0c. */
4262VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4263{
4264 Log(("iemRaiseStackSelectorNotPresentBySelector: cs:rip=%04x:%RX64 uSel=%#x\n",
4265 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uSel));
4266 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4267 uSel & ~X86_SEL_RPL, 0);
4268}
4269
4270
4271/** \#SS(err) - 0c. */
4272VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4273{
4274 Log(("iemRaiseStackSelectorNotPresentWithErr: cs:rip=%04x:%RX64 uErr=%#x\n",
4275 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uErr));
4276 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4277}
4278
4279
4280/** \#GP(n) - 0d. */
4281VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4282{
4283 Log(("iemRaiseGeneralProtectionFault: cs:rip=%04x:%RX64 uErr=%#x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uErr));
4284 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4285}
4286
4287
4288/** \#GP(0) - 0d. */
4289VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPUCC pVCpu) RT_NOEXCEPT
4290{
4291 Log(("iemRaiseGeneralProtectionFault0: cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
4292 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4293}
4294
4295#ifdef IEM_WITH_SETJMP
4296/** \#GP(0) - 0d. */
4297DECL_NO_RETURN(void) iemRaiseGeneralProtectionFault0Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4298{
4299 Log(("iemRaiseGeneralProtectionFault0Jmp: cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
4300 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4301}
4302#endif
4303
4304
4305/** \#GP(sel) - 0d. */
4306VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
4307{
4308 Log(("iemRaiseGeneralProtectionFaultBySelector: cs:rip=%04x:%RX64 Sel=%#x\n",
4309 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, Sel));
4310 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4311 Sel & ~X86_SEL_RPL, 0);
4312}
4313
4314
4315/** \#GP(0) - 0d. */
4316VBOXSTRICTRC iemRaiseNotCanonical(PVMCPUCC pVCpu) RT_NOEXCEPT
4317{
4318 Log(("iemRaiseNotCanonical: cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
4319 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4320}
4321
4322
4323/** \#GP(sel) - 0d. */
4324VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4325{
4326 Log(("iemRaiseSelectorBounds: cs:rip=%04x:%RX64 iSegReg=%d fAccess=%#x\n",
4327 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, iSegReg, fAccess));
4328 NOREF(iSegReg); NOREF(fAccess);
4329 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4330 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4331}
4332
4333#ifdef IEM_WITH_SETJMP
4334/** \#GP(sel) - 0d, longjmp. */
4335DECL_NO_RETURN(void) iemRaiseSelectorBoundsJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP
4336{
4337 Log(("iemRaiseSelectorBoundsJmp: cs:rip=%04x:%RX64 iSegReg=%d fAccess=%#x\n",
4338 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, iSegReg, fAccess));
4339 NOREF(iSegReg); NOREF(fAccess);
4340 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4341 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4342}
4343#endif
4344
4345/** \#GP(sel) - 0d. */
4346VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
4347{
4348 Log(("iemRaiseSelectorBoundsBySelector: cs:rip=%04x:%RX64 Sel=%#x\n",
4349 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, Sel));
4350 NOREF(Sel);
4351 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4352}
4353
4354#ifdef IEM_WITH_SETJMP
4355/** \#GP(sel) - 0d, longjmp. */
4356DECL_NO_RETURN(void) iemRaiseSelectorBoundsBySelectorJmp(PVMCPUCC pVCpu, RTSEL Sel) IEM_NOEXCEPT_MAY_LONGJMP
4357{
4358 Log(("iemRaiseSelectorBoundsBySelectorJmp: cs:rip=%04x:%RX64 Sel=%#x\n",
4359 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, Sel));
4360 NOREF(Sel);
4361 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4362}
4363#endif
4364
4365
4366/** \#GP(sel) - 0d. */
4367VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4368{
4369 Log(("iemRaiseSelectorInvalidAccess: cs:rip=%04x:%RX64 iSegReg=%d fAccess=%#x\n",
4370 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, iSegReg, fAccess));
4371 NOREF(iSegReg); NOREF(fAccess);
4372 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4373}
4374
4375#ifdef IEM_WITH_SETJMP
4376/** \#GP(sel) - 0d, longjmp. */
4377DECL_NO_RETURN(void) iemRaiseSelectorInvalidAccessJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP
4378{
4379 NOREF(iSegReg); NOREF(fAccess);
4380 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4381}
4382#endif
4383
4384
4385/** \#PF(n) - 0e. */
4386VBOXSTRICTRC iemRaisePageFault(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess, uint32_t fAccess, int rc) RT_NOEXCEPT
4387{
4388 uint16_t uErr;
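    /* (Informal note on the error code bits assembled below: X86_TRAP_PF_P is set when the
       fault was caused by a protection violation rather than a not-present page,
       X86_TRAP_PF_RW for writes, X86_TRAP_PF_US for user-mode (CPL 3) accesses and
       X86_TRAP_PF_ID for instruction fetches when NX/PAE paging is enabled.) */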
4389 switch (rc)
4390 {
4391 case VERR_PAGE_NOT_PRESENT:
4392 case VERR_PAGE_TABLE_NOT_PRESENT:
4393 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
4394 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
4395 uErr = 0;
4396 break;
4397
4398 default:
4399 AssertMsgFailed(("%Rrc\n", rc));
4400 RT_FALL_THRU();
4401 case VERR_ACCESS_DENIED:
4402 uErr = X86_TRAP_PF_P;
4403 break;
4404
4405 /** @todo reserved */
4406 }
4407
4408 if (IEM_GET_CPL(pVCpu) == 3)
4409 uErr |= X86_TRAP_PF_US;
4410
4411 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
4412 && ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
4413 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) ) )
4414 uErr |= X86_TRAP_PF_ID;
4415
4416#if 0 /* This is so much non-sense, really. Why was it done like that? */
4417 /* Note! RW access callers reporting a WRITE protection fault, will clear
4418 the READ flag before calling. So, read-modify-write accesses (RW)
4419 can safely be reported as READ faults. */
4420 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
4421 uErr |= X86_TRAP_PF_RW;
4422#else
4423 if (fAccess & IEM_ACCESS_TYPE_WRITE)
4424 {
4425 /// @todo r=bird: bs3-cpu-basic-2 wants X86_TRAP_PF_RW for xchg and cmpxchg
4426 /// (regardless of outcome of the comparison in the latter case).
4427 //if (!(fAccess & IEM_ACCESS_TYPE_READ))
4428 uErr |= X86_TRAP_PF_RW;
4429 }
4430#endif
4431
4432 /* For FXSAVE and FRSTOR the #PF is typically reported at the max address
4433 of the memory operand rather than at the start of it. (Not sure what
4434 happens if it crosses a page boundary.) The current heuristic for
4435 this is to report the #PF for the last byte if the access is more than
4436 64 bytes. This is probably not correct, but we can work that out later,
4437 main objective now is to get FXSAVE to work like for real hardware and
4438 make bs3-cpu-basic2 work. */
4439 if (cbAccess <= 64)
4440 { /* likely */ }
4441 else
4442 GCPtrWhere += cbAccess - 1;
4443
4444 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
4445 uErr, GCPtrWhere);
4446}
4447
4448#ifdef IEM_WITH_SETJMP
4449/** \#PF(n) - 0e, longjmp. */
4450DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess,
4451 uint32_t fAccess, int rc) IEM_NOEXCEPT_MAY_LONGJMP
4452{
4453 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, cbAccess, fAccess, rc)));
4454}
4455#endif
4456
4457
4458/** \#MF(0) - 10. */
4459VBOXSTRICTRC iemRaiseMathFault(PVMCPUCC pVCpu) RT_NOEXCEPT
4460{
4461 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_NE)
4462 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4463
4464 /* Convert a #MF into a FERR -> IRQ 13. See @bugref{6117}. */
4465 PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13 /* u8Irq */, 1 /* u8Level */, 0 /* uTagSrc */);
4466 return iemRegUpdateRipAndFinishClearingRF(pVCpu);
4467}
4468
4469
4470/** \#AC(0) - 11. */
4471VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPUCC pVCpu) RT_NOEXCEPT
4472{
4473 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4474}
4475
4476#ifdef IEM_WITH_SETJMP
4477/** \#AC(0) - 11, longjmp. */
4478DECL_NO_RETURN(void) iemRaiseAlignmentCheckExceptionJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4479{
4480 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaiseAlignmentCheckException(pVCpu)));
4481}
4482#endif
4483
4484
4485/** \#XF(0)/\#XM(0) - 19. */
4486VBOXSTRICTRC iemRaiseSimdFpException(PVMCPUCC pVCpu) RT_NOEXCEPT
4487{
4488 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_XF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4489}
4490
4491
4492/** Accessed via IEMOP_RAISE_DIVIDE_ERROR. */
4493IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
4494{
4495 NOREF(cbInstr);
4496 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4497}
4498
4499
4500/** Accessed via IEMOP_RAISE_INVALID_LOCK_PREFIX. */
4501IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
4502{
4503 NOREF(cbInstr);
4504 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4505}
4506
4507
4508/** Accessed via IEMOP_RAISE_INVALID_OPCODE. */
4509IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
4510{
4511 NOREF(cbInstr);
4512 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4513}
4514
4515
4516/** @} */
4517
4518/** @name Common opcode decoders.
4519 * @{
4520 */
4521//#include <iprt/mem.h>
4522
4523/**
4524 * Used to add extra details about a stub case.
4525 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4526 */
4527void iemOpStubMsg2(PVMCPUCC pVCpu) RT_NOEXCEPT
4528{
4529#if defined(LOG_ENABLED) && defined(IN_RING3)
4530 PVM pVM = pVCpu->CTX_SUFF(pVM);
4531 char szRegs[4096];
4532 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
4533 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
4534 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
4535 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
4536 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
4537 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
4538 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
4539 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
4540 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
4541 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
4542 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
4543 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4544 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4545 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4546 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4547 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4548 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4549 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4550 " efer=%016VR{efer}\n"
4551 " pat=%016VR{pat}\n"
4552 " sf_mask=%016VR{sf_mask}\n"
4553 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4554 " lstar=%016VR{lstar}\n"
4555 " star=%016VR{star} cstar=%016VR{cstar}\n"
4556 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4557 );
4558
4559 char szInstr[256];
4560 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4561 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4562 szInstr, sizeof(szInstr), NULL);
4563
4564 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
4565#else
4566 RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs, pVCpu->cpum.GstCtx.rip);
4567#endif
4568}
4569
4570/** @} */
4571
4572
4573
4574/** @name Register Access.
4575 * @{
4576 */
4577
4578/**
4579 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
4580 *
4581 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4582 * segment limit.
4583 *
4584 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4585 * @param cbInstr Instruction size.
4586 * @param offNextInstr The offset of the next instruction.
4587 * @param enmEffOpSize Effective operand size.
4588 */
4589VBOXSTRICTRC iemRegRipRelativeJumpS8AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int8_t offNextInstr,
4590 IEMMODE enmEffOpSize) RT_NOEXCEPT
4591{
4592 switch (enmEffOpSize)
4593 {
4594 case IEMMODE_16BIT:
4595 {
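    /* uNewIp is a uint16_t, so the address calculation wraps at 64KiB. */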
4596 uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + (int16_t)offNextInstr;
4597 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
4598 || IEM_IS_64BIT_CODE(pVCpu) /* no CS limit checks in 64-bit mode */))
4599 pVCpu->cpum.GstCtx.rip = uNewIp;
4600 else
4601 return iemRaiseGeneralProtectionFault0(pVCpu);
4602 break;
4603 }
4604
4605 case IEMMODE_32BIT:
4606 {
4607 Assert(!IEM_IS_64BIT_CODE(pVCpu));
4608 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
4609
4610 uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + (int32_t)offNextInstr;
4611 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
4612 pVCpu->cpum.GstCtx.rip = uNewEip;
4613 else
4614 return iemRaiseGeneralProtectionFault0(pVCpu);
4615 break;
4616 }
4617
4618 case IEMMODE_64BIT:
4619 {
4620 Assert(IEM_IS_64BIT_CODE(pVCpu));
4621
4622 uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
4623 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
4624 pVCpu->cpum.GstCtx.rip = uNewRip;
4625 else
4626 return iemRaiseGeneralProtectionFault0(pVCpu);
4627 break;
4628 }
4629
4630 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4631 }
4632
4633#ifndef IEM_WITH_CODE_TLB
4634 /* Flush the prefetch buffer. */
4635 pVCpu->iem.s.cbOpcode = cbInstr;
4636#endif
4637
4638 /*
4639 * Clear RF and finish the instruction (maybe raise #DB).
4640 */
4641 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
4642}
4643
4644
4645/**
4646 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
4647 *
4648 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4649 * segment limit.
4650 *
4651 * @returns Strict VBox status code.
4652 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4653 * @param cbInstr Instruction size.
4654 * @param offNextInstr The offset of the next instruction.
4655 */
4656VBOXSTRICTRC iemRegRipRelativeJumpS16AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int16_t offNextInstr) RT_NOEXCEPT
4657{
4658 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
4659
4660 uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + offNextInstr;
4661 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
4662 || IEM_IS_64BIT_CODE(pVCpu) /* no limit checking in 64-bit mode */))
4663 pVCpu->cpum.GstCtx.rip = uNewIp;
4664 else
4665 return iemRaiseGeneralProtectionFault0(pVCpu);
4666
4667#ifndef IEM_WITH_CODE_TLB
4668 /* Flush the prefetch buffer. */
4669 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4670#endif
4671
4672 /*
4673 * Clear RF and finish the instruction (maybe raise #DB).
4674 */
4675 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
4676}
4677
4678
4679/**
4680 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
4681 *
4682 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4683 * segment limit.
4684 *
4685 * @returns Strict VBox status code.
4686 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4687 * @param cbInstr Instruction size.
4688 * @param offNextInstr The offset of the next instruction.
4689 * @param enmEffOpSize Effective operand size.
4690 */
4691VBOXSTRICTRC iemRegRipRelativeJumpS32AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int32_t offNextInstr,
4692 IEMMODE enmEffOpSize) RT_NOEXCEPT
4693{
4694 if (enmEffOpSize == IEMMODE_32BIT)
4695 {
4696 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX); Assert(!IEM_IS_64BIT_CODE(pVCpu));
4697
4698 uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + offNextInstr;
4699 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
4700 pVCpu->cpum.GstCtx.rip = uNewEip;
4701 else
4702 return iemRaiseGeneralProtectionFault0(pVCpu);
4703 }
4704 else
4705 {
4706 Assert(enmEffOpSize == IEMMODE_64BIT);
4707
4708 uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
4709 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
4710 pVCpu->cpum.GstCtx.rip = uNewRip;
4711 else
4712 return iemRaiseGeneralProtectionFault0(pVCpu);
4713 }
4714
4715#ifndef IEM_WITH_CODE_TLB
4716 /* Flush the prefetch buffer. */
4717 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4718#endif
4719
4720 /*
4721 * Clear RF and finish the instruction (maybe raise #DB).
4722 */
4723 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
4724}
4725
4726/** @} */
4727
4728
4729/** @name FPU access and helpers.
4730 *
4731 * @{
4732 */
4733
4734/**
4735 * Updates the x87.DS and FPUDP registers.
4736 *
4737 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4738 * @param pFpuCtx The FPU context.
4739 * @param iEffSeg The effective segment register.
4740 * @param GCPtrEff The effective address relative to @a iEffSeg.
4741 */
4742DECLINLINE(void) iemFpuUpdateDP(PVMCPUCC pVCpu, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
4743{
4744 RTSEL sel;
4745 switch (iEffSeg)
4746 {
4747 case X86_SREG_DS: sel = pVCpu->cpum.GstCtx.ds.Sel; break;
4748 case X86_SREG_SS: sel = pVCpu->cpum.GstCtx.ss.Sel; break;
4749 case X86_SREG_CS: sel = pVCpu->cpum.GstCtx.cs.Sel; break;
4750 case X86_SREG_ES: sel = pVCpu->cpum.GstCtx.es.Sel; break;
4751 case X86_SREG_FS: sel = pVCpu->cpum.GstCtx.fs.Sel; break;
4752 case X86_SREG_GS: sel = pVCpu->cpum.GstCtx.gs.Sel; break;
4753 default:
4754 AssertMsgFailed(("%d\n", iEffSeg));
4755 sel = pVCpu->cpum.GstCtx.ds.Sel;
4756 }
4757 /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
4758 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
4759 {
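    /* Real and V8086 mode: FPUDP gets the linear address (selector * 16 + offset) and DS is left zero. */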
4760 pFpuCtx->DS = 0;
4761 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
4762 }
4763 else if (!IEM_IS_LONG_MODE(pVCpu)) /** @todo this is weird. explain. */
4764 {
4765 pFpuCtx->DS = sel;
4766 pFpuCtx->FPUDP = GCPtrEff;
4767 }
4768 else
4769 *(uint64_t *)&pFpuCtx->FPUDP = GCPtrEff;
4770}
4771
4772
4773/**
4774 * Rotates the stack registers in the push direction.
4775 *
4776 * @param pFpuCtx The FPU context.
4777 * @remarks This is a complete waste of time, but fxsave stores the registers in
4778 * stack order.
4779 */
4780DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
4781{
4782 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
4783 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
4784 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
4785 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
4786 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
4787 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
4788 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
4789 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
4790 pFpuCtx->aRegs[0].r80 = r80Tmp;
4791}
4792
4793
4794/**
4795 * Rotates the stack registers in the pop direction.
4796 *
4797 * @param pFpuCtx The FPU context.
4798 * @remarks This is a complete waste of time, but fxsave stores the registers in
4799 * stack order.
4800 */
4801DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
4802{
4803 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
4804 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
4805 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
4806 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
4807 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
4808 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
4809 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
4810 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
4811 pFpuCtx->aRegs[7].r80 = r80Tmp;
4812}
4813
4814
4815/**
4816 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
4817 * exception prevents it.
4818 *
4819 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4820 * @param pResult The FPU operation result to push.
4821 * @param pFpuCtx The FPU context.
4822 */
4823static void iemFpuMaybePushResult(PVMCPU pVCpu, PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx) RT_NOEXCEPT
4824{
4825 /* Update FSW and bail if there are pending exceptions afterwards. */
4826 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
4827 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
4828 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4829 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4830 {
4831 if ((fFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4832 Log11(("iemFpuMaybePushResult: %04x:%08RX64: FSW %#x -> %#x\n",
4833 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fFsw));
4834 pFpuCtx->FSW = fFsw;
4835 return;
4836 }
4837
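    /* Pushing decrements TOP: (TOP + 7) & 7 equals TOP - 1 modulo 8. */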
4838 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
4839 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
4840 {
4841 /* All is fine, push the actual value. */
4842 pFpuCtx->FTW |= RT_BIT(iNewTop);
4843 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
4844 }
4845 else if (pFpuCtx->FCW & X86_FCW_IM)
4846 {
4847 /* Masked stack overflow, push QNaN. */
4848 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
4849 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
4850 }
4851 else
4852 {
4853 /* Raise stack overflow, don't push anything. */
4854 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
4855 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
4856 Log11(("iemFpuMaybePushResult: %04x:%08RX64: stack overflow (FSW=%#x)\n",
4857 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
4858 return;
4859 }
4860
4861 fFsw &= ~X86_FSW_TOP_MASK;
4862 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
4863 pFpuCtx->FSW = fFsw;
4864
4865 iemFpuRotateStackPush(pFpuCtx);
4866 RT_NOREF(pVCpu);
4867}
4868
4869
4870/**
4871 * Stores a result in a FPU register and updates the FSW and FTW.
4872 *
4873 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4874 * @param pFpuCtx The FPU context.
4875 * @param pResult The result to store.
4876 * @param iStReg Which FPU register to store it in.
4877 */
4878static void iemFpuStoreResultOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg) RT_NOEXCEPT
4879{
4880 Assert(iStReg < 8);
4881 uint16_t fNewFsw = pFpuCtx->FSW;
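    /* ST(iStReg) maps to the physical register (TOP + iStReg) modulo 8. */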
4882 uint16_t const iReg = (X86_FSW_TOP_GET(fNewFsw) + iStReg) & X86_FSW_TOP_SMASK;
4883 fNewFsw &= ~X86_FSW_C_MASK;
4884 fNewFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
4885 if ((fNewFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4886 Log11(("iemFpuStoreResultOnly: %04x:%08RX64: FSW %#x -> %#x\n",
4887 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fNewFsw));
4888 pFpuCtx->FSW = fNewFsw;
4889 pFpuCtx->FTW |= RT_BIT(iReg);
4890 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
4891 RT_NOREF(pVCpu);
4892}
4893
4894
4895/**
4896 * Only updates the FPU status word (FSW) with the result of the current
4897 * instruction.
4898 *
4899 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4900 * @param pFpuCtx The FPU context.
4901 * @param u16FSW The FSW output of the current instruction.
4902 */
4903static void iemFpuUpdateFSWOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint16_t u16FSW) RT_NOEXCEPT
4904{
4905 uint16_t fNewFsw = pFpuCtx->FSW;
4906 fNewFsw &= ~X86_FSW_C_MASK;
4907 fNewFsw |= u16FSW & ~X86_FSW_TOP_MASK;
4908 if ((fNewFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4909 Log11(("iemFpuUpdateFSWOnly: %04x:%08RX64: FSW %#x -> %#x\n",
4910 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fNewFsw));
4911 pFpuCtx->FSW = fNewFsw;
4912 RT_NOREF(pVCpu);
4913}
4914
4915
4916/**
4917 * Pops one item off the FPU stack if no pending exception prevents it.
4918 *
4919 * @param pFpuCtx The FPU context.
4920 */
4921static void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx) RT_NOEXCEPT
4922{
4923 /* Check pending exceptions. */
4924 uint16_t uFSW = pFpuCtx->FSW;
4925 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4926 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4927 return;
4928
4929 /* Pop: increment TOP (modulo 8). */
4930 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
4931 uFSW &= ~X86_FSW_TOP_MASK;
4932 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
4933 pFpuCtx->FSW = uFSW;
4934
4935 /* Mark the previous ST0 as empty. */
4936 iOldTop >>= X86_FSW_TOP_SHIFT;
4937 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
4938
4939 /* Rotate the registers. */
4940 iemFpuRotateStackPop(pFpuCtx);
4941}
4942
4943
4944/**
4945 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
4946 *
4947 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4948 * @param pResult The FPU operation result to push.
4949 * @param uFpuOpcode The FPU opcode value.
4950 */
4951void iemFpuPushResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint16_t uFpuOpcode) RT_NOEXCEPT
4952{
4953 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4954 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
4955 iemFpuMaybePushResult(pVCpu, pResult, pFpuCtx);
4956}
4957
4958
4959/**
4960 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
4961 * and sets FPUDP and FPUDS.
4962 *
4963 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4964 * @param pResult The FPU operation result to push.
4965 * @param iEffSeg The effective segment register.
4966 * @param GCPtrEff The effective address relative to @a iEffSeg.
4967 * @param uFpuOpcode The FPU opcode value.
4968 */
4969void iemFpuPushResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff,
4970 uint16_t uFpuOpcode) RT_NOEXCEPT
4971{
4972 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4973 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
4974 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
4975 iemFpuMaybePushResult(pVCpu, pResult, pFpuCtx);
4976}
4977
4978
4979/**
4980 * Replace ST0 with the first value and push the second onto the FPU stack,
4981 * unless a pending exception prevents it.
4982 *
4983 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4984 * @param pResult The FPU operation result to store and push.
4985 * @param uFpuOpcode The FPU opcode value.
4986 */
4987void iemFpuPushResultTwo(PVMCPUCC pVCpu, PIEMFPURESULTTWO pResult, uint16_t uFpuOpcode) RT_NOEXCEPT
4988{
4989 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4990 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
4991
4992 /* Update FSW and bail if there are pending exceptions afterwards. */
4993 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
4994 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
4995 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4996 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4997 {
4998 if ((fFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4999 Log11(("iemFpuPushResultTwo: %04x:%08RX64: FSW %#x -> %#x\n",
5000 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fFsw));
5001 pFpuCtx->FSW = fFsw;
5002 return;
5003 }
5004
5005 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
5006 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
5007 {
5008 /* All is fine, push the actual value. */
5009 pFpuCtx->FTW |= RT_BIT(iNewTop);
5010 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
5011 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
5012 }
5013 else if (pFpuCtx->FCW & X86_FCW_IM)
5014 {
5015 /* Masked stack overflow, push QNaN. */
5016 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
5017 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
5018 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5019 }
5020 else
5021 {
5022 /* Raise stack overflow, don't push anything. */
5023 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
5024 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
5025 Log11(("iemFpuPushResultTwo: %04x:%08RX64: stack overflow (FSW=%#x)\n",
5026 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5027 return;
5028 }
5029
5030 fFsw &= ~X86_FSW_TOP_MASK;
5031 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
5032 pFpuCtx->FSW = fFsw;
5033
5034 iemFpuRotateStackPush(pFpuCtx);
5035}
5036
5037
5038/**
5039 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
5040 * FOP.
5041 *
5042 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5043 * @param pResult The result to store.
5044 * @param iStReg Which FPU register to store it in.
5045 * @param uFpuOpcode The FPU opcode value.
5046 */
5047void iemFpuStoreResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5048{
5049 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5050 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5051 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5052}
5053
5054
5055/**
5056 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
5057 * FOP, and then pops the stack.
5058 *
5059 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5060 * @param pResult The result to store.
5061 * @param iStReg Which FPU register to store it in.
5062 * @param uFpuOpcode The FPU opcode value.
5063 */
5064void iemFpuStoreResultThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5065{
5066 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5067 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5068 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5069 iemFpuMaybePopOne(pFpuCtx);
5070}
5071
5072
5073/**
5074 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
5075 * FPUDP, and FPUDS.
5076 *
5077 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5078 * @param pResult The result to store.
5079 * @param iStReg Which FPU register to store it in.
5080 * @param iEffSeg The effective memory operand selector register.
5081 * @param GCPtrEff The effective memory operand offset.
5082 * @param uFpuOpcode The FPU opcode value.
5083 */
5084void iemFpuStoreResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
5085 uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5086{
5087 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5088 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5089 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5090 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5091}
5092
5093
5094/**
5095 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
5096 * FPUDP, and FPUDS, and then pops the stack.
5097 *
5098 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5099 * @param pResult The result to store.
5100 * @param iStReg Which FPU register to store it in.
5101 * @param iEffSeg The effective memory operand selector register.
5102 * @param GCPtrEff The effective memory operand offset.
5103 * @param uFpuOpcode The FPU opcode value.
5104 */
5105void iemFpuStoreResultWithMemOpThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult,
5106 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5107{
5108 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5109 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5110 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5111 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5112 iemFpuMaybePopOne(pFpuCtx);
5113}
5114
5115
5116/**
5117 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
5118 *
5119 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5120 * @param uFpuOpcode The FPU opcode value.
5121 */
5122void iemFpuUpdateOpcodeAndIp(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5123{
5124 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5125 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5126}
5127
5128
5129/**
5130 * Updates the FSW, FOP, FPUIP, and FPUCS.
5131 *
5132 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5133 * @param u16FSW The FSW from the current instruction.
5134 * @param uFpuOpcode The FPU opcode value.
5135 */
5136void iemFpuUpdateFSW(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT
5137{
5138 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5139 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5140 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5141}
5142
5143
5144/**
5145 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
5146 *
5147 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5148 * @param u16FSW The FSW from the current instruction.
5149 * @param uFpuOpcode The FPU opcode value.
5150 */
5151void iemFpuUpdateFSWThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT
5152{
5153 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5154 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5155 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5156 iemFpuMaybePopOne(pFpuCtx);
5157}
5158
5159
5160/**
5161 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
5162 *
5163 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5164 * @param u16FSW The FSW from the current instruction.
5165 * @param iEffSeg The effective memory operand selector register.
5166 * @param GCPtrEff The effective memory operand offset.
5167 * @param uFpuOpcode The FPU opcode value.
5168 */
5169void iemFpuUpdateFSWWithMemOp(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5170{
5171 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5172 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5173 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5174 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5175}
5176
5177
5178/**
5179 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
5180 *
5181 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5182 * @param u16FSW The FSW from the current instruction.
5183 * @param uFpuOpcode The FPU opcode value.
5184 */
5185void iemFpuUpdateFSWThenPopPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT
5186{
5187 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5188 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5189 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5190 iemFpuMaybePopOne(pFpuCtx);
5191 iemFpuMaybePopOne(pFpuCtx);
5192}
5193
5194
5195/**
5196 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
5197 *
5198 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5199 * @param u16FSW The FSW from the current instruction.
5200 * @param iEffSeg The effective memory operand selector register.
5201 * @param GCPtrEff The effective memory operand offset.
5202 * @param uFpuOpcode The FPU opcode value.
5203 */
5204void iemFpuUpdateFSWWithMemOpThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff,
5205 uint16_t uFpuOpcode) RT_NOEXCEPT
5206{
5207 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5208 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5209 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5210 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5211 iemFpuMaybePopOne(pFpuCtx);
5212}
5213
5214
5215/**
5216 * Worker routine for raising an FPU stack underflow exception.
5217 *
5218 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5219 * @param pFpuCtx The FPU context.
5220 * @param iStReg The stack register being accessed.
5221 */
5222static void iemFpuStackUnderflowOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint8_t iStReg)
5223{
5224 Assert(iStReg < 8 || iStReg == UINT8_MAX);
5225 if (pFpuCtx->FCW & X86_FCW_IM)
5226 {
5227 /* Masked underflow. */
5228 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5229 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5230 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5231 if (iStReg != UINT8_MAX)
5232 {
5233 pFpuCtx->FTW |= RT_BIT(iReg);
5234 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
5235 }
5236 }
5237 else
5238 {
5239 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5240 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5241 Log11(("iemFpuStackUnderflowOnly: %04x:%08RX64: underflow (FSW=%#x)\n",
5242 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5243 }
5244 RT_NOREF(pVCpu);
5245}
5246
5247
5248/**
5249 * Raises a FPU stack underflow exception.
5250 *
5251 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5252 * @param iStReg The destination register that should be loaded
5253 * with QNaN if \#IS is not masked. Specify
5254 * UINT8_MAX if none (like for fcom).
5255 * @param uFpuOpcode The FPU opcode value.
5256 */
5257void iemFpuStackUnderflow(PVMCPUCC pVCpu, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5258{
5259 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5260 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5261 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5262}
5263
5264
5265void iemFpuStackUnderflowWithMemOp(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5266{
5267 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5268 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5269 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5270 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5271}
5272
5273
5274void iemFpuStackUnderflowThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5275{
5276 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5277 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5278 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5279 iemFpuMaybePopOne(pFpuCtx);
5280}
5281
5282
5283void iemFpuStackUnderflowWithMemOpThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff,
5284 uint16_t uFpuOpcode) RT_NOEXCEPT
5285{
5286 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5287 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5288 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5289 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5290 iemFpuMaybePopOne(pFpuCtx);
5291}
5292
5293
5294void iemFpuStackUnderflowThenPopPop(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5295{
5296 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5297 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5298 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, UINT8_MAX);
5299 iemFpuMaybePopOne(pFpuCtx);
5300 iemFpuMaybePopOne(pFpuCtx);
5301}
5302
5303
5304void iemFpuStackPushUnderflow(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5305{
5306 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5307 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5308
5309 if (pFpuCtx->FCW & X86_FCW_IM)
5310 {
5311 /* Masked underflow - Push QNaN. */
5312 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5313 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5314 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5315 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5316 pFpuCtx->FTW |= RT_BIT(iNewTop);
5317 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5318 iemFpuRotateStackPush(pFpuCtx);
5319 }
5320 else
5321 {
5322 /* Exception pending - don't change TOP or the register stack. */
5323 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5324 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5325 Log11(("iemFpuStackPushUnderflow: %04x:%08RX64: underflow (FSW=%#x)\n",
5326 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5327 }
5328}
5329
5330
5331void iemFpuStackPushUnderflowTwo(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5332{
5333 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5334 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5335
5336 if (pFpuCtx->FCW & X86_FCW_IM)
5337 {
5338 /* Masked underflow - Push QNaN. */
5339 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5340 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5341 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5342 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5343 pFpuCtx->FTW |= RT_BIT(iNewTop);
5344 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
5345 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5346 iemFpuRotateStackPush(pFpuCtx);
5347 }
5348 else
5349 {
5350 /* Exception pending - don't change TOP or the register stack. */
5351 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5352 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5353 Log11(("iemFpuStackPushUnderflowTwo: %04x:%08RX64: underflow (FSW=%#x)\n",
5354 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5355 }
5356}
5357
5358
5359/**
5360 * Worker routine for raising an FPU stack overflow exception on a push.
5361 *
5362 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5363 * @param pFpuCtx The FPU context.
5364 */
5365static void iemFpuStackPushOverflowOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx) RT_NOEXCEPT
5366{
5367 if (pFpuCtx->FCW & X86_FCW_IM)
5368 {
5369 /* Masked overflow. */
5370 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5371 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5372 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
5373 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5374 pFpuCtx->FTW |= RT_BIT(iNewTop);
5375 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5376 iemFpuRotateStackPush(pFpuCtx);
5377 }
5378 else
5379 {
5380 /* Exception pending - don't change TOP or the register stack. */
5381 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5382 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5383 Log11(("iemFpuStackPushOverflowOnly: %04x:%08RX64: overflow (FSW=%#x)\n",
5384 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5385 }
5386 RT_NOREF(pVCpu);
5387}
5388
5389
5390/**
5391 * Raises a FPU stack overflow exception on a push.
5392 *
5393 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5394 * @param uFpuOpcode The FPU opcode value.
5395 */
5396void iemFpuStackPushOverflow(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5397{
5398 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5399 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5400 iemFpuStackPushOverflowOnly(pVCpu, pFpuCtx);
5401}
5402
5403
5404/**
5405 * Raises a FPU stack overflow exception on a push with a memory operand.
5406 *
5407 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5408 * @param iEffSeg The effective memory operand selector register.
5409 * @param GCPtrEff The effective memory operand offset.
5410 * @param uFpuOpcode The FPU opcode value.
5411 */
5412void iemFpuStackPushOverflowWithMemOp(PVMCPUCC pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5413{
5414 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5415 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5416 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5417 iemFpuStackPushOverflowOnly(pVCpu, pFpuCtx);
5418}
5419
5420/** @} */
5421
5422
5423/** @name SSE+AVX SIMD access and helpers.
5424 *
5425 * @{
5426 */
5427/**
5428 * Stores a result in a SIMD XMM register, updates the MXCSR.
5429 *
5430 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5431 * @param pResult The result to store.
5432 * @param iXmmReg Which SIMD XMM register to store the result in.
5433 */
5434void iemSseStoreResult(PVMCPUCC pVCpu, PCIEMSSERESULT pResult, uint8_t iXmmReg) RT_NOEXCEPT
5435{
5436 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5437 pFpuCtx->MXCSR |= pResult->MXCSR & X86_MXCSR_XCPT_FLAGS;
5438
5439 /* The result is only updated if there is no unmasked exception pending. */
5440 if (( ~((pFpuCtx->MXCSR & X86_MXCSR_XCPT_MASK) >> X86_MXCSR_XCPT_MASK_SHIFT)
5441 & (pFpuCtx->MXCSR & X86_MXCSR_XCPT_FLAGS)) == 0)
5442 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXmmReg] = pResult->uResult;
5443}
5444
5445
5446/**
5447 * Updates the MXCSR.
5448 *
5449 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5450 * @param fMxcsr The new MXCSR value.
5451 */
5452void iemSseUpdateMxcsr(PVMCPUCC pVCpu, uint32_t fMxcsr) RT_NOEXCEPT
5453{
5454 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5455 pFpuCtx->MXCSR |= fMxcsr & X86_MXCSR_XCPT_FLAGS;
5456}
5457/** @} */
5458
5459
5460/** @name Memory access.
5461 *
5462 * @{
5463 */
5464
5465#undef LOG_GROUP
5466#define LOG_GROUP LOG_GROUP_IEM_MEM
5467
5468/**
5469 * Updates the IEMCPU::cbWritten counter if applicable.
5470 *
5471 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5472 * @param fAccess The access being accounted for.
5473 * @param cbMem The access size.
5474 */
5475DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPUCC pVCpu, uint32_t fAccess, size_t cbMem)
5476{
5477 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
5478 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
5479 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
5480}
5481
5482
5483/**
5484 * Applies the segment limit, base and attributes.
5485 *
5486 * This may raise a \#GP or \#SS.
5487 *
5488 * @returns VBox strict status code.
5489 *
5490 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5491 * @param fAccess The kind of access which is being performed.
5492 * @param iSegReg The index of the segment register to apply.
5493 * This is UINT8_MAX if none (for IDT, GDT, LDT,
5494 * TSS, ++).
5495 * @param cbMem The access size.
5496 * @param pGCPtrMem Pointer to the guest memory address to apply
5497 * segmentation to. Input and output parameter.
5498 */
5499VBOXSTRICTRC iemMemApplySegment(PVMCPUCC pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem) RT_NOEXCEPT
5500{
5501 if (iSegReg == UINT8_MAX)
5502 return VINF_SUCCESS;
5503
5504 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
5505 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
5506 switch (IEM_GET_CPU_MODE(pVCpu))
5507 {
5508 case IEMMODE_16BIT:
5509 case IEMMODE_32BIT:
5510 {
5511 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
5512 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
5513
5514 if ( pSel->Attr.n.u1Present
5515 && !pSel->Attr.n.u1Unusable)
5516 {
5517 Assert(pSel->Attr.n.u1DescType);
5518 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
5519 {
5520 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5521 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
5522 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
5523
5524 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
5525 {
5526 /** @todo CPL check. */
5527 }
5528
5529 /*
5530 * There are two kinds of data selectors, normal and expand down.
5531 */
5532 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
5533 {
5534 if ( GCPtrFirst32 > pSel->u32Limit
5535 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
5536 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5537 }
5538 else
5539 {
5540 /*
5541 * The upper boundary is defined by the B bit, not the G bit!
5542 */
5543 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
5544 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
5545 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5546 }
5547 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
5548 }
5549 else
5550 {
5551 /*
5552 * A code selector can usually be used to read through; writing is
5553 * only permitted in real and V8086 mode.
5554 */
5555 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5556 || ( (fAccess & IEM_ACCESS_TYPE_READ)
5557 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
5558 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
5559 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
5560
5561 if ( GCPtrFirst32 > pSel->u32Limit
5562 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
5563 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5564
5565 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
5566 {
5567 /** @todo CPL check. */
5568 }
5569
5570 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
5571 }
5572 }
5573 else
5574 return iemRaiseGeneralProtectionFault0(pVCpu);
5575 return VINF_SUCCESS;
5576 }
5577
5578 case IEMMODE_64BIT:
5579 {
5580 RTGCPTR GCPtrMem = *pGCPtrMem;
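            /* Only the FS and GS segment bases are applied in 64-bit mode; the other bases are treated as zero. */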
5581 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
5582 *pGCPtrMem = GCPtrMem + pSel->u64Base;
5583
5584 Assert(cbMem >= 1);
5585 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
5586 return VINF_SUCCESS;
5587 /** @todo We should probably raise \#SS(0) here if segment is SS; see AMD spec.
5588 * 4.12.2 "Data Limit Checks in 64-bit Mode". */
5589 return iemRaiseGeneralProtectionFault0(pVCpu);
5590 }
5591
5592 default:
5593 AssertFailedReturn(VERR_IEM_IPE_7);
5594 }
5595}
5596
5597
5598/**
5599 * Translates a virtual address to a physical address and checks if we
5600 * can access the page as specified.
5601 *
5602 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5603 * @param GCPtrMem The virtual address.
5604 * @param cbAccess The access size, for raising \#PF correctly for
5605 * FXSAVE and such.
5606 * @param fAccess The intended access.
5607 * @param pGCPhysMem Where to return the physical address.
5608 */
5609VBOXSTRICTRC iemMemPageTranslateAndCheckAccess(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint32_t cbAccess,
5610 uint32_t fAccess, PRTGCPHYS pGCPhysMem) RT_NOEXCEPT
5611{
5612 /** @todo Need a different PGM interface here. We're currently using
5613 * generic / REM interfaces. This won't cut it for R0. */
5614 /** @todo If/when PGM handles paged real-mode, we can remove the hack in
5615 * iemSvmWorldSwitch/iemVmxWorldSwitch to work around raising a page-fault
5616 * here. */
5617 PGMPTWALK Walk;
5618 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
5619 if (RT_FAILURE(rc))
5620 {
5621 LogEx(LOG_GROUP_IEM,("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
5622 /** @todo Check unassigned memory in unpaged mode. */
5623 /** @todo Reserved bits in page tables. Requires new PGM interface. */
5624#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5625 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5626 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
5627#endif
5628 *pGCPhysMem = NIL_RTGCPHYS;
5629 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess, rc);
5630 }
5631
5632 /* If the page is writable and does not have the no-exec bit set, all
5633 access is allowed. Otherwise we'll have to check more carefully... */
5634 if ((Walk.fEffective & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
5635 {
5636 /* Write to read only memory? */
5637 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5638 && !(Walk.fEffective & X86_PTE_RW)
5639 && ( ( IEM_GET_CPL(pVCpu) == 3
5640 && !(fAccess & IEM_ACCESS_WHAT_SYS))
5641 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
5642 {
5643 LogEx(LOG_GROUP_IEM,("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
5644 *pGCPhysMem = NIL_RTGCPHYS;
5645#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5646 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5647 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5648#endif
5649 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
5650 }
5651
5652 /* Kernel memory accessed by userland? */
5653 if ( !(Walk.fEffective & X86_PTE_US)
5654 && IEM_GET_CPL(pVCpu) == 3
5655 && !(fAccess & IEM_ACCESS_WHAT_SYS))
5656 {
5657 LogEx(LOG_GROUP_IEM,("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
5658 *pGCPhysMem = NIL_RTGCPHYS;
5659#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5660 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5661 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5662#endif
5663 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess, VERR_ACCESS_DENIED);
5664 }
5665
5666 /* Executing non-executable memory? */
5667 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
5668 && (Walk.fEffective & X86_PTE_PAE_NX)
5669 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) )
5670 {
5671 LogEx(LOG_GROUP_IEM,("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
5672 *pGCPhysMem = NIL_RTGCPHYS;
5673#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5674 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5675 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5676#endif
5677 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
5678 VERR_ACCESS_DENIED);
5679 }
5680 }
5681
5682 /*
5683 * Set the dirty / access flags.
5684 * ASSUMES this is set when the address is translated rather than on commit...
5685 */
5686 /** @todo testcase: check when A and D bits are actually set by the CPU. */
5687 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
5688 if ((Walk.fEffective & fAccessedDirty) != fAccessedDirty)
5689 {
5690 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
5691 AssertRC(rc2);
5692 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
5693 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
5694 }
5695
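    /* Compose the physical address from the page frame and the in-page offset. */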
5696 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
5697 *pGCPhysMem = GCPhys;
5698 return VINF_SUCCESS;
5699}
5700
5701
5702/**
5703 * Looks up a memory mapping entry.
5704 *
5705 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
5706 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5707 * @param pvMem The memory address.
5708 * @param fAccess The access flags (type and what) the mapping was made with.
5709 */
5710DECLINLINE(int) iemMapLookup(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess)
5711{
5712 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
5713 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
5714 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
5715 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5716 return 0;
5717 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
5718 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5719 return 1;
5720 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
5721 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5722 return 2;
5723 return VERR_NOT_FOUND;
5724}
5725
5726
5727/**
5728 * Finds a free memmap entry when using iNextMapping doesn't work.
5729 *
5730 * @returns Memory mapping index, 1024 on failure.
5731 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5732 */
5733static unsigned iemMemMapFindFree(PVMCPUCC pVCpu)
5734{
5735 /*
5736 * The easy case.
5737 */
5738 if (pVCpu->iem.s.cActiveMappings == 0)
5739 {
5740 pVCpu->iem.s.iNextMapping = 1;
5741 return 0;
5742 }
5743
5744 /* There should be enough mappings for all instructions. */
5745 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
5746
5747 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
5748 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
5749 return i;
5750
5751 AssertFailedReturn(1024);
5752}
5753
5754
5755/**
5756 * Commits a bounce buffer that needs writing back and unmaps it.
5757 *
5758 * @returns Strict VBox status code.
5759 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5760 * @param iMemMap The index of the buffer to commit.
5761 * @param fPostponeFail Whether we can postpone writer failures to ring-3.
5762 * Always false in ring-3, obviously.
5763 */
5764static VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPUCC pVCpu, unsigned iMemMap, bool fPostponeFail)
5765{
5766 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
5767 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
5768#ifdef IN_RING3
5769 Assert(!fPostponeFail);
5770 RT_NOREF_PV(fPostponeFail);
5771#endif
5772
5773 /*
5774 * Do the writing.
5775 */
5776 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
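    /* Bounce buffers backed by unassigned memory have nothing to write back. */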
5777 if (!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned)
5778 {
5779 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
5780 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
5781 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
5782 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
5783 {
5784 /*
5785 * Carefully and efficiently dealing with access handler return
5786 * codes makes this a little bloated.
5787 */
5788 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
5789 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
5790 pbBuf,
5791 cbFirst,
5792 PGMACCESSORIGIN_IEM);
5793 if (rcStrict == VINF_SUCCESS)
5794 {
5795 if (cbSecond)
5796 {
5797 rcStrict = PGMPhysWrite(pVM,
5798 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5799 pbBuf + cbFirst,
5800 cbSecond,
5801 PGMACCESSORIGIN_IEM);
5802 if (rcStrict == VINF_SUCCESS)
5803 { /* nothing */ }
5804 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5805 {
5806 LogEx(LOG_GROUP_IEM,
5807 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
5808 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5809 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5810 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5811 }
5812#ifndef IN_RING3
5813 else if (fPostponeFail)
5814 {
5815 LogEx(LOG_GROUP_IEM,
5816 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5817 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5818 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5819 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
5820 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5821 return iemSetPassUpStatus(pVCpu, rcStrict);
5822 }
5823#endif
5824 else
5825 {
5826 LogEx(LOG_GROUP_IEM,
5827 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5828 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5829 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5830 return rcStrict;
5831 }
5832 }
5833 }
5834 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5835 {
5836 if (!cbSecond)
5837 {
5838 LogEx(LOG_GROUP_IEM,
5839 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
5840 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
5841 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5842 }
5843 else
5844 {
5845 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
5846 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5847 pbBuf + cbFirst,
5848 cbSecond,
5849 PGMACCESSORIGIN_IEM);
5850 if (rcStrict2 == VINF_SUCCESS)
5851 {
5852 LogEx(LOG_GROUP_IEM,
5853 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
5854 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5855 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5856 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5857 }
5858 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
5859 {
5860 LogEx(LOG_GROUP_IEM,
5861 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
5862 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5863 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
5864 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
5865 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5866 }
5867#ifndef IN_RING3
5868 else if (fPostponeFail)
5869 {
5870 LogEx(LOG_GROUP_IEM,
5871 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5872 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5873 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5874 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
5875 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5876 return iemSetPassUpStatus(pVCpu, rcStrict);
5877 }
5878#endif
5879 else
5880 {
5881 LogEx(LOG_GROUP_IEM,
5882 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5883 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5884 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
5885 return rcStrict2;
5886 }
5887 }
5888 }
5889#ifndef IN_RING3
5890 else if (fPostponeFail)
5891 {
5892 LogEx(LOG_GROUP_IEM,
5893 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5894 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5895 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5896 if (!cbSecond)
5897 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
5898 else
5899 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
5900 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5901 return iemSetPassUpStatus(pVCpu, rcStrict);
5902 }
5903#endif
5904 else
5905 {
5906 LogEx(LOG_GROUP_IEM,
5907 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
5908 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5909 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5910 return rcStrict;
5911 }
5912 }
5913 else
5914 {
5915 /*
5916 * No access handlers, much simpler.
5917 */
5918 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
5919 if (RT_SUCCESS(rc))
5920 {
5921 if (cbSecond)
5922 {
5923 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
5924 if (RT_SUCCESS(rc))
5925 { /* likely */ }
5926 else
5927 {
5928 LogEx(LOG_GROUP_IEM,
5929 ("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5930 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5931 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
5932 return rc;
5933 }
5934 }
5935 }
5936 else
5937 {
5938 LogEx(LOG_GROUP_IEM,
5939 ("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
5940 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
5941 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5942 return rc;
5943 }
5944 }
5945 }
5946
5947#if defined(IEM_LOG_MEMORY_WRITES)
5948 Log5(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
5949 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
5950 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
5951 Log5(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5952 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
5953 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
5954
5955 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
5956 g_cbIemWrote = cbWrote;
5957 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
5958#endif
5959
5960 /*
5961 * Free the mapping entry.
5962 */
5963 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
5964 Assert(pVCpu->iem.s.cActiveMappings != 0);
5965 pVCpu->iem.s.cActiveMappings--;
5966 return VINF_SUCCESS;
5967}
5968
5969
5970/**
5971 * iemMemMap worker that deals with a request crossing pages.
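 *
 * Reads the current content of both guest pages into the per-mapping bounce
 * buffer (unless it is a plain, non-partial write), records both physical
 * addresses, and leaves it to iemMemBounceBufferCommitAndUnmap to write any
 * modifications back.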
5972 */
5973static VBOXSTRICTRC
5974iemMemBounceBufferMapCrossPage(PVMCPUCC pVCpu, int iMemMap, void **ppvMem, uint8_t *pbUnmapInfo,
5975 size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
5976{
5977 Assert(cbMem <= GUEST_PAGE_SIZE);
5978
5979 /*
5980 * Do the address translations.
5981 */
5982 uint32_t const cbFirstPage = GUEST_PAGE_SIZE - (uint32_t)(GCPtrFirst & GUEST_PAGE_OFFSET_MASK);
5983 RTGCPHYS GCPhysFirst;
5984 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, cbFirstPage, fAccess, &GCPhysFirst);
5985 if (rcStrict != VINF_SUCCESS)
5986 return rcStrict;
5987 Assert((GCPhysFirst & GUEST_PAGE_OFFSET_MASK) == (GCPtrFirst & GUEST_PAGE_OFFSET_MASK));
5988
5989 uint32_t const cbSecondPage = (uint32_t)cbMem - cbFirstPage;
5990 RTGCPHYS GCPhysSecond;
5991 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK,
5992 cbSecondPage, fAccess, &GCPhysSecond);
5993 if (rcStrict != VINF_SUCCESS)
5994 return rcStrict;
5995 Assert((GCPhysSecond & GUEST_PAGE_OFFSET_MASK) == 0);
5996 GCPhysSecond &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK; /** @todo why? */
5997
5998 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
5999
6000 /*
6001 * Read in the current memory content if it's a read, execute or partial
6002 * write access.
6003 */
6004 uint8_t * const pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
6005
6006 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
6007 {
6008 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
6009 {
6010 /*
6011 * Must carefully deal with access handler status codes here,
6012 * makes the code a bit bloated.
6013 */
6014 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
6015 if (rcStrict == VINF_SUCCESS)
6016 {
6017 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
6018 if (rcStrict == VINF_SUCCESS)
6019 { /*likely */ }
6020 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6021 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6022 else
6023 {
6024                    LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict=%Rrc (!!)\n",
6025 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6026 return rcStrict;
6027 }
6028 }
6029 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6030 {
6031 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
6032 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
6033 {
6034 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
6035 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6036 }
6037 else
6038 {
6039 LogEx(LOG_GROUP_IEM,
6040                           ("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
6041                            GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
6042 return rcStrict2;
6043 }
6044 }
6045 else
6046 {
6047                LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6048 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6049 return rcStrict;
6050 }
6051 }
6052 else
6053 {
6054 /*
6055             * No informational status codes here, much more straightforward.
6056 */
6057 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
6058 if (RT_SUCCESS(rc))
6059 {
6060 Assert(rc == VINF_SUCCESS);
6061 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
6062 if (RT_SUCCESS(rc))
6063 Assert(rc == VINF_SUCCESS);
6064 else
6065 {
6066 LogEx(LOG_GROUP_IEM,
6067                       ("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
6068 return rc;
6069 }
6070 }
6071 else
6072 {
6073 LogEx(LOG_GROUP_IEM,
6074                   ("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
6075 return rc;
6076 }
6077 }
6078 }
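    /* Strict builds poison the bounce buffer: 0xcc for plain writes (nothing
       was read in above) and 0xaa for the unused tail, so stray reads and
       overruns stand out. */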
6079#ifdef VBOX_STRICT
6080 else
6081 memset(pbBuf, 0xcc, cbMem);
6082 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
6083 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
6084#endif
6085 AssertCompileMemberAlignment(VMCPU, iem.s.aBounceBuffers, 64);
6086
6087 /*
6088 * Commit the bounce buffer entry.
6089 */
6090 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
6091 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
6092 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
6093 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
6094 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
6095 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
6096 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
6097 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6098 pVCpu->iem.s.cActiveMappings++;
6099
6100 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6101 *ppvMem = pbBuf;
6102 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
6103 return VINF_SUCCESS;
6104}
6105
6106
6107/**
6108 * iemMemMap worker that deals with iemMemPageMap failures.
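 *
 * Bounce buffers the access when the page must go thru access handlers (TLB
 * catch-all / catch-write) or is unassigned, reading in the current content
 * first for read, execute and partial-write accesses (all 0xff for unassigned
 * pages).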
6109 */
6110static VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPUCC pVCpu, unsigned iMemMap, void **ppvMem, uint8_t *pbUnmapInfo, size_t cbMem,
6111 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
6112{
6113 /*
6114 * Filter out conditions we can handle and the ones which shouldn't happen.
6115 */
6116 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
6117 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
6118 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
6119 {
6120 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
6121 return rcMap;
6122 }
6123 pVCpu->iem.s.cPotentialExits++;
6124
6125 /*
6126 * Read in the current memory content if it's a read, execute or partial
6127 * write access.
6128 */
6129 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
6130 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
6131 {
6132 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
6133 memset(pbBuf, 0xff, cbMem);
6134 else
6135 {
6136 int rc;
6137 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
6138 {
6139 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
6140 if (rcStrict == VINF_SUCCESS)
6141 { /* nothing */ }
6142 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6143 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6144 else
6145 {
6146 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6147 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6148 return rcStrict;
6149 }
6150 }
6151 else
6152 {
6153 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
6154 if (RT_SUCCESS(rc))
6155 { /* likely */ }
6156 else
6157 {
6158 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6159 GCPhysFirst, rc));
6160 return rc;
6161 }
6162 }
6163 }
6164 }
6165#ifdef VBOX_STRICT
6166 else
6167 memset(pbBuf, 0xcc, cbMem);
6168#endif
6169#ifdef VBOX_STRICT
6170 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
6171 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
6172#endif
6173
6174 /*
6175 * Commit the bounce buffer entry.
6176 */
6177 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
6178 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
6179 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
6180 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
6181 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
6182 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
6183 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
6184 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6185 pVCpu->iem.s.cActiveMappings++;
6186
6187 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6188 *ppvMem = pbBuf;
6189 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
6190 return VINF_SUCCESS;
6191}
6192
6193
6194
6195/**
6196 * Maps the specified guest memory for the given kind of access.
6197 *
6198 * This may be using bounce buffering of the memory if it's crossing a page
6199 * boundary or if there is an access handler installed for any of it. Because
6200 * of lock prefix guarantees, we're in for some extra clutter when this
6201 * happens.
6202 *
6203 * This may raise a \#GP, \#SS, \#PF or \#AC.
6204 *
6205 * @returns VBox strict status code.
6206 *
6207 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6208 * @param ppvMem Where to return the pointer to the mapped memory.
6209 * @param pbUnmapInfo Where to return unmap info to be passed to
6210 * iemMemCommitAndUnmap or iemMemRollbackAndUnmap when
6211 * done.
6212 * @param cbMem The number of bytes to map. This is usually 1, 2, 4, 6,
6213 * 8, 12, 16, 32 or 512. When used by string operations
6214 * it can be up to a page.
6215 * @param iSegReg The index of the segment register to use for this
6216 * access. The base and limits are checked. Use UINT8_MAX
6217 * to indicate that no segmentation is required (for IDT,
6218 * GDT and LDT accesses).
6219 * @param GCPtrMem The address of the guest memory.
6220 * @param fAccess How the memory is being accessed. The
6221 * IEM_ACCESS_TYPE_XXX bit is used to figure out how to map
6222 * the memory, while the IEM_ACCESS_WHAT_XXX bit is used
6223 * when raising exceptions.
6224 * @param uAlignCtl Alignment control:
6225 * - Bits 15:0 is the alignment mask.
6226 * - Bits 31:16 for flags like IEM_MEMMAP_F_ALIGN_GP,
6227 * IEM_MEMMAP_F_ALIGN_SSE, and
6228 * IEM_MEMMAP_F_ALIGN_GP_OR_AC.
6229 * Pass zero to skip alignment.
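 *
 * A minimal caller sketch (illustrative only; it mirrors iemMemFetchDataU32_ZX_U64
 * further down, the names are placeholders and error handling beyond the status
 * check is omitted):
 * @code
 *      uint8_t          bUnmapInfo;
 *      uint32_t const  *pu32Src;
 *      VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu32Src, &bUnmapInfo, sizeof(*pu32Src),
 *                                        iSegReg, GCPtrMem, IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
 *      if (rcStrict == VINF_SUCCESS)
 *      {
 *          uint32_t const u32Value = *pu32Src;     // consume the data before unmapping
 *          rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
 *      }
 * @endcode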
6230 */
6231VBOXSTRICTRC iemMemMap(PVMCPUCC pVCpu, void **ppvMem, uint8_t *pbUnmapInfo, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem,
6232 uint32_t fAccess, uint32_t uAlignCtl) RT_NOEXCEPT
6233{
6234 /*
6235 * Check the input and figure out which mapping entry to use.
6236 */
6237 Assert(cbMem <= sizeof(pVCpu->iem.s.aBounceBuffers[0]));
6238 Assert( cbMem <= 64 || cbMem == 512 || cbMem == 256 || cbMem == 108 || cbMem == 104 || cbMem == 102 || cbMem == 94
6239 || (iSegReg == UINT8_MAX && uAlignCtl == 0 && fAccess == IEM_ACCESS_DATA_R /* for the CPUID logging interface */) );
6240 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
6241 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
6242
6243 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
6244 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6245 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
6246 {
6247 iMemMap = iemMemMapFindFree(pVCpu);
6248 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
6249 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
6250 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
6251 pVCpu->iem.s.aMemMappings[2].fAccess),
6252 VERR_IEM_IPE_9);
6253 }
6254
6255 /*
6256 * Map the memory, checking that we can actually access it. If something
6257 * slightly complicated happens, fall back on bounce buffering.
6258 */
6259 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
6260 if (rcStrict == VINF_SUCCESS)
6261 { /* likely */ }
6262 else
6263 return rcStrict;
6264
6265 if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE) /* Crossing a page boundary? */
6266 { /* likely */ }
6267 else
6268 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem, GCPtrMem, fAccess);
6269
6270 /*
6271 * Alignment check.
6272 */
6273 if ( (GCPtrMem & (uAlignCtl & UINT16_MAX)) == 0 )
6274 { /* likelyish */ }
6275 else
6276 {
6277 /* Misaligned access. */
6278 if ((fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6279 {
6280 if ( !(uAlignCtl & IEM_MEMMAP_F_ALIGN_GP)
6281 || ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_SSE)
6282 && (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) )
6283 {
6284 AssertCompile(X86_CR0_AM == X86_EFL_AC);
6285
6286 if (iemMemAreAlignmentChecksEnabled(pVCpu))
6287 return iemRaiseAlignmentCheckException(pVCpu);
6288 }
6289 else if ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_GP_OR_AC)
6290 && (GCPtrMem & 3) /* The value 4 matches 10980xe's FXSAVE and helps make bs3-cpu-basic2 work. */
6291 /** @todo may only apply to 2, 4 or 8 byte misalignments depending on the CPU
6292 * implementation. See FXSAVE/FRSTOR/XSAVE/XRSTOR/++. Using 4 for now as
6293 * that's what FXSAVE does on a 10980xe. */
6294 && iemMemAreAlignmentChecksEnabled(pVCpu))
6295 return iemRaiseAlignmentCheckException(pVCpu);
6296 else
6297 return iemRaiseGeneralProtectionFault0(pVCpu);
6298 }
6299 }
6300
6301#ifdef IEM_WITH_DATA_TLB
6302 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
6303
6304 /*
6305 * Get the TLB entry for this page.
6306 */
6307 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
6308 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
6309 if (pTlbe->uTag == uTag)
6310 {
6311# ifdef VBOX_WITH_STATISTICS
6312 pVCpu->iem.s.DataTlb.cTlbHits++;
6313# endif
6314 }
6315 else
6316 {
6317 pVCpu->iem.s.DataTlb.cTlbMisses++;
6318 PGMPTWALK Walk;
6319 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
6320 if (RT_FAILURE(rc))
6321 {
6322 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
6323# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6324 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6325 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
6326# endif
6327 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, rc);
6328 }
6329
6330 Assert(Walk.fSucceeded);
6331 pTlbe->uTag = uTag;
6332 pTlbe->fFlagsAndPhysRev = ~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A); /* skipping NX */
6333 pTlbe->GCPhys = Walk.GCPhys;
6334 pTlbe->pbMappingR3 = NULL;
6335 }
6336
6337 /*
6338 * Check TLB page table level access flags.
6339 */
6340 /* If the page is either supervisor only or non-writable, we need to do
6341 more careful access checks. */
6342 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_WRITE))
6343 {
6344 /* Write to read only memory? */
6345 if ( (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_WRITE)
6346 && (fAccess & IEM_ACCESS_TYPE_WRITE)
6347 && ( ( IEM_GET_CPL(pVCpu) == 3
6348 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6349 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
6350 {
6351 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
6352# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6353 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6354 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6355# endif
6356 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
6357 }
6358
6359 /* Kernel memory accessed by userland? */
6360 if ( (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER)
6361 && IEM_GET_CPL(pVCpu) == 3
6362 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6363 {
6364 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
6365# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6366 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6367 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6368# endif
6369 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, VERR_ACCESS_DENIED);
6370 }
6371 }
6372
6373 /*
6374 * Set the dirty / access flags.
6375 * ASSUMES this is set when the address is translated rather than on commit...
6376 */
6377 /** @todo testcase: check when A and D bits are actually set by the CPU. */
6378 uint64_t const fTlbAccessedDirty = (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PT_NO_DIRTY : 0) | IEMTLBE_F_PT_NO_ACCESSED;
6379 if (pTlbe->fFlagsAndPhysRev & fTlbAccessedDirty)
6380 {
6381 uint32_t const fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
6382 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
6383 AssertRC(rc2);
6384 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
6385 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
6386 pTlbe->fFlagsAndPhysRev &= ~fTlbAccessedDirty;
6387 }
6388
6389 /*
6390 * Look up the physical page info if necessary.
6391 */
6392 uint8_t *pbMem = NULL;
6393 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6394# ifdef IN_RING3
6395 pbMem = pTlbe->pbMappingR3;
6396# else
6397 pbMem = NULL;
6398# endif
6399 else
6400 {
6401 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
6402 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
6403 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
6404 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
6405 AssertCompile(PGMIEMGCPHYS2PTR_F_CODE_PAGE == IEMTLBE_F_PG_CODE_PAGE);
6406 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
6407 { /* likely */ }
6408 else
6409 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
6410 pTlbe->pbMappingR3 = NULL;
6411 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
6412 | IEMTLBE_F_NO_MAPPINGR3
6413 | IEMTLBE_F_PG_NO_READ
6414 | IEMTLBE_F_PG_NO_WRITE
6415 | IEMTLBE_F_PG_UNASSIGNED
6416 | IEMTLBE_F_PG_CODE_PAGE);
6417 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
6418 &pbMem, &pTlbe->fFlagsAndPhysRev);
6419 AssertRCReturn(rc, rc);
6420# ifdef IN_RING3
6421 pTlbe->pbMappingR3 = pbMem;
6422# endif
6423 }
6424
6425 /*
6426 * Check the physical page level access and mapping.
6427 */
6428 if ( !(pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ))
6429 || !(pTlbe->fFlagsAndPhysRev & ( (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PG_NO_WRITE : 0)
6430 | (fAccess & IEM_ACCESS_TYPE_READ ? IEMTLBE_F_PG_NO_READ : 0))) )
6431 { /* probably likely */ }
6432 else
6433 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem,
6434 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,
6435 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED
6436 : pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? VERR_PGM_PHYS_TLB_CATCH_ALL
6437 : VERR_PGM_PHYS_TLB_CATCH_WRITE);
6438 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3)); /* ASSUMPTIONS about PGMPhysIemGCPhys2PtrNoLock behaviour. */
6439
6440 if (pbMem)
6441 {
6442 Assert(!((uintptr_t)pbMem & GUEST_PAGE_OFFSET_MASK));
6443 pbMem = pbMem + (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6444 fAccess |= IEM_ACCESS_NOT_LOCKED;
6445 }
6446 else
6447 {
6448 Assert(!(fAccess & IEM_ACCESS_NOT_LOCKED));
6449 RTGCPHYS const GCPhysFirst = pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6450 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6451 if (rcStrict != VINF_SUCCESS)
6452 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem, GCPhysFirst, fAccess, rcStrict);
6453 }
6454
6455 void * const pvMem = pbMem;
6456
6457 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6458 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6459 if (fAccess & IEM_ACCESS_TYPE_READ)
6460 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6461
6462#else /* !IEM_WITH_DATA_TLB */
6463
6464 RTGCPHYS GCPhysFirst;
6465 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, &GCPhysFirst);
6466 if (rcStrict != VINF_SUCCESS)
6467 return rcStrict;
6468
6469 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6470 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6471 if (fAccess & IEM_ACCESS_TYPE_READ)
6472 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6473
6474 void *pvMem;
6475 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6476 if (rcStrict != VINF_SUCCESS)
6477 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem, GCPhysFirst, fAccess, rcStrict);
6478
6479#endif /* !IEM_WITH_DATA_TLB */
6480
6481 /*
6482 * Fill in the mapping table entry.
6483 */
6484 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
6485 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
6486 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6487 pVCpu->iem.s.cActiveMappings += 1;
6488
6489 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6490 *ppvMem = pvMem;
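    /* Pack the unmap info: bits 2:0 = mapping index, bit 3 = always set so the
       unmap functions can validate the value, bits 7:4 = access type (checked
       again by iemMemCommitAndUnmap / iemMemRollbackAndUnmap). */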
6491 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
6492 AssertCompile(IEM_ACCESS_TYPE_MASK <= 0xf);
6493 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.aMemMappings) < 8);
6494
6495 return VINF_SUCCESS;
6496}
6497
6498
6499/**
6500 * Commits the guest memory if bounce buffered and unmaps it.
6501 *
6502 * @returns Strict VBox status code.
6503 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6504 * @param bUnmapInfo Unmap info set by iemMemMap.
6505 */
6506VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
6507{
6508 uintptr_t const iMemMap = bUnmapInfo & 0x7;
6509 AssertMsgReturn( (bUnmapInfo & 0x08)
6510 && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6511 && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf)) == ((unsigned)bUnmapInfo >> 4),
6512 ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess),
6513 VERR_NOT_FOUND);
6514
6515 /* If it's bounce buffered, we may need to write back the buffer. */
6516 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6517 {
6518 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6519 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
6520 }
6521 /* Otherwise unlock it. */
6522 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6523 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6524
6525 /* Free the entry. */
6526 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6527 Assert(pVCpu->iem.s.cActiveMappings != 0);
6528 pVCpu->iem.s.cActiveMappings--;
6529 return VINF_SUCCESS;
6530}
6531
6532
6533/**
6534 * Rolls back the guest memory (conceptually only) and unmaps it.
6535 *
6536 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6537 * @param bUnmapInfo Unmap info set by iemMemMap.
6538 */
6539void iemMemRollbackAndUnmap(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
6540{
6541 uintptr_t const iMemMap = bUnmapInfo & 0x7;
6542 AssertMsgReturnVoid( (bUnmapInfo & 0x08)
6543 && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6544 && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf))
6545 == ((unsigned)bUnmapInfo >> 4),
6546 ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess));
6547
6548 /* Unlock it if necessary. */
6549 if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6550 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6551
6552 /* Free the entry. */
6553 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6554 Assert(pVCpu->iem.s.cActiveMappings != 0);
6555 pVCpu->iem.s.cActiveMappings--;
6556}
6557
6558#ifdef IEM_WITH_SETJMP
6559
6560/**
6561 * Maps the specified guest memory for the given kind of access, longjmp on
6562 * error.
6563 *
6564 * This may be using bounce buffering of the memory if it's crossing a page
6565 * boundary or if there is an access handler installed for any of it. Because
6566 * of lock prefix guarantees, we're in for some extra clutter when this
6567 * happens.
6568 *
6569 * This may raise a \#GP, \#SS, \#PF or \#AC.
6570 *
6571 * @returns Pointer to the mapped memory.
6572 *
6573 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6574 * @param   pbUnmapInfo         Where to return unmap info to be passed to
6575 * iemMemCommitAndUnmapJmp, iemMemCommitAndUnmapRwSafeJmp,
6576 * iemMemCommitAndUnmapWoSafeJmp,
6577 * iemMemCommitAndUnmapRoSafeJmp,
6578 * iemMemRollbackAndUnmapWoSafe or iemMemRollbackAndUnmap
6579 * when done.
6580 * @param cbMem The number of bytes to map. This is usually 1,
6581 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
6582 * string operations it can be up to a page.
6583 * @param iSegReg The index of the segment register to use for
6584 * this access. The base and limits are checked.
6585 * Use UINT8_MAX to indicate that no segmentation
6586 * is required (for IDT, GDT and LDT accesses).
6587 * @param GCPtrMem The address of the guest memory.
6588 * @param fAccess How the memory is being accessed. The
6589 * IEM_ACCESS_TYPE_XXX bit is used to figure out
6590 * how to map the memory, while the
6591 * IEM_ACCESS_WHAT_XXX bit is used when raising
6592 * exceptions.
6593 * @param uAlignCtl Alignment control:
6594 * - Bits 15:0 is the alignment mask.
6595 * - Bits 31:16 for flags like IEM_MEMMAP_F_ALIGN_GP,
6596 * IEM_MEMMAP_F_ALIGN_SSE, and
6597 * IEM_MEMMAP_F_ALIGN_GP_OR_AC.
6598 * Pass zero to skip alignment.
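 *
 * A minimal caller sketch (illustrative only; it follows the same pattern as
 * iemMemFetchDataU256Jmp further down, and any raised exception longjmps
 * through the caller):
 * @code
 *      uint8_t         bUnmapInfo;
 *      uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu32Src), iSegReg,
 *                                                               GCPtrMem, IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
 *      uint32_t const  u32Value = *pu32Src;        // consume the data before unmapping
 *      iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
 * @endcode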
6599 */
6600void *iemMemMapJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess,
6601 uint32_t uAlignCtl) IEM_NOEXCEPT_MAY_LONGJMP
6602{
6603 /*
6604 * Check the input, check segment access and adjust address
6605 * with segment base.
6606 */
6607 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
6608 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
6609 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
6610
6611 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
6612 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
6613 else IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6614
6615 /*
6616 * Alignment check.
6617 */
6618 if ( (GCPtrMem & (uAlignCtl & UINT16_MAX)) == 0 )
6619 { /* likelyish */ }
6620 else
6621 {
6622 /* Misaligned access. */
6623 if ((fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6624 {
6625 if ( !(uAlignCtl & IEM_MEMMAP_F_ALIGN_GP)
6626 || ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_SSE)
6627 && (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) )
6628 {
6629 AssertCompile(X86_CR0_AM == X86_EFL_AC);
6630
6631 if (iemMemAreAlignmentChecksEnabled(pVCpu))
6632 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
6633 }
6634 else if ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_GP_OR_AC)
6635 && (GCPtrMem & 3) /* The value 4 matches 10980xe's FXSAVE and helps make bs3-cpu-basic2 work. */
6636 /** @todo may only apply to 2, 4 or 8 byte misalignments depending on the CPU
6637 * implementation. See FXSAVE/FRSTOR/XSAVE/XRSTOR/++. Using 4 for now as
6638 * that's what FXSAVE does on a 10980xe. */
6639 && iemMemAreAlignmentChecksEnabled(pVCpu))
6640 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
6641 else
6642 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
6643 }
6644 }
6645
6646 /*
6647 * Figure out which mapping entry to use.
6648 */
6649 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
6650 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6651 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
6652 {
6653 iMemMap = iemMemMapFindFree(pVCpu);
6654 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
6655 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
6656 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
6657 pVCpu->iem.s.aMemMappings[2].fAccess),
6658 IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_9));
6659 }
6660
6661 /*
6662 * Crossing a page boundary?
6663 */
6664 if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE)
6665 { /* No (likely). */ }
6666 else
6667 {
6668 void *pvMem;
6669 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, pbUnmapInfo, cbMem, GCPtrMem, fAccess);
6670 if (rcStrict == VINF_SUCCESS)
6671 return pvMem;
6672 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6673 }
6674
6675#ifdef IEM_WITH_DATA_TLB
6676 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
6677
6678 /*
6679 * Get the TLB entry for this page.
6680 */
6681 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
6682 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
6683 if (pTlbe->uTag == uTag)
6684 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
6685 else
6686 {
6687 pVCpu->iem.s.DataTlb.cTlbMisses++;
6688 PGMPTWALK Walk;
6689 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
6690 if (RT_FAILURE(rc))
6691 {
6692 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
6693# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6694 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6695 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
6696# endif
6697 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, rc);
6698 }
6699
6700 Assert(Walk.fSucceeded);
6701 pTlbe->uTag = uTag;
6702 pTlbe->fFlagsAndPhysRev = ~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A); /* skipping NX */
6703 pTlbe->GCPhys = Walk.GCPhys;
6704 pTlbe->pbMappingR3 = NULL;
6705 }
6706
6707 /*
6708 * Check the flags and physical revision.
6709 */
6710 /** @todo make the caller pass these in with fAccess. */
6711 uint64_t const fNoUser = (fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS && IEM_GET_CPL(pVCpu) == 3
6712 ? IEMTLBE_F_PT_NO_USER : 0;
6713 uint64_t const fNoWriteNoDirty = fAccess & IEM_ACCESS_TYPE_WRITE
6714 ? IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PT_NO_DIRTY
6715 | ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)
6716 || (IEM_GET_CPL(pVCpu) == 3 && (fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6717 ? IEMTLBE_F_PT_NO_WRITE : 0)
6718 : 0;
6719 uint64_t const fNoRead = fAccess & IEM_ACCESS_TYPE_READ ? IEMTLBE_F_PG_NO_READ : 0;
6720 uint8_t *pbMem = NULL;
6721 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PT_NO_ACCESSED | fNoRead | fNoWriteNoDirty | fNoUser))
6722 == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6723# ifdef IN_RING3
6724 pbMem = pTlbe->pbMappingR3;
6725# else
6726 pbMem = NULL;
6727# endif
6728 else
6729 {
6730 /*
6731 * Okay, something isn't quite right or needs refreshing.
6732 */
6733 /* Write to read only memory? */
6734 if (pTlbe->fFlagsAndPhysRev & fNoWriteNoDirty & IEMTLBE_F_PT_NO_WRITE)
6735 {
6736 LogEx(LOG_GROUP_IEM, ("iemMemMapJmp: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
6737# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6738 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6739 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6740# endif
6741 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
6742 }
6743
6744 /* Kernel memory accessed by userland? */
6745 if (pTlbe->fFlagsAndPhysRev & fNoUser & IEMTLBE_F_PT_NO_USER)
6746 {
6747 LogEx(LOG_GROUP_IEM, ("iemMemMapJmp: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
6748# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6749 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6750 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6751# endif
6752 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, VERR_ACCESS_DENIED);
6753 }
6754
6755 /* Set the dirty / access flags.
6756 ASSUMES this is set when the address is translated rather than on commit... */
6757 /** @todo testcase: check when A and D bits are actually set by the CPU. */
6758 if (pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY) | IEMTLBE_F_PT_NO_ACCESSED))
6759 {
6760 uint32_t const fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
6761 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
6762 AssertRC(rc2);
6763 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
6764 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
6765 pTlbe->fFlagsAndPhysRev &= ~((fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY) | IEMTLBE_F_PT_NO_ACCESSED);
6766 }
6767
6768 /*
6769 * Check if the physical page info needs updating.
6770 */
6771 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6772# ifdef IN_RING3
6773 pbMem = pTlbe->pbMappingR3;
6774# else
6775 pbMem = NULL;
6776# endif
6777 else
6778 {
6779 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
6780 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
6781 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
6782 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
6783 AssertCompile(PGMIEMGCPHYS2PTR_F_CODE_PAGE == IEMTLBE_F_PG_CODE_PAGE);
6784 pTlbe->pbMappingR3 = NULL;
6785 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
6786 | IEMTLBE_F_NO_MAPPINGR3
6787 | IEMTLBE_F_PG_NO_READ
6788 | IEMTLBE_F_PG_NO_WRITE
6789 | IEMTLBE_F_PG_UNASSIGNED
6790 | IEMTLBE_F_PG_CODE_PAGE);
6791 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
6792 &pbMem, &pTlbe->fFlagsAndPhysRev);
6793 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
6794# ifdef IN_RING3
6795 pTlbe->pbMappingR3 = pbMem;
6796# endif
6797 }
6798
6799 /*
6800 * Check the physical page level access and mapping.
6801 */
6802 if (!(pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty | fNoRead) & (IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ))))
6803 { /* probably likely */ }
6804 else
6805 {
6806 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, (void **)&pbMem, pbUnmapInfo, cbMem,
6807 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,
6808 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED
6809 : pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? VERR_PGM_PHYS_TLB_CATCH_ALL
6810 : VERR_PGM_PHYS_TLB_CATCH_WRITE);
6811 if (rcStrict == VINF_SUCCESS)
6812 return pbMem;
6813 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6814 }
6815 }
6816 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3)); /* ASSUMPTIONS about PGMPhysIemGCPhys2PtrNoLock behaviour. */
6817
6818 if (pbMem)
6819 {
6820 Assert(!((uintptr_t)pbMem & GUEST_PAGE_OFFSET_MASK));
6821 pbMem = pbMem + (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6822 fAccess |= IEM_ACCESS_NOT_LOCKED;
6823 }
6824 else
6825 {
6826 Assert(!(fAccess & IEM_ACCESS_NOT_LOCKED));
6827 RTGCPHYS const GCPhysFirst = pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6828 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6829 if (rcStrict == VINF_SUCCESS)
6830 {
6831 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
6832 return pbMem;
6833 }
6834 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6835 }
6836
6837 void * const pvMem = pbMem;
6838
6839 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6840 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6841 if (fAccess & IEM_ACCESS_TYPE_READ)
6842 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6843
6844#else /* !IEM_WITH_DATA_TLB */
6845
6846
6847 RTGCPHYS GCPhysFirst;
6848 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, &GCPhysFirst);
6849 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
6850 else IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6851
6852 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6853 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6854 if (fAccess & IEM_ACCESS_TYPE_READ)
6855 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6856
6857 void *pvMem;
6858 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6859 if (rcStrict == VINF_SUCCESS)
6860 { /* likely */ }
6861 else
6862 {
6863 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, pbUnmapInfo, cbMem, GCPhysFirst, fAccess, rcStrict);
6864 if (rcStrict == VINF_SUCCESS)
6865 return pvMem;
6866 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6867 }
6868
6869#endif /* !IEM_WITH_DATA_TLB */
6870
6871 /*
6872 * Fill in the mapping table entry.
6873 */
6874 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
6875 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
6876 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6877 pVCpu->iem.s.cActiveMappings++;
6878
6879 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6880
6881 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
6882 return pvMem;
6883}
6884
6885
6886/**
6887 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
6888 *
6889 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6890 * @param   bUnmapInfo          Unmap info set by iemMemMap.
6892 */
6893void iemMemCommitAndUnmapJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
6894{
6895 uintptr_t const iMemMap = bUnmapInfo & 0x7;
6896 AssertMsgReturnVoid( (bUnmapInfo & 0x08)
6897 && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6898 && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf))
6899 == ((unsigned)bUnmapInfo >> 4),
6900 ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess));
6901
6902 /* If it's bounce buffered, we may need to write back the buffer. */
6903 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6904 {
6905 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6906 {
6907 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
6908 if (rcStrict == VINF_SUCCESS)
6909 return;
6910 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6911 }
6912 }
6913 /* Otherwise unlock it. */
6914 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6915 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6916
6917 /* Free the entry. */
6918 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6919 Assert(pVCpu->iem.s.cActiveMappings != 0);
6920 pVCpu->iem.s.cActiveMappings--;
6921}
6922
6923
6924/** Fallback for iemMemCommitAndUnmapRwJmp. */
6925void iemMemCommitAndUnmapRwSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
6926{
6927 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE));
6928 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
6929}
6930
6931
6932/** Fallback for iemMemCommitAndUnmapWoJmp. */
6933void iemMemCommitAndUnmapWoSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
6934{
6935 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_WRITE);
6936 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
6937}
6938
6939
6940/** Fallback for iemMemCommitAndUnmapRoJmp. */
6941void iemMemCommitAndUnmapRoSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
6942{
6943 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_READ);
6944 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
6945}
6946
6947
6948/** Fallback for iemMemRollbackAndUnmapWo. */
6949void iemMemRollbackAndUnmapWoSafe(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
6950{
6951 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_WRITE);
6952 iemMemRollbackAndUnmap(pVCpu, bUnmapInfo);
6953}
6954
6955#endif /* IEM_WITH_SETJMP */
6956
6957#ifndef IN_RING3
6958/**
6959 * Commits the guest memory if bounce buffered and unmaps it; if any bounce
6960 * buffer part shows trouble, the write is postponed to ring-3 (sets FF and stuff).
6961 *
6962 * Allows the instruction to be completed and retired, while the IEM user will
6963 * return to ring-3 immediately afterwards and do the postponed writes there.
6964 *
6965 * @returns VBox status code (no strict statuses). Caller must check
6966 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
6967 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6968 * @param   bUnmapInfo          Unmap info set by iemMemMap.
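 *
 * A hedged caller sketch (illustrative only; assumes the standard
 * VMCPU_FF_IS_SET force-flag test, the exact bail-out action is up to the
 * caller):
 * @code
 *      rcStrict = iemMemCommitAndUnmapPostponeTroubleToR3(pVCpu, bUnmapInfo);
 *      if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
 *      {
 *          // Don't repeat the (string) instruction; return to ring-3 so the
 *          // postponed writes get performed there first.
 *      }
 * @endcode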
6970 */
6971VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
6972{
6973 uintptr_t const iMemMap = bUnmapInfo & 0x7;
6974 AssertMsgReturn( (bUnmapInfo & 0x08)
6975 && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6976 && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf))
6977 == ((unsigned)bUnmapInfo >> 4),
6978 ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess),
6979 VERR_NOT_FOUND);
6980
6981 /* If it's bounce buffered, we may need to write back the buffer. */
6982 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6983 {
6984 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6985 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
6986 }
6987 /* Otherwise unlock it. */
6988 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6989 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6990
6991 /* Free the entry. */
6992 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6993 Assert(pVCpu->iem.s.cActiveMappings != 0);
6994 pVCpu->iem.s.cActiveMappings--;
6995 return VINF_SUCCESS;
6996}
6997#endif
6998
6999
7000/**
7001 * Rolls back mappings, releasing page locks and such.
7002 *
7003 * The caller shall only call this after checking cActiveMappings.
7004 *
7005 * @param pVCpu The cross context virtual CPU structure of the calling thread.
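 *
 * Typical use (matches the cActiveMappings check documented above):
 * @code
 *      if (pVCpu->iem.s.cActiveMappings > 0)
 *          iemMemRollback(pVCpu);
 * @endcode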
7006 */
7007void iemMemRollback(PVMCPUCC pVCpu) RT_NOEXCEPT
7008{
7009 Assert(pVCpu->iem.s.cActiveMappings > 0);
7010
7011 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
7012 while (iMemMap-- > 0)
7013 {
7014 uint32_t const fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
7015 if (fAccess != IEM_ACCESS_INVALID)
7016 {
7017 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
7018 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
7019 if (!(fAccess & (IEM_ACCESS_BOUNCE_BUFFERED | IEM_ACCESS_NOT_LOCKED)))
7020 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
7021 AssertMsg(pVCpu->iem.s.cActiveMappings > 0,
7022 ("iMemMap=%u fAccess=%#x pv=%p GCPhysFirst=%RGp GCPhysSecond=%RGp\n",
7023 iMemMap, fAccess, pVCpu->iem.s.aMemMappings[iMemMap].pv,
7024 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond));
7025 pVCpu->iem.s.cActiveMappings--;
7026 }
7027 }
7028}
7029
7030
7031/*
7032 * Instantiate R/W templates.
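 *
 * Each IEMAllMemRWTmpl.cpp.h include below presumably expands to the fetch and
 * store helpers for the given TMPL_MEM_TYPE (plus the stack push/pop ones while
 * TMPL_MEM_WITH_STACK is defined), e.g. the iemMemFetchDataU16/U32/U64 workers
 * used by iemMemFetchDataXdtr further down.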
7033 */
7034#define TMPL_MEM_WITH_STACK
7035
7036#define TMPL_MEM_TYPE uint8_t
7037#define TMPL_MEM_FN_SUFF U8
7038#define TMPL_MEM_FMT_TYPE "%#04x"
7039#define TMPL_MEM_FMT_DESC "byte"
7040#include "IEMAllMemRWTmpl.cpp.h"
7041
7042#define TMPL_MEM_TYPE uint16_t
7043#define TMPL_MEM_FN_SUFF U16
7044#define TMPL_MEM_FMT_TYPE "%#06x"
7045#define TMPL_MEM_FMT_DESC "word"
7046#include "IEMAllMemRWTmpl.cpp.h"
7047
7048#define TMPL_WITH_PUSH_SREG
7049#define TMPL_MEM_TYPE uint32_t
7050#define TMPL_MEM_FN_SUFF U32
7051#define TMPL_MEM_FMT_TYPE "%#010x"
7052#define TMPL_MEM_FMT_DESC "dword"
7053#include "IEMAllMemRWTmpl.cpp.h"
7054#undef TMPL_WITH_PUSH_SREG
7055
7056#define TMPL_MEM_TYPE uint64_t
7057#define TMPL_MEM_FN_SUFF U64
7058#define TMPL_MEM_FMT_TYPE "%#018RX64"
7059#define TMPL_MEM_FMT_DESC "qword"
7060#include "IEMAllMemRWTmpl.cpp.h"
7061
7062#undef TMPL_MEM_WITH_STACK
7063
7064#define TMPL_MEM_TYPE uint64_t
7065#define TMPL_MEM_TYPE_ALIGN (sizeof(uint64_t) * 2 - 1)
7066#define TMPL_MEM_FN_SUFF U64AlignedU128
7067#define TMPL_MEM_FMT_TYPE "%#018RX64"
7068#define TMPL_MEM_FMT_DESC "qword"
7069#include "IEMAllMemRWTmpl.cpp.h"
7070
7071/* See IEMAllMemRWTmplInline.cpp.h */
7072#define TMPL_MEM_BY_REF
7073
7074#define TMPL_MEM_TYPE RTFLOAT80U
7075#define TMPL_MEM_TYPE_ALIGN (sizeof(uint64_t) - 1)
7076#define TMPL_MEM_FN_SUFF R80
7077#define TMPL_MEM_FMT_TYPE "%.10Rhxs"
7078#define TMPL_MEM_FMT_DESC "tword"
7079#include "IEMAllMemRWTmpl.cpp.h"
7080
7081#define TMPL_MEM_TYPE RTPBCD80U
7082#define TMPL_MEM_TYPE_ALIGN (sizeof(uint64_t) - 1) /** @todo testcase: 80-bit BCD alignment */
7083#define TMPL_MEM_FN_SUFF D80
7084#define TMPL_MEM_FMT_TYPE "%.10Rhxs"
7085#define TMPL_MEM_FMT_DESC "tword"
7086#include "IEMAllMemRWTmpl.cpp.h"
7087
7088#define TMPL_MEM_TYPE RTUINT128U
7089#define TMPL_MEM_TYPE_ALIGN (sizeof(RTUINT128U) - 1)
7090#define TMPL_MEM_FN_SUFF U128
7091#define TMPL_MEM_FMT_TYPE "%.16Rhxs"
7092#define TMPL_MEM_FMT_DESC "dqword"
7093#include "IEMAllMemRWTmpl.cpp.h"
7094
7095
7096/**
7097 * Fetches a data dword and zero extends it to a qword.
7098 *
7099 * @returns Strict VBox status code.
7100 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7101 * @param pu64Dst Where to return the qword.
7102 * @param iSegReg The index of the segment register to use for
7103 * this access. The base and limits are checked.
7104 * @param GCPtrMem The address of the guest memory.
7105 */
7106VBOXSTRICTRC iemMemFetchDataU32_ZX_U64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7107{
7108 /* The lazy approach for now... */
7109 uint8_t bUnmapInfo;
7110 uint32_t const *pu32Src;
7111 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, &bUnmapInfo, sizeof(*pu32Src), iSegReg, GCPtrMem,
7112 IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
7113 if (rc == VINF_SUCCESS)
7114 {
7115 *pu64Dst = *pu32Src;
7116 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7117 Log(("IEM RD dword %d|%RGv: %#010RX64\n", iSegReg, GCPtrMem, *pu64Dst));
7118 }
7119 return rc;
7120}
7121
7122
7123#ifdef SOME_UNUSED_FUNCTION
7124/**
7125 * Fetches a data dword and sign extends it to a qword.
7126 *
7127 * @returns Strict VBox status code.
7128 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7129 * @param pu64Dst Where to return the sign extended value.
7130 * @param iSegReg The index of the segment register to use for
7131 * this access. The base and limits are checked.
7132 * @param GCPtrMem The address of the guest memory.
7133 */
7134VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7135{
7136 /* The lazy approach for now... */
7137 uint8_t bUnmapInfo;
7138 int32_t const *pi32Src;
7139 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, &bUnmapInfo, sizeof(*pi32Src), iSegReg, GCPtrMem,
7140 IEM_ACCESS_DATA_R, sizeof(*pi32Src) - 1);
7141 if (rc == VINF_SUCCESS)
7142 {
7143 *pu64Dst = *pi32Src;
7144 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7145 Log(("IEM RD dword %d|%RGv: %#010x\n", iSegReg, GCPtrMem, (uint32_t)*pu64Dst));
7146 }
7147#ifdef __GNUC__ /* warning: GCC may be a royal pain */
7148 else
7149 *pu64Dst = 0;
7150#endif
7151 return rc;
7152}
7153#endif
7154
7155
7156/**
7157 * Fetches a data dqword (double qword) at an aligned address, generally SSE
7158 * related.
7159 *
7160 * Raises \#GP(0) if not aligned.
7161 *
7162 * @returns Strict VBox status code.
7163 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7164 * @param   pu128Dst            Where to return the dqword.
7165 * @param iSegReg The index of the segment register to use for
7166 * this access. The base and limits are checked.
7167 * @param GCPtrMem The address of the guest memory.
7168 */
7169VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7170{
7171 /* The lazy approach for now... */
7172 uint8_t bUnmapInfo;
7173 PCRTUINT128U pu128Src;
7174 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, &bUnmapInfo, sizeof(*pu128Src), iSegReg, GCPtrMem,
7175 IEM_ACCESS_DATA_R, (sizeof(*pu128Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7176 if (rc == VINF_SUCCESS)
7177 {
7178 pu128Dst->au64[0] = pu128Src->au64[0];
7179 pu128Dst->au64[1] = pu128Src->au64[1];
7180 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7181 Log(("IEM RD dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
7182 }
7183 return rc;
7184}
7185
7186
7187#ifdef IEM_WITH_SETJMP
7188/**
7189 * Fetches a data dqword (double qword) at an aligned address, generally SSE
7190 * related, longjmp on error.
7191 *
7192 * Raises \#GP(0) if not aligned.
7193 *
7194 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7195 * @param   pu128Dst            Where to return the dqword.
7196 * @param iSegReg The index of the segment register to use for
7197 * this access. The base and limits are checked.
7198 * @param GCPtrMem The address of the guest memory.
7199 */
7200void iemMemFetchDataU128AlignedSseJmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg,
7201 RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7202{
7203 /* The lazy approach for now... */
7204 uint8_t bUnmapInfo;
7205 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
7206 (sizeof(*pu128Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7207 pu128Dst->au64[0] = pu128Src->au64[0];
7208 pu128Dst->au64[1] = pu128Src->au64[1];
7209 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7210 Log(("IEM RD dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
7211}
7212#endif
7213
7214
7215/**
7216 * Fetches a data oword (octo word), generally AVX related.
7217 *
7218 * @returns Strict VBox status code.
7219 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7220 * @param   pu256Dst            Where to return the qqword.
7221 * @param iSegReg The index of the segment register to use for
7222 * this access. The base and limits are checked.
7223 * @param GCPtrMem The address of the guest memory.
7224 */
7225VBOXSTRICTRC iemMemFetchDataU256(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7226{
7227 /* The lazy approach for now... */
7228 uint8_t bUnmapInfo;
7229 PCRTUINT256U pu256Src;
7230 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, &bUnmapInfo, sizeof(*pu256Src), iSegReg, GCPtrMem,
7231 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7232 if (rc == VINF_SUCCESS)
7233 {
7234 pu256Dst->au64[0] = pu256Src->au64[0];
7235 pu256Dst->au64[1] = pu256Src->au64[1];
7236 pu256Dst->au64[2] = pu256Src->au64[2];
7237 pu256Dst->au64[3] = pu256Src->au64[3];
7238 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7239 Log(("IEM RD qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7240 }
7241 return rc;
7242}
7243
7244
7245#ifdef IEM_WITH_SETJMP
7246/**
7247 * Fetches a data oword (octo word), generally AVX related, longjmp on error.
7248 *
7249 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7250 * @param   pu256Dst            Where to return the qqword.
7251 * @param iSegReg The index of the segment register to use for
7252 * this access. The base and limits are checked.
7253 * @param GCPtrMem The address of the guest memory.
7254 */
7255void iemMemFetchDataU256Jmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7256{
7257 /* The lazy approach for now... */
7258 uint8_t bUnmapInfo;
7259 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu256Src), iSegReg, GCPtrMem,
7260 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7261 pu256Dst->au64[0] = pu256Src->au64[0];
7262 pu256Dst->au64[1] = pu256Src->au64[1];
7263 pu256Dst->au64[2] = pu256Src->au64[2];
7264 pu256Dst->au64[3] = pu256Src->au64[3];
7265 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7266 Log(("IEM RD qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7267}
7268#endif
7269
7270
7271/**
7272 * Fetches a data oword (octo word) at an aligned address, generally AVX
7273 * related.
7274 *
7275 * Raises \#GP(0) if not aligned.
7276 *
7277 * @returns Strict VBox status code.
7278 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7279 * @param   pu256Dst            Where to return the qqword.
7280 * @param iSegReg The index of the segment register to use for
7281 * this access. The base and limits are checked.
7282 * @param GCPtrMem The address of the guest memory.
7283 */
7284VBOXSTRICTRC iemMemFetchDataU256AlignedSse(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7285{
7286 /* The lazy approach for now... */
7287 uint8_t bUnmapInfo;
7288 PCRTUINT256U pu256Src;
7289 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, &bUnmapInfo, sizeof(*pu256Src), iSegReg, GCPtrMem,
7290 IEM_ACCESS_DATA_R, (sizeof(*pu256Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7291 if (rc == VINF_SUCCESS)
7292 {
7293 pu256Dst->au64[0] = pu256Src->au64[0];
7294 pu256Dst->au64[1] = pu256Src->au64[1];
7295 pu256Dst->au64[2] = pu256Src->au64[2];
7296 pu256Dst->au64[3] = pu256Src->au64[3];
7297 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7298 Log(("IEM RD qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7299 }
7300 return rc;
7301}
7302
7303
7304#ifdef IEM_WITH_SETJMP
7305/**
7306 * Fetches a data oword (octo word) at an aligned address, generally AVX
7307 * related, longjmp on error.
7308 *
7309 * Raises \#GP(0) if not aligned.
7310 *
7311 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7312 * @param   pu256Dst            Where to return the qqword.
7313 * @param iSegReg The index of the segment register to use for
7314 * this access. The base and limits are checked.
7315 * @param GCPtrMem The address of the guest memory.
7316 */
7317void iemMemFetchDataU256AlignedSseJmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg,
7318 RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7319{
7320 /* The lazy approach for now... */
7321 uint8_t bUnmapInfo;
7322 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
7323 (sizeof(*pu256Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7324 pu256Dst->au64[0] = pu256Src->au64[0];
7325 pu256Dst->au64[1] = pu256Src->au64[1];
7326 pu256Dst->au64[2] = pu256Src->au64[2];
7327 pu256Dst->au64[3] = pu256Src->au64[3];
7328 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7329 Log(("IEM RD qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7330}
7331#endif
7332
7333
7334
7335/**
7336 * Fetches a descriptor register (lgdt, lidt).
7337 *
7338 * @returns Strict VBox status code.
7339 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7340 * @param pcbLimit Where to return the limit.
7341 * @param pGCPtrBase Where to return the base.
7342 * @param iSegReg The index of the segment register to use for
7343 * this access. The base and limits are checked.
7344 * @param GCPtrMem The address of the guest memory.
7345 * @param enmOpSize The effective operand size.
7346 */
7347VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPUCC pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
7348 RTGCPTR GCPtrMem, IEMMODE enmOpSize) RT_NOEXCEPT
7349{
7350 /*
7351 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
7352 * little special:
7353 * - The two reads are done separately.
7354 * - Operand size override works in 16-bit and 32-bit code, but not in 64-bit code.
7355 * - We suspect the 386 to actually commit the limit before the base in
7356 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
7357 * don't try to emulate this eccentric behavior, because it's not well
7358 * enough understood and rather hard to trigger.
7359 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
7360 */
7361 VBOXSTRICTRC rcStrict;
7362 if (IEM_IS_64BIT_CODE(pVCpu))
7363 {
7364 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7365 if (rcStrict == VINF_SUCCESS)
7366 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
7367 }
7368 else
7369 {
7370 uint32_t uTmp = 0; /* (Visual C++ maybe used uninitialized) */
7371 if (enmOpSize == IEMMODE_32BIT)
7372 {
7373 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
7374 {
7375 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7376 if (rcStrict == VINF_SUCCESS)
7377 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7378 }
7379 else
7380 {
7381 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
7382 if (rcStrict == VINF_SUCCESS)
7383 {
7384 *pcbLimit = (uint16_t)uTmp;
7385 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7386 }
7387 }
7388 if (rcStrict == VINF_SUCCESS)
7389 *pGCPtrBase = uTmp;
7390 }
7391 else
7392 {
7393 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7394 if (rcStrict == VINF_SUCCESS)
7395 {
7396 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7397 if (rcStrict == VINF_SUCCESS)
7398 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
7399 }
7400 }
7401 }
7402 return rcStrict;
7403}
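/*
 * Usage sketch for an LGDT/LIDT style caller (illustrative only; iEffSeg and
 * GCPtrEffSrc stand for the decoded segment and effective address):
 *
 *      uint16_t     cbLimit;
 *      RTGCPTR      GCPtrBase;
 *      VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(pVCpu, &cbLimit, &GCPtrBase, iEffSeg,
 *                                                  GCPtrEffSrc, pVCpu->iem.s.enmEffOpSize);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *
 * The caller then loads cbLimit and GCPtrBase into GDTR/IDTR.
 */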
7404
7405
7406/**
7407 * Stores a data dqword, SSE aligned.
7408 *
7409 * @returns Strict VBox status code.
7410 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7411 * @param iSegReg The index of the segment register to use for
7412 * this access. The base and limits are checked.
7413 * @param GCPtrMem The address of the guest memory.
7414 * @param u128Value The value to store.
7415 */
7416VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT
7417{
7418 /* The lazy approach for now... */
7419 uint8_t bUnmapInfo;
7420 PRTUINT128U pu128Dst;
7421 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, &bUnmapInfo, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W,
7422 (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7423 if (rc == VINF_SUCCESS)
7424 {
7425 pu128Dst->au64[0] = u128Value.au64[0];
7426 pu128Dst->au64[1] = u128Value.au64[1];
7427 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7428 Log5(("IEM WR dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
7429 }
7430 return rc;
7431}
7432
7433
7434#ifdef IEM_WITH_SETJMP
7435/**
7436 * Stores a data dqword, SSE aligned, longjmp on error.
7437 *
7439 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7440 * @param iSegReg The index of the segment register to use for
7441 * this access. The base and limits are checked.
7442 * @param GCPtrMem The address of the guest memory.
7443 * @param u128Value The value to store.
7444 */
7445void iemMemStoreDataU128AlignedSseJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem,
7446 RTUINT128U u128Value) IEM_NOEXCEPT_MAY_LONGJMP
7447{
7448 /* The lazy approach for now... */
7449 uint8_t bUnmapInfo;
7450 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W,
7451 (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7452 pu128Dst->au64[0] = u128Value.au64[0];
7453 pu128Dst->au64[1] = u128Value.au64[1];
7454 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7455 Log5(("IEM WR dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
7456}
7457#endif
7458
7459
7460/**
7461 * Stores a data qqword (256 bits).
7462 *
7463 * @returns Strict VBox status code.
7464 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7465 * @param iSegReg The index of the segment register to use for
7466 * this access. The base and limits are checked.
7467 * @param GCPtrMem The address of the guest memory.
7468 * @param pu256Value Pointer to the value to store.
7469 */
7470VBOXSTRICTRC iemMemStoreDataU256(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
7471{
7472 /* The lazy approach for now... */
7473 uint8_t bUnmapInfo;
7474 PRTUINT256U pu256Dst;
7475 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, &bUnmapInfo, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7476 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7477 if (rc == VINF_SUCCESS)
7478 {
7479 pu256Dst->au64[0] = pu256Value->au64[0];
7480 pu256Dst->au64[1] = pu256Value->au64[1];
7481 pu256Dst->au64[2] = pu256Value->au64[2];
7482 pu256Dst->au64[3] = pu256Value->au64[3];
7483 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7484 Log5(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7485 }
7486 return rc;
7487}
7488
7489
7490#ifdef IEM_WITH_SETJMP
7491/**
7492 * Stores a data qqword (256 bits), longjmp on error.
7493 *
7494 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7495 * @param iSegReg The index of the segment register to use for
7496 * this access. The base and limits are checked.
7497 * @param GCPtrMem The address of the guest memory.
7498 * @param pu256Value Pointer to the value to store.
7499 */
7500void iemMemStoreDataU256Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP
7501{
7502 /* The lazy approach for now... */
7503 uint8_t bUnmapInfo;
7504 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7505 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7506 pu256Dst->au64[0] = pu256Value->au64[0];
7507 pu256Dst->au64[1] = pu256Value->au64[1];
7508 pu256Dst->au64[2] = pu256Value->au64[2];
7509 pu256Dst->au64[3] = pu256Value->au64[3];
7510 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7511 Log5(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7512}
7513#endif
7514
7515
7516/**
7517 * Stores a data qqword (256 bits), AVX aligned; raises \#GP(0) if not aligned.
7518 *
7519 * @returns Strict VBox status code.
7520 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7521 * @param iSegReg The index of the segment register to use for
7522 * this access. The base and limits are checked.
7523 * @param GCPtrMem The address of the guest memory.
7524 * @param pu256Value Pointer to the value to store.
7525 */
7526VBOXSTRICTRC iemMemStoreDataU256AlignedAvx(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
7527{
7528 /* The lazy approach for now... */
7529 uint8_t bUnmapInfo;
7530 PRTUINT256U pu256Dst;
7531 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, &bUnmapInfo, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7532 IEM_ACCESS_DATA_W, (sizeof(*pu256Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP);
7533 if (rc == VINF_SUCCESS)
7534 {
7535 pu256Dst->au64[0] = pu256Value->au64[0];
7536 pu256Dst->au64[1] = pu256Value->au64[1];
7537 pu256Dst->au64[2] = pu256Value->au64[2];
7538 pu256Dst->au64[3] = pu256Value->au64[3];
7539 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7540 Log5(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7541 }
7542 return rc;
7543}
7544
7545
7546#ifdef IEM_WITH_SETJMP
7547/**
7548 * Stores a data qqword (256 bits), AVX aligned, longjmp on error.
7549 *
7551 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7552 * @param iSegReg The index of the segment register to use for
7553 * this access. The base and limits are checked.
7554 * @param GCPtrMem The address of the guest memory.
7555 * @param pu256Value Pointer to the value to store.
7556 */
7557void iemMemStoreDataU256AlignedAvxJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem,
7558 PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP
7559{
7560 /* The lazy approach for now... */
7561 uint8_t bUnmapInfo;
7562 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7563 IEM_ACCESS_DATA_W, (sizeof(*pu256Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP);
7564 pu256Dst->au64[0] = pu256Value->au64[0];
7565 pu256Dst->au64[1] = pu256Value->au64[1];
7566 pu256Dst->au64[2] = pu256Value->au64[2];
7567 pu256Dst->au64[3] = pu256Value->au64[3];
7568 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7569 Log5(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7570}
7571#endif
7572
7573
7574/**
7575 * Stores a descriptor register (sgdt, sidt).
7576 *
7577 * @returns Strict VBox status code.
7578 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7579 * @param cbLimit The limit.
7580 * @param GCPtrBase The base address.
7581 * @param iSegReg The index of the segment register to use for
7582 * this access. The base and limits are checked.
7583 * @param GCPtrMem The address of the guest memory.
7584 */
7585VBOXSTRICTRC iemMemStoreDataXdtr(PVMCPUCC pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7586{
7587 /*
7588 * The SIDT and SGDT instructions actually store the data using two
7589 * independent writes (see bs3CpuBasic2_sidt_sgdt_One). The instructions
7590 * do not respond to opsize prefixes.
7591 */
7592 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
7593 if (rcStrict == VINF_SUCCESS)
7594 {
7595 if (IEM_IS_16BIT_CODE(pVCpu))
7596 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
7597 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
7598 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
7599 else if (IEM_IS_32BIT_CODE(pVCpu))
7600 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
7601 else
7602 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
7603 }
7604 return rcStrict;
7605}
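/*
 * Sketch of an SGDT/SIDT style caller (illustrative only; iEffSeg and GCPtrEffDst are
 * hypothetical locals of the instruction helper):
 *
 *      IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_GDTR);
 *      VBOXSTRICTRC rcStrict = iemMemStoreDataXdtr(pVCpu, pVCpu->cpum.GstCtx.gdtr.cbGdt,
 *                                                  pVCpu->cpum.GstCtx.gdtr.pGdt,
 *                                                  iEffSeg, GCPtrEffDst);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 */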
7606
7607
7608/**
7609 * Begin a special stack push (used by interrupts, exceptions and such).
7610 *
7611 * This will raise \#SS or \#PF if appropriate.
7612 *
7613 * @returns Strict VBox status code.
7614 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7615 * @param cbMem The number of bytes to push onto the stack.
7616 * @param cbAlign The alignment mask (7, 3, 1).
7617 * @param ppvMem Where to return the pointer to the stack memory.
7618 * As with the other memory functions this could be
7619 * direct access or bounce buffered access, so
7620 * don't commit registers until the commit call
7621 * succeeds.
7622 * @param pbUnmapInfo Where to store unmap info for
7623 * iemMemStackPushCommitSpecial.
7624 * @param puNewRsp Where to return the new RSP value. This must be
7625 * passed unchanged to
7626 * iemMemStackPushCommitSpecial().
7627 */
7628VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
7629 void **ppvMem, uint8_t *pbUnmapInfo, uint64_t *puNewRsp) RT_NOEXCEPT
7630{
7631 Assert(cbMem < UINT8_MAX);
7632 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, (uint8_t)cbMem, puNewRsp);
7633 return iemMemMap(pVCpu, ppvMem, pbUnmapInfo, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W, cbAlign);
7634}
7635
7636
7637/**
7638 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
7639 *
7640 * This will update the rSP.
7641 *
7642 * @returns Strict VBox status code.
7643 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7644 * @param bUnmapInfo Unmap info set by iemMemStackPushBeginSpecial.
7645 * @param uNewRsp The new RSP value returned by
7646 * iemMemStackPushBeginSpecial().
7647 */
7648VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPUCC pVCpu, uint8_t bUnmapInfo, uint64_t uNewRsp) RT_NOEXCEPT
7649{
7650 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7651 if (rcStrict == VINF_SUCCESS)
7652 pVCpu->cpum.GstCtx.rsp = uNewRsp;
7653 return rcStrict;
7654}
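/*
 * Begin/commit pairing sketch (illustrative only; the frame size and alignment mask
 * depend on the exception or interrupt being dispatched):
 *
 *      void        *pvStackFrame;
 *      uint8_t      bUnmapInfo;
 *      uint64_t     uNewRsp;
 *      VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6 /(cbMem)/, 3 /(cbAlign)/,
 *                                                          &pvStackFrame, &bUnmapInfo, &uNewRsp);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      ... write the frame via pvStackFrame ...
 *      rcStrict = iemMemStackPushCommitSpecial(pVCpu, bUnmapInfo, uNewRsp);
 */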
7655
7656
7657/**
7658 * Begin a special stack pop (used by iret, retf and such).
7659 *
7660 * This will raise \#SS or \#PF if appropriate.
7661 *
7662 * @returns Strict VBox status code.
7663 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7664 * @param cbMem The number of bytes to pop from the stack.
7665 * @param cbAlign The alignment mask (7, 3, 1).
7666 * @param ppvMem Where to return the pointer to the stack memory.
7667 * @param pbUnmapInfo Where to store unmap info for
7668 * iemMemStackPopDoneSpecial.
7669 * @param puNewRsp Where to return the new RSP value. This must be
7670 * assigned to CPUMCTX::rsp manually some time
7671 * after iemMemStackPopDoneSpecial() has been
7672 * called.
7673 */
7674VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
7675 void const **ppvMem, uint8_t *pbUnmapInfo, uint64_t *puNewRsp) RT_NOEXCEPT
7676{
7677 Assert(cbMem < UINT8_MAX);
7678 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, (uint8_t)cbMem, puNewRsp);
7679 return iemMemMap(pVCpu, (void **)ppvMem, pbUnmapInfo, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R, cbAlign);
7680}
7681
7682
7683/**
7684 * Continue a special stack pop (used by iret and retf), for the purpose of
7685 * retrieving a new stack pointer.
7686 *
7687 * This will raise \#SS or \#PF if appropriate.
7688 *
7689 * @returns Strict VBox status code.
7690 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7691 * @param off Offset from the top of the stack. This is zero
7692 * except in the retf case.
7693 * @param cbMem The number of bytes to pop from the stack.
7694 * @param ppvMem Where to return the pointer to the stack memory.
7695 * @param pbUnmapInfo Where to store unmap info for
7696 * iemMemStackPopDoneSpecial.
7697 * @param uCurNewRsp The current uncommitted RSP value. (No need to
7698 * return this because all use of this function is
7699 * to retrieve a new value and anything we return
7700 * here would be discarded.)
7701 */
7702VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPUCC pVCpu, size_t off, size_t cbMem,
7703 void const **ppvMem, uint8_t *pbUnmapInfo, uint64_t uCurNewRsp) RT_NOEXCEPT
7704{
7705 Assert(cbMem < UINT8_MAX);
7706
7707 /* The essense of iemRegGetRspForPopEx and friends: */ /** @todo put this into a inlined function? */
7708 RTGCPTR GCPtrTop;
7709 if (IEM_IS_64BIT_CODE(pVCpu))
7710 GCPtrTop = uCurNewRsp;
7711 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
7712 GCPtrTop = (uint32_t)uCurNewRsp;
7713 else
7714 GCPtrTop = (uint16_t)uCurNewRsp;
7715
7716 return iemMemMap(pVCpu, (void **)ppvMem, pbUnmapInfo, cbMem, X86_SREG_SS, GCPtrTop + off, IEM_ACCESS_STACK_R,
7717 0 /* checked in iemMemStackPopBeginSpecial */);
7718}
7719
7720
7721/**
7722 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
7723 * iemMemStackPopContinueSpecial).
7724 *
7725 * The caller will manually commit the rSP.
7726 *
7727 * @returns Strict VBox status code.
7728 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7729 * @param bUnmapInfo Unmap information returned by
7730 * iemMemStackPopBeginSpecial() or
7731 * iemMemStackPopContinueSpecial().
7732 */
7733VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
7734{
7735 return iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7736}
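/*
 * Sketch of the whole pop sequence (illustrative only; cbFrame is a hypothetical frame
 * size for an iret/retf style caller):
 *
 *      void const  *pvFrame;
 *      uint8_t      bUnmapInfo;
 *      uint64_t     uNewRsp;
 *      VBOXSTRICTRC rcStrict = iemMemStackPopBeginSpecial(pVCpu, cbFrame, 1,
 *                                                         &pvFrame, &bUnmapInfo, &uNewRsp);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      ... read the frame ...
 *      rcStrict = iemMemStackPopDoneSpecial(pVCpu, bUnmapInfo);
 *      if (rcStrict == VINF_SUCCESS)
 *          pVCpu->cpum.GstCtx.rsp = uNewRsp;    (RSP is committed manually, as noted above)
 */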
7737
7738
7739/**
7740 * Fetches a system table byte.
7741 *
7742 * @returns Strict VBox status code.
7743 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7744 * @param pbDst Where to return the byte.
7745 * @param iSegReg The index of the segment register to use for
7746 * this access. The base and limits are checked.
7747 * @param GCPtrMem The address of the guest memory.
7748 */
7749VBOXSTRICTRC iemMemFetchSysU8(PVMCPUCC pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7750{
7751 /* The lazy approach for now... */
7752 uint8_t bUnmapInfo;
7753 uint8_t const *pbSrc;
7754 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, &bUnmapInfo, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
7755 if (rc == VINF_SUCCESS)
7756 {
7757 *pbDst = *pbSrc;
7758 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7759 }
7760 return rc;
7761}
7762
7763
7764/**
7765 * Fetches a system table word.
7766 *
7767 * @returns Strict VBox status code.
7768 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7769 * @param pu16Dst Where to return the word.
7770 * @param iSegReg The index of the segment register to use for
7771 * this access. The base and limits are checked.
7772 * @param GCPtrMem The address of the guest memory.
7773 */
7774VBOXSTRICTRC iemMemFetchSysU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7775{
7776 /* The lazy approach for now... */
7777 uint8_t bUnmapInfo;
7778 uint16_t const *pu16Src;
7779 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, &bUnmapInfo, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
7780 if (rc == VINF_SUCCESS)
7781 {
7782 *pu16Dst = *pu16Src;
7783 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7784 }
7785 return rc;
7786}
7787
7788
7789/**
7790 * Fetches a system table dword.
7791 *
7792 * @returns Strict VBox status code.
7793 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7794 * @param pu32Dst Where to return the dword.
7795 * @param iSegReg The index of the segment register to use for
7796 * this access. The base and limits are checked.
7797 * @param GCPtrMem The address of the guest memory.
7798 */
7799VBOXSTRICTRC iemMemFetchSysU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7800{
7801 /* The lazy approach for now... */
7802 uint8_t bUnmapInfo;
7803 uint32_t const *pu32Src;
7804 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, &bUnmapInfo, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
7805 if (rc == VINF_SUCCESS)
7806 {
7807 *pu32Dst = *pu32Src;
7808 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7809 }
7810 return rc;
7811}
7812
7813
7814/**
7815 * Fetches a system table qword.
7816 *
7817 * @returns Strict VBox status code.
7818 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7819 * @param pu64Dst Where to return the qword.
7820 * @param iSegReg The index of the segment register to use for
7821 * this access. The base and limits are checked.
7822 * @param GCPtrMem The address of the guest memory.
7823 */
7824VBOXSTRICTRC iemMemFetchSysU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7825{
7826 /* The lazy approach for now... */
7827 uint8_t bUnmapInfo;
7828 uint64_t const *pu64Src;
7829 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, &bUnmapInfo, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
7830 if (rc == VINF_SUCCESS)
7831 {
7832 *pu64Dst = *pu64Src;
7833 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7834 }
7835 return rc;
7836}
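/*
 * These system-table fetchers are what the descriptor-table code below builds on.
 * Sketch (illustrative only; uSel is a hypothetical selector): reading one 8-byte GDT
 * entry at its linear address, passing UINT8_MAX as iSegReg so that no segment base or
 * limit is applied, just as the descriptor fetch code below does:
 *
 *      uint64_t     u64Entry;
 *      VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &u64Entry, UINT8_MAX,
 *                                                pVCpu->cpum.GstCtx.gdtr.pGdt + (uSel & X86_SEL_MASK));
 */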
7837
7838
7839/**
7840 * Fetches a descriptor table entry with caller specified error code.
7841 *
7842 * @returns Strict VBox status code.
7843 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7844 * @param pDesc Where to return the descriptor table entry.
7845 * @param uSel The selector which table entry to fetch.
7846 * @param uXcpt The exception to raise on table lookup error.
7847 * @param uErrorCode The error code associated with the exception.
7848 */
7849static VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,
7850 uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT
7851{
7852 AssertPtr(pDesc);
7853 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
7854
7855 /** @todo did the 286 require all 8 bytes to be accessible? */
7856 /*
7857 * Get the selector table base and check bounds.
7858 */
7859 RTGCPTR GCPtrBase;
7860 if (uSel & X86_SEL_LDT)
7861 {
7862 if ( !pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present
7863 || (uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.ldtr.u32Limit )
7864 {
7865 LogEx(LOG_GROUP_IEM, ("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
7866 uSel, pVCpu->cpum.GstCtx.ldtr.u32Limit, pVCpu->cpum.GstCtx.ldtr.Sel));
7867 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
7868 uErrorCode, 0);
7869 }
7870
7871 Assert(pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present);
7872 GCPtrBase = pVCpu->cpum.GstCtx.ldtr.u64Base;
7873 }
7874 else
7875 {
7876 if ((uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.gdtr.cbGdt)
7877 {
7878 LogEx(LOG_GROUP_IEM, ("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pVCpu->cpum.GstCtx.gdtr.cbGdt));
7879 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
7880 uErrorCode, 0);
7881 }
7882 GCPtrBase = pVCpu->cpum.GstCtx.gdtr.pGdt;
7883 }
7884
7885 /*
7886 * Read the legacy descriptor and maybe the long mode extensions if
7887 * required.
7888 */
7889 VBOXSTRICTRC rcStrict;
7890 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)
7891 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
7892 else
7893 {
7894 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);
7895 if (rcStrict == VINF_SUCCESS)
7896 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);
7897 if (rcStrict == VINF_SUCCESS)
7898 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);
7899 if (rcStrict == VINF_SUCCESS)
7900 pDesc->Legacy.au16[3] = 0;
7901 else
7902 return rcStrict;
7903 }
7904
7905 if (rcStrict == VINF_SUCCESS)
7906 {
7907 if ( !IEM_IS_LONG_MODE(pVCpu)
7908 || pDesc->Legacy.Gen.u1DescType)
7909 pDesc->Long.au64[1] = 0;
7910 else if ( (uint32_t)(uSel | X86_SEL_RPL_LDT) + 8
7911 <= (uSel & X86_SEL_LDT ? pVCpu->cpum.GstCtx.ldtr.u32Limit : pVCpu->cpum.GstCtx.gdtr.cbGdt))
7912 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
7913 else
7914 {
7915 LogEx(LOG_GROUP_IEM,("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
7916 /** @todo is this the right exception? */
7917 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
7918 }
7919 }
7920 return rcStrict;
7921}
7922
7923
7924/**
7925 * Fetches a descriptor table entry.
7926 *
7927 * @returns Strict VBox status code.
7928 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7929 * @param pDesc Where to return the descriptor table entry.
7930 * @param uSel The selector which table entry to fetch.
7931 * @param uXcpt The exception to raise on table lookup error.
7932 */
7933VBOXSTRICTRC iemMemFetchSelDesc(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt) RT_NOEXCEPT
7934{
7935 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
7936}
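/*
 * Typical call pattern (illustrative only; uNewCS is a hypothetical selector being
 * loaded):
 *
 *      IEMSELDESC   DescCS;
 *      VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_GP);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      ... examine DescCS.Legacy (and DescCS.Long in long mode) ...
 */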
7937
7938
7939/**
7940 * Marks the selector descriptor as accessed (only non-system descriptors).
7941 *
7942 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
7943 * therefore skips the limit checks.
7944 *
7945 * @returns Strict VBox status code.
7946 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7947 * @param uSel The selector.
7948 */
7949VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
7950{
7951 /*
7952 * Get the selector table base and calculate the entry address.
7953 */
7954 RTGCPTR GCPtr = uSel & X86_SEL_LDT
7955 ? pVCpu->cpum.GstCtx.ldtr.u64Base
7956 : pVCpu->cpum.GstCtx.gdtr.pGdt;
7957 GCPtr += uSel & X86_SEL_MASK;
7958
7959 /*
7960 * ASMAtomicBitSet will assert if the address is misaligned, so do some
7961 * ugly stuff to avoid that. This makes sure the access is atomic and
7962 * more or less removes any question about 8-bit vs 32-bit accesses.
7963 */
7964 VBOXSTRICTRC rcStrict;
7965 uint8_t bUnmapInfo;
7966 uint32_t volatile *pu32;
7967 if ((GCPtr & 3) == 0)
7968 {
7969 /* The normal case, map the 32-bit bits around the accessed bit (40). */
7970 GCPtr += 2 + 2;
7971 rcStrict = iemMemMap(pVCpu, (void **)&pu32, &bUnmapInfo, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0);
7972 if (rcStrict != VINF_SUCCESS)
7973 return rcStrict;
7974 ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
7975 }
7976 else
7977 {
7978 /* The misaligned GDT/LDT case, map the whole thing. */
7979 rcStrict = iemMemMap(pVCpu, (void **)&pu32, &bUnmapInfo, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0);
7980 if (rcStrict != VINF_SUCCESS)
7981 return rcStrict;
7982 switch ((uintptr_t)pu32 & 3)
7983 {
7984 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
7985 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
7986 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
7987 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
7988 }
7989 }
7990
7991 return iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7992}
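/*
 * The bit indexes above all target descriptor bit 40, i.e. X86_SEL_TYPE_ACCESSED in the
 * type field of descriptor byte 5: in the aligned case the mapping starts at byte 4, so
 * the bit sits at index 8 (40 - 32); in the misaligned case the byte pointer and bit
 * index are adjusted by the low two bits of pu32 so that the very same bit is set.
 */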
7993
7994
7995#undef LOG_GROUP
7996#define LOG_GROUP LOG_GROUP_IEM
7997
7998/** @} */
7999
8000/** @name Opcode Helpers.
8001 * @{
8002 */
8003
8004/**
8005 * Calculates the effective address of a ModR/M memory operand.
8006 *
8007 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8008 *
8009 * @return Strict VBox status code.
8010 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8011 * @param bRm The ModRM byte.
8012 * @param cbImmAndRspOffset - First byte: The size of any immediate
8013 * following the effective address opcode bytes
8014 * (only for RIP relative addressing).
8015 * - Second byte: RSP displacement (for POP [ESP]).
8016 * @param pGCPtrEff Where to return the effective address.
8017 */
8018VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset, PRTGCPTR pGCPtrEff) RT_NOEXCEPT
8019{
8020 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
8021# define SET_SS_DEF() \
8022 do \
8023 { \
8024 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
8025 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
8026 } while (0)
8027
8028 if (!IEM_IS_64BIT_CODE(pVCpu))
8029 {
8030/** @todo Check the effective address size crap! */
8031 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
8032 {
8033 uint16_t u16EffAddr;
8034
8035 /* Handle the disp16 form with no registers first. */
8036 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
8037 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
8038 else
8039 {
8040 /* Get the displacement. */
8041 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8042 {
8043 case 0: u16EffAddr = 0; break;
8044 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
8045 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
8046 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
8047 }
8048
8049 /* Add the base and index registers to the disp. */
8050 switch (bRm & X86_MODRM_RM_MASK)
8051 {
8052 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
8053 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
8054 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
8055 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
8056 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
8057 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
8058 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
8059 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
8060 }
8061 }
8062
8063 *pGCPtrEff = u16EffAddr;
8064 }
8065 else
8066 {
8067 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8068 uint32_t u32EffAddr;
8069
8070 /* Handle the disp32 form with no registers first. */
8071 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8072 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
8073 else
8074 {
8075 /* Get the register (or SIB) value. */
8076 switch ((bRm & X86_MODRM_RM_MASK))
8077 {
8078 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8079 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8080 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8081 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8082 case 4: /* SIB */
8083 {
8084 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8085
8086 /* Get the index and scale it. */
8087 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
8088 {
8089 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8090 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8091 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8092 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8093 case 4: u32EffAddr = 0; /*none */ break;
8094 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
8095 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8096 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8097 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8098 }
8099 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8100
8101 /* add base */
8102 switch (bSib & X86_SIB_BASE_MASK)
8103 {
8104 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
8105 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
8106 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
8107 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
8108 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8109 case 5:
8110 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8111 {
8112 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
8113 SET_SS_DEF();
8114 }
8115 else
8116 {
8117 uint32_t u32Disp;
8118 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8119 u32EffAddr += u32Disp;
8120 }
8121 break;
8122 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
8123 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
8124 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8125 }
8126 break;
8127 }
8128 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
8129 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8130 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8131 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8132 }
8133
8134 /* Get and add the displacement. */
8135 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8136 {
8137 case 0:
8138 break;
8139 case 1:
8140 {
8141 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8142 u32EffAddr += i8Disp;
8143 break;
8144 }
8145 case 2:
8146 {
8147 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8148 u32EffAddr += u32Disp;
8149 break;
8150 }
8151 default:
8152 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
8153 }
8154
8155 }
8156 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8157 *pGCPtrEff = u32EffAddr;
8158 }
8159 }
8160 else
8161 {
8162 uint64_t u64EffAddr;
8163
8164 /* Handle the rip+disp32 form with no registers first. */
8165 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8166 {
8167 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
8168 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
8169 }
8170 else
8171 {
8172 /* Get the register (or SIB) value. */
8173 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
8174 {
8175 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8176 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8177 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8178 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8179 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
8180 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8181 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8182 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8183 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8184 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8185 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8186 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8187 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8188 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8189 /* SIB */
8190 case 4:
8191 case 12:
8192 {
8193 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8194
8195 /* Get the index and scale it. */
8196 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
8197 {
8198 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8199 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8200 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8201 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8202 case 4: u64EffAddr = 0; /*none */ break;
8203 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
8204 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8205 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8206 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8207 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8208 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8209 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8210 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
8211 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8212 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8213 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8214 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8215 }
8216 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8217
8218 /* add base */
8219 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
8220 {
8221 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
8222 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
8223 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
8224 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
8225 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8226 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
8227 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
8228 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
8229 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
8230 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
8231 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
8232 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
8233 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
8234 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
8235 /* complicated encodings */
8236 case 5:
8237 case 13:
8238 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8239 {
8240 if (!pVCpu->iem.s.uRexB)
8241 {
8242 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
8243 SET_SS_DEF();
8244 }
8245 else
8246 u64EffAddr += pVCpu->cpum.GstCtx.r13;
8247 }
8248 else
8249 {
8250 uint32_t u32Disp;
8251 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8252 u64EffAddr += (int32_t)u32Disp;
8253 }
8254 break;
8255 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8256 }
8257 break;
8258 }
8259 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8260 }
8261
8262 /* Get and add the displacement. */
8263 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8264 {
8265 case 0:
8266 break;
8267 case 1:
8268 {
8269 int8_t i8Disp;
8270 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8271 u64EffAddr += i8Disp;
8272 break;
8273 }
8274 case 2:
8275 {
8276 uint32_t u32Disp;
8277 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8278 u64EffAddr += (int32_t)u32Disp;
8279 break;
8280 }
8281 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
8282 }
8283
8284 }
8285
8286 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
8287 *pGCPtrEff = u64EffAddr;
8288 else
8289 {
8290 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8291 *pGCPtrEff = u64EffAddr & UINT32_MAX;
8292 }
8293 }
8294
8295 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
8296 return VINF_SUCCESS;
8297}
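/*
 * Worked example (illustrative): in 32-bit code bRm=0x44 gives mod=01, rm=100, so a SIB
 * byte and a disp8 follow; with bSib=0x24 (no index, base=ESP) the result is ESP + disp8,
 * and SET_SS_DEF() switches the default segment to SS unless a segment prefix was given.
 * The caller side is simply:
 *
 *      RTGCPTR      GCPtrEff;
 *      VBOXSTRICTRC rcStrict = iemOpHlpCalcRmEffAddr(pVCpu, bRm, 0, &GCPtrEff);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 */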
8298
8299
8300#ifdef IEM_WITH_SETJMP
8301/**
8302 * Calculates the effective address of a ModR/M memory operand.
8303 *
8304 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8305 *
8306 * May longjmp on internal error.
8307 *
8308 * @return The effective address.
8309 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8310 * @param bRm The ModRM byte.
8311 * @param cbImmAndRspOffset - First byte: The size of any immediate
8312 * following the effective address opcode bytes
8313 * (only for RIP relative addressing).
8314 * - Second byte: RSP displacement (for POP [ESP]).
8315 */
8316RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset) IEM_NOEXCEPT_MAY_LONGJMP
8317{
8318 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
8319# define SET_SS_DEF() \
8320 do \
8321 { \
8322 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
8323 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
8324 } while (0)
8325
8326 if (!IEM_IS_64BIT_CODE(pVCpu))
8327 {
8328/** @todo Check the effective address size crap! */
8329 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
8330 {
8331 uint16_t u16EffAddr;
8332
8333 /* Handle the disp16 form with no registers first. */
8334 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
8335 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
8336 else
8337 {
8338 /* Get the displacement. */
8339 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8340 {
8341 case 0: u16EffAddr = 0; break;
8342 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
8343 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
8344 default: AssertFailedStmt(IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_1)); /* (caller checked for these) */
8345 }
8346
8347 /* Add the base and index registers to the disp. */
8348 switch (bRm & X86_MODRM_RM_MASK)
8349 {
8350 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
8351 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
8352 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
8353 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
8354 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
8355 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
8356 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
8357 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
8358 }
8359 }
8360
8361 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
8362 return u16EffAddr;
8363 }
8364
8365 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8366 uint32_t u32EffAddr;
8367
8368 /* Handle the disp32 form with no registers first. */
8369 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8370 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
8371 else
8372 {
8373 /* Get the register (or SIB) value. */
8374 switch ((bRm & X86_MODRM_RM_MASK))
8375 {
8376 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8377 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8378 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8379 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8380 case 4: /* SIB */
8381 {
8382 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8383
8384 /* Get the index and scale it. */
8385 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
8386 {
8387 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8388 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8389 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8390 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8391 case 4: u32EffAddr = 0; /*none */ break;
8392 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
8393 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8394 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8395 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8396 }
8397 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8398
8399 /* add base */
8400 switch (bSib & X86_SIB_BASE_MASK)
8401 {
8402 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
8403 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
8404 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
8405 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
8406 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8407 case 5:
8408 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8409 {
8410 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
8411 SET_SS_DEF();
8412 }
8413 else
8414 {
8415 uint32_t u32Disp;
8416 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8417 u32EffAddr += u32Disp;
8418 }
8419 break;
8420 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
8421 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
8422 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8423 }
8424 break;
8425 }
8426 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
8427 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8428 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8429 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8430 }
8431
8432 /* Get and add the displacement. */
8433 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8434 {
8435 case 0:
8436 break;
8437 case 1:
8438 {
8439 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8440 u32EffAddr += i8Disp;
8441 break;
8442 }
8443 case 2:
8444 {
8445 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8446 u32EffAddr += u32Disp;
8447 break;
8448 }
8449 default:
8450 AssertFailedStmt(IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_2)); /* (caller checked for these) */
8451 }
8452 }
8453
8454 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8455 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
8456 return u32EffAddr;
8457 }
8458
8459 uint64_t u64EffAddr;
8460
8461 /* Handle the rip+disp32 form with no registers first. */
8462 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8463 {
8464 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
8465 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
8466 }
8467 else
8468 {
8469 /* Get the register (or SIB) value. */
8470 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
8471 {
8472 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8473 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8474 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8475 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8476 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
8477 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8478 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8479 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8480 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8481 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8482 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8483 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8484 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8485 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8486 /* SIB */
8487 case 4:
8488 case 12:
8489 {
8490 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8491
8492 /* Get the index and scale it. */
8493 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
8494 {
8495 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8496 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8497 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8498 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8499 case 4: u64EffAddr = 0; /*none */ break;
8500 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
8501 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8502 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8503 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8504 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8505 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8506 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8507 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
8508 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8509 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8510 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8511 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8512 }
8513 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8514
8515 /* add base */
8516 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
8517 {
8518 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
8519 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
8520 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
8521 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
8522 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8523 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
8524 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
8525 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
8526 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
8527 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
8528 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
8529 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
8530 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
8531 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
8532 /* complicated encodings */
8533 case 5:
8534 case 13:
8535 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8536 {
8537 if (!pVCpu->iem.s.uRexB)
8538 {
8539 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
8540 SET_SS_DEF();
8541 }
8542 else
8543 u64EffAddr += pVCpu->cpum.GstCtx.r13;
8544 }
8545 else
8546 {
8547 uint32_t u32Disp;
8548 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8549 u64EffAddr += (int32_t)u32Disp;
8550 }
8551 break;
8552 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8553 }
8554 break;
8555 }
8556 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8557 }
8558
8559 /* Get and add the displacement. */
8560 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8561 {
8562 case 0:
8563 break;
8564 case 1:
8565 {
8566 int8_t i8Disp;
8567 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8568 u64EffAddr += i8Disp;
8569 break;
8570 }
8571 case 2:
8572 {
8573 uint32_t u32Disp;
8574 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8575 u64EffAddr += (int32_t)u32Disp;
8576 break;
8577 }
8578 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
8579 }
8580
8581 }
8582
8583 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
8584 {
8585 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
8586 return u64EffAddr;
8587 }
8588 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8589 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
8590 return u64EffAddr & UINT32_MAX;
8591}
8592#endif /* IEM_WITH_SETJMP */
8593
8594
8595/**
8596 * Calculates the effective address of a ModR/M memory operand, extended version
8597 * for use in the recompilers.
8598 *
8599 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8600 *
8601 * @return Strict VBox status code.
8602 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8603 * @param bRm The ModRM byte.
8604 * @param cbImmAndRspOffset - First byte: The size of any immediate
8605 * following the effective address opcode bytes
8606 * (only for RIP relative addressing).
8607 * - Second byte: RSP displacement (for POP [ESP]).
8608 * @param pGCPtrEff Where to return the effective address.
8609 * @param puInfo Extra info: 32-bit displacement (bits 31:0) and
8610 * SIB byte (bits 39:32).
8611 */
8612VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset, PRTGCPTR pGCPtrEff, uint64_t *puInfo) RT_NOEXCEPT
8613{
8614 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
8615# define SET_SS_DEF() \
8616 do \
8617 { \
8618 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
8619 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
8620 } while (0)
8621
8622 uint64_t uInfo;
8623 if (!IEM_IS_64BIT_CODE(pVCpu))
8624 {
8625/** @todo Check the effective address size crap! */
8626 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
8627 {
8628 uint16_t u16EffAddr;
8629
8630 /* Handle the disp16 form with no registers first. */
8631 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
8632 {
8633 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
8634 uInfo = u16EffAddr;
8635 }
8636 else
8637 {
8638 /* Get the displacement. */
8639 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8640 {
8641 case 0: u16EffAddr = 0; break;
8642 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
8643 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
8644 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
8645 }
8646 uInfo = u16EffAddr;
8647
8648 /* Add the base and index registers to the disp. */
8649 switch (bRm & X86_MODRM_RM_MASK)
8650 {
8651 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
8652 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
8653 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
8654 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
8655 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
8656 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
8657 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
8658 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
8659 }
8660 }
8661
8662 *pGCPtrEff = u16EffAddr;
8663 }
8664 else
8665 {
8666 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8667 uint32_t u32EffAddr;
8668
8669 /* Handle the disp32 form with no registers first. */
8670 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8671 {
8672 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
8673 uInfo = u32EffAddr;
8674 }
8675 else
8676 {
8677 /* Get the register (or SIB) value. */
8678 uInfo = 0;
8679 switch ((bRm & X86_MODRM_RM_MASK))
8680 {
8681 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8682 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8683 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8684 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8685 case 4: /* SIB */
8686 {
8687 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8688 uInfo = (uint64_t)bSib << 32;
8689
8690 /* Get the index and scale it. */
8691 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
8692 {
8693 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8694 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8695 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8696 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8697 case 4: u32EffAddr = 0; /*none */ break;
8698 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
8699 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8700 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8701 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8702 }
8703 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8704
8705 /* add base */
8706 switch (bSib & X86_SIB_BASE_MASK)
8707 {
8708 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
8709 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
8710 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
8711 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
8712 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8713 case 5:
8714 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8715 {
8716 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
8717 SET_SS_DEF();
8718 }
8719 else
8720 {
8721 uint32_t u32Disp;
8722 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8723 u32EffAddr += u32Disp;
8724 uInfo |= u32Disp;
8725 }
8726 break;
8727 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
8728 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
8729 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8730 }
8731 break;
8732 }
8733 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
8734 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8735 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8736 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8737 }
8738
8739 /* Get and add the displacement. */
8740 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8741 {
8742 case 0:
8743 break;
8744 case 1:
8745 {
8746 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8747 u32EffAddr += i8Disp;
8748 uInfo |= (uint32_t)(int32_t)i8Disp;
8749 break;
8750 }
8751 case 2:
8752 {
8753 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8754 u32EffAddr += u32Disp;
8755 uInfo |= (uint32_t)u32Disp;
8756 break;
8757 }
8758 default:
8759 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
8760 }
8761
8762 }
8763 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8764 *pGCPtrEff = u32EffAddr;
8765 }
8766 }
8767 else
8768 {
8769 uint64_t u64EffAddr;
8770
8771 /* Handle the rip+disp32 form with no registers first. */
8772 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8773 {
8774 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
8775 uInfo = (uint32_t)u64EffAddr;
8776 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
8777 }
8778 else
8779 {
8780 /* Get the register (or SIB) value. */
8781 uInfo = 0;
8782 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
8783 {
8784 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8785 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8786 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8787 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8788 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
8789 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8790 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8791 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8792 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8793 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8794 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8795 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8796 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8797 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8798 /* SIB */
8799 case 4:
8800 case 12:
8801 {
8802 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8803 uInfo = (uint64_t)bSib << 32;
8804
8805 /* Get the index and scale it. */
8806 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
8807 {
8808 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8809 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8810 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8811 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8812 case 4: u64EffAddr = 0; /*none */ break;
8813 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
8814 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8815 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8816 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8817 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8818 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8819 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8820 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
8821 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8822 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8823 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8824 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8825 }
8826 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8827
8828 /* add base */
8829 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
8830 {
8831 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
8832 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
8833 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
8834 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
8835 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8836 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
8837 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
8838 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
8839 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
8840 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
8841 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
8842 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
8843 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
8844 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
8845 /* complicated encodings */
8846 case 5:
8847 case 13:
8848 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8849 {
8850 if (!pVCpu->iem.s.uRexB)
8851 {
8852 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
8853 SET_SS_DEF();
8854 }
8855 else
8856 u64EffAddr += pVCpu->cpum.GstCtx.r13;
8857 }
8858 else
8859 {
8860 uint32_t u32Disp;
8861 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8862 u64EffAddr += (int32_t)u32Disp;
8863 uInfo |= u32Disp;
8864 }
8865 break;
8866 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8867 }
8868 break;
8869 }
8870 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8871 }
8872
8873 /* Get and add the displacement. */
8874 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8875 {
8876 case 0:
8877 break;
8878 case 1:
8879 {
8880 int8_t i8Disp;
8881 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8882 u64EffAddr += i8Disp;
8883 uInfo |= (uint32_t)(int32_t)i8Disp;
8884 break;
8885 }
8886 case 2:
8887 {
8888 uint32_t u32Disp;
8889 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8890 u64EffAddr += (int32_t)u32Disp;
8891 uInfo |= u32Disp;
8892 break;
8893 }
8894 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
8895 }
8896
8897 }
8898
8899 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
8900 *pGCPtrEff = u64EffAddr;
8901 else
8902 {
8903 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8904 *pGCPtrEff = u64EffAddr & UINT32_MAX;
8905 }
8906 }
8907 *puInfo = uInfo;
8908
8909 Log5(("iemOpHlpCalcRmEffAddrEx: EffAddr=%#010RGv uInfo=%RX64\n", *pGCPtrEff, uInfo));
8910 return VINF_SUCCESS;
8911}
8912
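
/*
 * Editor's note (added illustration, not part of the original sources): the
 * long-mode ModR/M+SIB decoding above boils down to
 *      EffAddr = base + (index << scale) + displacement
 * The guarded sketch below works through one concrete encoding; the function
 * name and the chosen instruction are invented purely for illustration.
 */
#if 0 /* illustrative sketch only */
/* "mov eax, [rbx+rsi*4+0x10]" encodes as 8B 44 B3 10:
       ModRM = 0x44 -> mod=01 (disp8), rm=100 (SIB follows)
       SIB   = 0xB3 -> scale=10 (*4), index=110 (rsi), base=011 (rbx) */
static uint64_t iemSketchSibExample(uint64_t uRbx, uint64_t uRsi)
{
    uint64_t u64EffAddr = uRsi;         /* index register                */
    u64EffAddr <<= 2;                   /* scaled by SIB.scale (1 << 2)  */
    u64EffAddr += uRbx;                 /* plus the base register        */
    u64EffAddr += (int8_t)0x10;         /* plus the sign-extended disp8  */
    return u64EffAddr;
}
#endif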
8913/** @} */
8914
8915
8916#ifdef LOG_ENABLED
8917/**
8918 * Logs the current instruction.
8919 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
8920 * @param fSameCtx Set if we have the same context information as the VMM,
8921 * clear if we may have already executed an instruction in
8922 * our debug context. When clear, we assume IEMCPU holds
8923 * valid CPU mode info.
8924 *
8925 * The @a fSameCtx parameter is now misleading and obsolete.
8926 * @param pszFunction The IEM function doing the execution.
8927 */
8928static void iemLogCurInstr(PVMCPUCC pVCpu, bool fSameCtx, const char *pszFunction) RT_NOEXCEPT
8929{
8930# ifdef IN_RING3
8931 if (LogIs2Enabled())
8932 {
8933 char szInstr[256];
8934 uint32_t cbInstr = 0;
8935 if (fSameCtx)
8936 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
8937 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
8938 szInstr, sizeof(szInstr), &cbInstr);
8939 else
8940 {
8941 uint32_t fFlags = 0;
8942 switch (IEM_GET_CPU_MODE(pVCpu))
8943 {
8944 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
8945 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
8946 case IEMMODE_16BIT:
8947 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) || pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
8948 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
8949 else
8950 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
8951 break;
8952 }
8953 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fFlags,
8954 szInstr, sizeof(szInstr), &cbInstr);
8955 }
8956
8957 PCX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
8958 Log2(("**** %s fExec=%x\n"
8959 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
8960 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
8961 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
8962 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
8963 " %s\n"
8964 , pszFunction, pVCpu->iem.s.fExec,
8965 pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ebx, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.esi, pVCpu->cpum.GstCtx.edi,
8966 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL, pVCpu->cpum.GstCtx.tr.Sel,
8967 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ds.Sel, pVCpu->cpum.GstCtx.es.Sel,
8968 pVCpu->cpum.GstCtx.fs.Sel, pVCpu->cpum.GstCtx.gs.Sel, pVCpu->cpum.GstCtx.eflags.u,
8969 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
8970 szInstr));
8971
8972 /* This stuff sucks atm. as it fills the log with MSRs. */
8973 //if (LogIs3Enabled())
8974 // DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
8975 }
8976 else
8977# endif
8978 LogFlow(("%s: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n", pszFunction, pVCpu->cpum.GstCtx.cs.Sel,
8979 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u));
8980 RT_NOREF_PV(pVCpu); RT_NOREF_PV(fSameCtx);
8981}
8982#endif /* LOG_ENABLED */
8983
8984
8985#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
8986/**
8987 * Deals with VMCPU_FF_VMX_APIC_WRITE, VMCPU_FF_VMX_MTF, VMCPU_FF_VMX_NMI_WINDOW,
8988 * VMCPU_FF_VMX_PREEMPT_TIMER and VMCPU_FF_VMX_INT_WINDOW.
8989 *
8990 * @returns Modified rcStrict.
8991 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8992 * @param rcStrict The instruction execution status.
8993 */
8994static VBOXSTRICTRC iemHandleNestedInstructionBoundaryFFs(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict) RT_NOEXCEPT
8995{
8996 Assert(CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)));
8997 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF))
8998 {
8999 /* VMX preemption timer takes priority over NMI-window exits. */
9000 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
9001 {
9002 rcStrict = iemVmxVmexitPreemptTimer(pVCpu);
9003 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER));
9004 }
9005 /*
9006 * Check remaining intercepts.
9007 *
9008 * NMI-window and Interrupt-window VM-exits.
9009 * Interrupt shadow (block-by-STI and Mov SS) inhibits interrupts and may also block NMIs.
9010 * Event injection during VM-entry takes priority over NMI-window and interrupt-window VM-exits.
9011 *
9012 * See Intel spec. 26.7.6 "NMI-Window Exiting".
9013 * See Intel spec. 26.7.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
9014 */
9015 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW | VMCPU_FF_VMX_INT_WINDOW)
9016 && !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
9017 && !TRPMHasTrap(pVCpu))
9018 {
9019 Assert(CPUMIsGuestVmxInterceptEvents(&pVCpu->cpum.GstCtx));
9020 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW)
9021 && CPUMIsGuestVmxVirtNmiBlocking(&pVCpu->cpum.GstCtx))
9022 {
9023 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_NMI_WINDOW, 0 /* u64ExitQual */);
9024 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW));
9025 }
9026 else if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW)
9027 && CPUMIsGuestVmxVirtIntrEnabled(&pVCpu->cpum.GstCtx))
9028 {
9029 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_INT_WINDOW, 0 /* u64ExitQual */);
9030 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW));
9031 }
9032 }
9033 }
9034 /* TPR-below threshold/APIC write has the highest priority. */
9035 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
9036 {
9037 rcStrict = iemVmxApicWriteEmulation(pVCpu);
9038 Assert(!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx));
9039 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE));
9040 }
9041 /* MTF takes priority over VMX-preemption timer. */
9042 else
9043 {
9044 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* u64ExitQual */);
9045 Assert(!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx));
9046 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF));
9047 }
9048 return rcStrict;
9049}
9050#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
9051
9052
9053/**
9054 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
9055 * IEMExecOneWithPrefetchedByPC.
9056 *
9057 * Similar code is found in IEMExecLots.
9058 *
9059 * @return Strict VBox status code.
9060 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9061 * @param fExecuteInhibit If set, execute the instruction following CLI,
9062 * POP SS and MOV SS,GR.
9063 * @param pszFunction The calling function name.
9064 */
9065DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPUCC pVCpu, bool fExecuteInhibit, const char *pszFunction)
9066{
9067 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9068 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9069 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9070 RT_NOREF_PV(pszFunction);
9071
9072#ifdef IEM_WITH_SETJMP
9073 VBOXSTRICTRC rcStrict;
9074 IEM_TRY_SETJMP(pVCpu, rcStrict)
9075 {
9076 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9077 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9078 }
9079 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
9080 {
9081 pVCpu->iem.s.cLongJumps++;
9082 }
9083 IEM_CATCH_LONGJMP_END(pVCpu);
9084#else
9085 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9086 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9087#endif
9088 if (rcStrict == VINF_SUCCESS)
9089 pVCpu->iem.s.cInstructions++;
9090 if (pVCpu->iem.s.cActiveMappings > 0)
9091 {
9092 Assert(rcStrict != VINF_SUCCESS);
9093 iemMemRollback(pVCpu);
9094 }
9095 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9096 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9097 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9098
9099//#ifdef DEBUG
9100// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
9101//#endif
9102
9103#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9104 /*
9105 * Perform any VMX nested-guest instruction boundary actions.
9106 *
9107 * If any of these causes a VM-exit, we must skip executing the next
9108 * instruction (would run into stale page tables). A VM-exit makes sure
9109 * there is no interrupt-inhibition, so that should ensure we don't go
9110 * on to try executing the next instruction. Clearing fExecuteInhibit is
9111 * problematic because of the setjmp/longjmp clobbering above.
9112 */
9113 if ( !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9114 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)
9115 || rcStrict != VINF_SUCCESS)
9116 { /* likely */ }
9117 else
9118 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9119#endif
9120
9121 /* Execute the next instruction as well if a cli, pop ss or
9122 mov ss, Gr has just completed successfully. */
9123 if ( fExecuteInhibit
9124 && rcStrict == VINF_SUCCESS
9125 && CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx))
9126 {
9127 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fExec & (IEM_F_BYPASS_HANDLERS | IEM_F_X86_DISREGARD_LOCK));
9128 if (rcStrict == VINF_SUCCESS)
9129 {
9130#ifdef LOG_ENABLED
9131 iemLogCurInstr(pVCpu, false, pszFunction);
9132#endif
9133#ifdef IEM_WITH_SETJMP
9134 IEM_TRY_SETJMP_AGAIN(pVCpu, rcStrict)
9135 {
9136 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9137 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9138 }
9139 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
9140 {
9141 pVCpu->iem.s.cLongJumps++;
9142 }
9143 IEM_CATCH_LONGJMP_END(pVCpu);
9144#else
9145 IEM_OPCODE_GET_FIRST_U8(&b);
9146 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9147#endif
9148 if (rcStrict == VINF_SUCCESS)
9149 {
9150 pVCpu->iem.s.cInstructions++;
9151#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9152 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9153 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW))
9154 { /* likely */ }
9155 else
9156 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9157#endif
9158 }
9159 if (pVCpu->iem.s.cActiveMappings > 0)
9160 {
9161 Assert(rcStrict != VINF_SUCCESS);
9162 iemMemRollback(pVCpu);
9163 }
9164 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9165 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9166 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9167 }
9168 else if (pVCpu->iem.s.cActiveMappings > 0)
9169 iemMemRollback(pVCpu);
9170 /** @todo drop this after we bake this change into RIP advancing. */
9171 CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx); /* hope this is correct for all exceptional cases... */
9172 }
9173
9174 /*
9175 * Return value fiddling, statistics and sanity assertions.
9176 */
9177 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9178
9179 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
9180 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
9181 return rcStrict;
9182}
9183
9184
9185/**
9186 * Execute one instruction.
9187 *
9188 * @return Strict VBox status code.
9189 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9190 */
9191VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPUCC pVCpu)
9192{
9193 AssertCompile(sizeof(pVCpu->iem.s) <= sizeof(pVCpu->iem.padding)); /* (tstVMStruct can't do its job w/o instruction stats) */
9194#ifdef LOG_ENABLED
9195 iemLogCurInstr(pVCpu, true, "IEMExecOne");
9196#endif
9197
9198 /*
9199 * Do the decoding and emulation.
9200 */
9201 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9202 if (rcStrict == VINF_SUCCESS)
9203 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOne");
9204 else if (pVCpu->iem.s.cActiveMappings > 0)
9205 iemMemRollback(pVCpu);
9206
9207 if (rcStrict != VINF_SUCCESS)
9208 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9209 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9210 return rcStrict;
9211}
9212
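
/*
 * Editor's note (added illustration, not part of the original sources): a
 * minimal, hypothetical caller of IEMExecOne(); the function name below is
 * invented, only the IEMExecOne() signature is taken from this file.
 */
#if 0 /* illustrative sketch only */
static VBOXSTRICTRC emSketchInterpretCurrentInstruction(PVMCPUCC pVCpu)
{
    /* Interpret exactly one instruction at the current CS:RIP; informational
       statuses are passed back so the outer execution loop can reschedule. */
    VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
    if (rcStrict != VINF_SUCCESS)
        LogFlow(("emSketchInterpretCurrentInstruction: rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
    return rcStrict;
}
#endif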
9213
9214VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPUCC pVCpu, uint32_t *pcbWritten)
9215{
9216 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
9217 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9218 if (rcStrict == VINF_SUCCESS)
9219 {
9220 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneEx");
9221 if (pcbWritten)
9222 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
9223 }
9224 else if (pVCpu->iem.s.cActiveMappings > 0)
9225 iemMemRollback(pVCpu);
9226
9227 return rcStrict;
9228}
9229
9230
9231VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPUCC pVCpu, uint64_t OpcodeBytesPC,
9232 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
9233{
9234 VBOXSTRICTRC rcStrict;
9235 if ( cbOpcodeBytes
9236 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
9237 {
9238 iemInitDecoder(pVCpu, 0 /*fExecOpts*/);
9239#ifdef IEM_WITH_CODE_TLB
9240 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
9241 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
9242 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
9243 pVCpu->iem.s.offCurInstrStart = 0;
9244 pVCpu->iem.s.offInstrNextByte = 0;
9245 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
9246#else
9247 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
9248 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
9249#endif
9250 rcStrict = VINF_SUCCESS;
9251 }
9252 else
9253 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9254 if (rcStrict == VINF_SUCCESS)
9255 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneWithPrefetchedByPC");
9256 else if (pVCpu->iem.s.cActiveMappings > 0)
9257 iemMemRollback(pVCpu);
9258
9259 return rcStrict;
9260}
9261
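
/*
 * Editor's note (added illustration, not part of the original sources): a
 * hypothetical use of IEMExecOneWithPrefetchedByPC() with opcode bytes the
 * caller already has (e.g. from VM-exit instruction information).  The
 * prefetched buffer is only used when OpcodeBytesPC equals the current guest
 * RIP; otherwise the call falls back to a normal opcode prefetch.  The
 * wrapper name and parameters are invented for illustration.
 */
#if 0 /* illustrative sketch only */
static VBOXSTRICTRC emSketchInterpretWithExitBytes(PVMCPUCC pVCpu, uint64_t uGuestRip,
                                                   const uint8_t *pabInstr, size_t cbInstrBytes)
{
    return IEMExecOneWithPrefetchedByPC(pVCpu, uGuestRip, pabInstr, cbInstrBytes);
}
#endif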
9262
9263VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPUCC pVCpu, uint32_t *pcbWritten)
9264{
9265 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
9266 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_BYPASS_HANDLERS);
9267 if (rcStrict == VINF_SUCCESS)
9268 {
9269 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassEx");
9270 if (pcbWritten)
9271 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
9272 }
9273 else if (pVCpu->iem.s.cActiveMappings > 0)
9274 iemMemRollback(pVCpu);
9275
9276 return rcStrict;
9277}
9278
9279
9280VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPUCC pVCpu, uint64_t OpcodeBytesPC,
9281 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
9282{
9283 VBOXSTRICTRC rcStrict;
9284 if ( cbOpcodeBytes
9285 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
9286 {
9287 iemInitDecoder(pVCpu, IEM_F_BYPASS_HANDLERS);
9288#ifdef IEM_WITH_CODE_TLB
9289 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
9290 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
9291 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
9292 pVCpu->iem.s.offCurInstrStart = 0;
9293 pVCpu->iem.s.offInstrNextByte = 0;
9294 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
9295#else
9296 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
9297 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
9298#endif
9299 rcStrict = VINF_SUCCESS;
9300 }
9301 else
9302 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_BYPASS_HANDLERS);
9303 if (rcStrict == VINF_SUCCESS)
9304 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPC");
9305 else if (pVCpu->iem.s.cActiveMappings > 0)
9306 iemMemRollback(pVCpu);
9307
9308 return rcStrict;
9309}
9310
9311
9312/**
9313 * For handling split cacheline lock operations when the host has split-lock
9314 * detection enabled.
9315 *
9316 * This will cause the interpreter to disregard the lock prefix and implicit
9317 * locking (xchg).
9318 *
9319 * @returns Strict VBox status code.
9320 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9321 */
9322VMMDECL(VBOXSTRICTRC) IEMExecOneIgnoreLock(PVMCPUCC pVCpu)
9323{
9324 /*
9325 * Do the decoding and emulation.
9326 */
9327 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_X86_DISREGARD_LOCK);
9328 if (rcStrict == VINF_SUCCESS)
9329 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneIgnoreLock");
9330 else if (pVCpu->iem.s.cActiveMappings > 0)
9331 iemMemRollback(pVCpu);
9332
9333 if (rcStrict != VINF_SUCCESS)
9334 LogFlow(("IEMExecOneIgnoreLock: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9335 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9336 return rcStrict;
9337}
9338
9339
9340/**
9341 * Code common to IEMExecLots and IEMExecRecompilerThreaded that attempts to
9342 * inject a pending TRPM trap.
9343 */
9344VBOXSTRICTRC iemExecInjectPendingTrap(PVMCPUCC pVCpu)
9345{
9346 Assert(TRPMHasTrap(pVCpu));
9347
9348 if ( !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
9349 && !CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx))
9350 {
9351 /** @todo Can we centralize this under CPUMCanInjectInterrupt()? */
9352#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9353 bool fIntrEnabled = CPUMGetGuestGif(&pVCpu->cpum.GstCtx);
9354 if (fIntrEnabled)
9355 {
9356 if (!CPUMIsGuestInNestedHwvirtMode(IEM_GET_CTX(pVCpu)))
9357 fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
9358 else if (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
9359 fIntrEnabled = CPUMIsGuestVmxPhysIntrEnabled(IEM_GET_CTX(pVCpu));
9360 else
9361 {
9362 Assert(CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)));
9363 fIntrEnabled = CPUMIsGuestSvmPhysIntrEnabled(pVCpu, IEM_GET_CTX(pVCpu));
9364 }
9365 }
9366#else
9367 bool fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
9368#endif
9369 if (fIntrEnabled)
9370 {
9371 uint8_t u8TrapNo;
9372 TRPMEVENT enmType;
9373 uint32_t uErrCode;
9374 RTGCPTR uCr2;
9375 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /*pu8InstLen*/, NULL /*fIcebp*/);
9376 AssertRC(rc2);
9377 Assert(enmType == TRPM_HARDWARE_INT);
9378 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /*cbInstr*/);
9379
9380 TRPMResetTrap(pVCpu);
9381
9382#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9383 /* Injecting an event may cause a VM-exit. */
9384 if ( rcStrict != VINF_SUCCESS
9385 && rcStrict != VINF_IEM_RAISED_XCPT)
9386 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
9387#else
9388 NOREF(rcStrict);
9389#endif
9390 }
9391 }
9392
9393 return VINF_SUCCESS;
9394}
9395
9396
9397VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPUCC pVCpu, uint32_t cMaxInstructions, uint32_t cPollRate, uint32_t *pcInstructions)
9398{
9399 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
9400 AssertMsg(RT_IS_POWER_OF_TWO(cPollRate + 1), ("%#x\n", cPollRate));
9401 Assert(cMaxInstructions > 0);
9402
9403 /*
9404 * See if there is an interrupt pending in TRPM, inject it if we can.
9405 */
9406 /** @todo What if we are injecting an exception and not an interrupt? Is that
9407 * possible here? For now we assert it is indeed only an interrupt. */
9408 if (!TRPMHasTrap(pVCpu))
9409 { /* likely */ }
9410 else
9411 {
9412 VBOXSTRICTRC rcStrict = iemExecInjectPendingTrap(pVCpu);
9413 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9414 { /*likely */ }
9415 else
9416 return rcStrict;
9417 }
9418
9419 /*
9420 * Initial decoder init w/ prefetch, then setup setjmp.
9421 */
9422 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9423 if (rcStrict == VINF_SUCCESS)
9424 {
9425#ifdef IEM_WITH_SETJMP
9426 pVCpu->iem.s.cActiveMappings = 0; /** @todo wtf? */
9427 IEM_TRY_SETJMP(pVCpu, rcStrict)
9428#endif
9429 {
9430 /*
9431 * The run loop. We limit ourselves to the caller-specified instruction count.
9432 */
9433 uint32_t cMaxInstructionsGccStupidity = cMaxInstructions;
9434 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
9435 for (;;)
9436 {
9437 /*
9438 * Log the state.
9439 */
9440#ifdef LOG_ENABLED
9441 iemLogCurInstr(pVCpu, true, "IEMExecLots");
9442#endif
9443
9444 /*
9445 * Do the decoding and emulation.
9446 */
9447 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9448 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9449#ifdef VBOX_STRICT
9450 CPUMAssertGuestRFlagsCookie(pVM, pVCpu);
9451#endif
9452 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9453 {
9454 Assert(pVCpu->iem.s.cActiveMappings == 0);
9455 pVCpu->iem.s.cInstructions++;
9456
9457#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9458 /* Perform any VMX nested-guest instruction boundary actions. */
9459 uint64_t fCpu = pVCpu->fLocalForcedActions;
9460 if (!(fCpu & ( VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9461 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)))
9462 { /* likely */ }
9463 else
9464 {
9465 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9466 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9467 fCpu = pVCpu->fLocalForcedActions;
9468 else
9469 {
9470 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9471 break;
9472 }
9473 }
9474#endif
9475 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
9476 {
9477#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
9478 uint64_t fCpu = pVCpu->fLocalForcedActions;
9479#endif
9480 fCpu &= VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
9481 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
9482 | VMCPU_FF_TLB_FLUSH
9483 | VMCPU_FF_UNHALT );
9484
9485 if (RT_LIKELY( ( !fCpu
9486 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
9487 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF) )
9488 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) ))
9489 {
9490 if (--cMaxInstructionsGccStupidity > 0)
9491 {
9492 /* Poll timers every now and then according to the caller's specs. */
9493 if ( (cMaxInstructionsGccStupidity & cPollRate) != 0
9494 || !TMTimerPollBool(pVM, pVCpu))
9495 {
9496 Assert(pVCpu->iem.s.cActiveMappings == 0);
9497 iemReInitDecoder(pVCpu);
9498 continue;
9499 }
9500 }
9501 }
9502 }
9503 Assert(pVCpu->iem.s.cActiveMappings == 0);
9504 }
9505 else if (pVCpu->iem.s.cActiveMappings > 0)
9506 iemMemRollback(pVCpu);
9507 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9508 break;
9509 }
9510 }
9511#ifdef IEM_WITH_SETJMP
9512 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
9513 {
9514 if (pVCpu->iem.s.cActiveMappings > 0)
9515 iemMemRollback(pVCpu);
9516# if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9517 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9518# endif
9519 pVCpu->iem.s.cLongJumps++;
9520 }
9521 IEM_CATCH_LONGJMP_END(pVCpu);
9522#endif
9523
9524 /*
9525 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
9526 */
9527 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
9528 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
9529 }
9530 else
9531 {
9532 if (pVCpu->iem.s.cActiveMappings > 0)
9533 iemMemRollback(pVCpu);
9534
9535#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9536 /*
9537 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
9538 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
9539 */
9540 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9541#endif
9542 }
9543
9544 /*
9545 * Maybe re-enter raw-mode and log.
9546 */
9547 if (rcStrict != VINF_SUCCESS)
9548 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9549 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9550 if (pcInstructions)
9551 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
9552 return rcStrict;
9553}
9554
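
/*
 * Editor's note (added illustration, not part of the original sources): a
 * hypothetical outer-loop step using IEMExecLots().  As the assertion above
 * requires, cPollRate must be a power of two minus one; timers are then
 * polled roughly every cPollRate + 1 instructions.  The function name and
 * the 4096/511 values are illustrative choices only.
 */
#if 0 /* illustrative sketch only */
static VBOXSTRICTRC emSketchRunGuestViaIem(PVMCPUCC pVCpu)
{
    uint32_t     cInstructions = 0;
    VBOXSTRICTRC rcStrict = IEMExecLots(pVCpu, 4096 /*cMaxInstructions*/, 511 /*cPollRate*/, &cInstructions);
    LogFlow(("emSketchRunGuestViaIem: %u instructions, rcStrict=%Rrc\n",
             cInstructions, VBOXSTRICTRC_VAL(rcStrict)));
    return rcStrict;
}
#endif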
9555
9556/**
9557 * Interface used by EMExecuteExec, does exit statistics and limits.
9558 *
9559 * @returns Strict VBox status code.
9560 * @param pVCpu The cross context virtual CPU structure.
9561 * @param fWillExit To be defined.
9562 * @param cMinInstructions Minimum number of instructions to execute before checking for FFs.
9563 * @param cMaxInstructions Maximum number of instructions to execute.
9564 * @param cMaxInstructionsWithoutExits
9565 * The max number of instructions without exits.
9566 * @param pStats Where to return statistics.
9567 */
9568VMMDECL(VBOXSTRICTRC) IEMExecForExits(PVMCPUCC pVCpu, uint32_t fWillExit, uint32_t cMinInstructions, uint32_t cMaxInstructions,
9569 uint32_t cMaxInstructionsWithoutExits, PIEMEXECFOREXITSTATS pStats)
9570{
9571 NOREF(fWillExit); /** @todo define flexible exit crits */
9572
9573 /*
9574 * Initialize return stats.
9575 */
9576 pStats->cInstructions = 0;
9577 pStats->cExits = 0;
9578 pStats->cMaxExitDistance = 0;
9579 pStats->cReserved = 0;
9580
9581 /*
9582 * Initial decoder init w/ prefetch, then setup setjmp.
9583 */
9584 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9585 if (rcStrict == VINF_SUCCESS)
9586 {
9587#ifdef IEM_WITH_SETJMP
9588 pVCpu->iem.s.cActiveMappings = 0; /** @todo wtf?!? */
9589 IEM_TRY_SETJMP(pVCpu, rcStrict)
9590#endif
9591 {
9592#ifdef IN_RING0
9593 bool const fCheckPreemptionPending = !RTThreadPreemptIsPossible() || !RTThreadPreemptIsEnabled(NIL_RTTHREAD);
9594#endif
9595 uint32_t cInstructionSinceLastExit = 0;
9596
9597 /*
9598 * The run loop. We limit ourselves to the caller-specified instruction count.
9599 */
9600 PVM pVM = pVCpu->CTX_SUFF(pVM);
9601 for (;;)
9602 {
9603 /*
9604 * Log the state.
9605 */
9606#ifdef LOG_ENABLED
9607 iemLogCurInstr(pVCpu, true, "IEMExecForExits");
9608#endif
9609
9610 /*
9611 * Do the decoding and emulation.
9612 */
9613 uint32_t const cPotentialExits = pVCpu->iem.s.cPotentialExits;
9614
9615 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9616 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9617
9618 if ( cPotentialExits != pVCpu->iem.s.cPotentialExits
9619 && cInstructionSinceLastExit > 0 /* don't count the first */ )
9620 {
9621 pStats->cExits += 1;
9622 if (cInstructionSinceLastExit > pStats->cMaxExitDistance)
9623 pStats->cMaxExitDistance = cInstructionSinceLastExit;
9624 cInstructionSinceLastExit = 0;
9625 }
9626
9627 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9628 {
9629 Assert(pVCpu->iem.s.cActiveMappings == 0);
9630 pVCpu->iem.s.cInstructions++;
9631 pStats->cInstructions++;
9632 cInstructionSinceLastExit++;
9633
9634#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9635 /* Perform any VMX nested-guest instruction boundary actions. */
9636 uint64_t fCpu = pVCpu->fLocalForcedActions;
9637 if (!(fCpu & ( VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9638 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)))
9639 { /* likely */ }
9640 else
9641 {
9642 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9643 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9644 fCpu = pVCpu->fLocalForcedActions;
9645 else
9646 {
9647 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9648 break;
9649 }
9650 }
9651#endif
9652 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
9653 {
9654#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
9655 uint64_t fCpu = pVCpu->fLocalForcedActions;
9656#endif
9657 fCpu &= VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
9658 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
9659 | VMCPU_FF_TLB_FLUSH
9660 | VMCPU_FF_UNHALT );
9661 if (RT_LIKELY( ( ( !fCpu
9662 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
9663 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF))
9664 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) )
9665 || pStats->cInstructions < cMinInstructions))
9666 {
9667 if (pStats->cInstructions < cMaxInstructions)
9668 {
9669 if (cInstructionSinceLastExit <= cMaxInstructionsWithoutExits)
9670 {
9671#ifdef IN_RING0
9672 if ( !fCheckPreemptionPending
9673 || !RTThreadPreemptIsPending(NIL_RTTHREAD))
9674#endif
9675 {
9676 Assert(pVCpu->iem.s.cActiveMappings == 0);
9677 iemReInitDecoder(pVCpu);
9678 continue;
9679 }
9680#ifdef IN_RING0
9681 rcStrict = VINF_EM_RAW_INTERRUPT;
9682 break;
9683#endif
9684 }
9685 }
9686 }
9687 Assert(!(fCpu & VMCPU_FF_IEM));
9688 }
9689 Assert(pVCpu->iem.s.cActiveMappings == 0);
9690 }
9691 else if (pVCpu->iem.s.cActiveMappings > 0)
9692 iemMemRollback(pVCpu);
9693 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9694 break;
9695 }
9696 }
9697#ifdef IEM_WITH_SETJMP
9698 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
9699 {
9700 if (pVCpu->iem.s.cActiveMappings > 0)
9701 iemMemRollback(pVCpu);
9702 pVCpu->iem.s.cLongJumps++;
9703 }
9704 IEM_CATCH_LONGJMP_END(pVCpu);
9705#endif
9706
9707 /*
9708 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
9709 */
9710 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
9711 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
9712 }
9713 else
9714 {
9715 if (pVCpu->iem.s.cActiveMappings > 0)
9716 iemMemRollback(pVCpu);
9717
9718#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9719 /*
9720 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
9721 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
9722 */
9723 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9724#endif
9725 }
9726
9727 /*
9728 * Maybe re-enter raw-mode and log.
9729 */
9730 if (rcStrict != VINF_SUCCESS)
9731 LogFlow(("IEMExecForExits: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc; ins=%u exits=%u maxdist=%u\n",
9732 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp,
9733 pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict), pStats->cInstructions, pStats->cExits, pStats->cMaxExitDistance));
9734 return rcStrict;
9735}
9736
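
/*
 * Editor's note (added illustration, not part of the original sources): a
 * hypothetical invocation of IEMExecForExits().  fWillExit is passed as zero
 * since it is documented above as "to be defined"; the instruction limits
 * are arbitrary illustrative values and the wrapper name is invented.
 */
#if 0 /* illustrative sketch only */
static VBOXSTRICTRC emSketchExecUntilExit(PVMCPUCC pVCpu, PIEMEXECFOREXITSTATS pStats)
{
    return IEMExecForExits(pVCpu, 0 /*fWillExit*/, 64 /*cMinInstructions*/, 2048 /*cMaxInstructions*/,
                           512 /*cMaxInstructionsWithoutExits*/, pStats);
}
#endif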
9737
9738/**
9739 * Injects a trap, fault, abort, software interrupt or external interrupt.
9740 *
9741 * The parameter list matches TRPMQueryTrapAll pretty closely.
9742 *
9743 * @returns Strict VBox status code.
9744 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9745 * @param u8TrapNo The trap number.
9746 * @param enmType What type is it (trap/fault/abort), software
9747 * interrupt or hardware interrupt.
9748 * @param uErrCode The error code if applicable.
9749 * @param uCr2 The CR2 value if applicable.
9750 * @param cbInstr The instruction length (only relevant for
9751 * software interrupts).
9752 */
9753VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPUCC pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
9754 uint8_t cbInstr)
9755{
9756 iemInitDecoder(pVCpu, 0 /*fExecOpts*/); /** @todo wrong init function! */
9757#ifdef DBGFTRACE_ENABLED
9758 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
9759 u8TrapNo, enmType, uErrCode, uCr2);
9760#endif
9761
9762 uint32_t fFlags;
9763 switch (enmType)
9764 {
9765 case TRPM_HARDWARE_INT:
9766 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
9767 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
9768 uErrCode = uCr2 = 0;
9769 break;
9770
9771 case TRPM_SOFTWARE_INT:
9772 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
9773 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
9774 uErrCode = uCr2 = 0;
9775 break;
9776
9777 case TRPM_TRAP:
9778 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
9779 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
9780 if (u8TrapNo == X86_XCPT_PF)
9781 fFlags |= IEM_XCPT_FLAGS_CR2;
9782 switch (u8TrapNo)
9783 {
9784 case X86_XCPT_DF:
9785 case X86_XCPT_TS:
9786 case X86_XCPT_NP:
9787 case X86_XCPT_SS:
9788 case X86_XCPT_PF:
9789 case X86_XCPT_AC:
9790 case X86_XCPT_GP:
9791 fFlags |= IEM_XCPT_FLAGS_ERR;
9792 break;
9793 }
9794 break;
9795
9796 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9797 }
9798
9799 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
9800
9801 if (pVCpu->iem.s.cActiveMappings > 0)
9802 iemMemRollback(pVCpu);
9803
9804 return rcStrict;
9805}
9806
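
/*
 * Editor's note (added illustration, not part of the original sources): a
 * hypothetical injection of an external (hardware) interrupt via
 * IEMInjectTrap().  For TRPM_HARDWARE_INT the error code, CR2 and instruction
 * length arguments are ignored (see the switch above), so zeros are passed.
 * The wrapper name and the vector 0x20 are illustrative only.
 */
#if 0 /* illustrative sketch only */
static VBOXSTRICTRC emSketchInjectExtInt(PVMCPUCC pVCpu)
{
    return IEMInjectTrap(pVCpu, 0x20 /*u8TrapNo*/, TRPM_HARDWARE_INT,
                         0 /*uErrCode*/, 0 /*uCr2*/, 0 /*cbInstr*/);
}
#endif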
9807
9808/**
9809 * Injects the active TRPM event.
9810 *
9811 * @returns Strict VBox status code.
9812 * @param pVCpu The cross context virtual CPU structure.
9813 */
9814VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPUCC pVCpu)
9815{
9816#ifndef IEM_IMPLEMENTS_TASKSWITCH
9817 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
9818#else
9819 uint8_t u8TrapNo;
9820 TRPMEVENT enmType;
9821 uint32_t uErrCode;
9822 RTGCUINTPTR uCr2;
9823 uint8_t cbInstr;
9824 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr, NULL /* fIcebp */);
9825 if (RT_FAILURE(rc))
9826 return rc;
9827
9828 /** @todo r=ramshankar: Pass ICEBP info. to IEMInjectTrap() below and handle
9829 * ICEBP \#DB injection as a special case. */
9830 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
9831#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
9832 if (rcStrict == VINF_SVM_VMEXIT)
9833 rcStrict = VINF_SUCCESS;
9834#endif
9835#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9836 if (rcStrict == VINF_VMX_VMEXIT)
9837 rcStrict = VINF_SUCCESS;
9838#endif
9839 /** @todo Are there any other codes that imply the event was successfully
9840 * delivered to the guest? See @bugref{6607}. */
9841 if ( rcStrict == VINF_SUCCESS
9842 || rcStrict == VINF_IEM_RAISED_XCPT)
9843 TRPMResetTrap(pVCpu);
9844
9845 return rcStrict;
9846#endif
9847}
9848
9849
9850VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
9851{
9852 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
9853 return VERR_NOT_IMPLEMENTED;
9854}
9855
9856
9857VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
9858{
9859 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
9860 return VERR_NOT_IMPLEMENTED;
9861}
9862
9863
9864/**
9865 * Interface for HM and EM for executing string I/O OUT (write) instructions.
9866 *
9867 * This API ASSUMES that the caller has already verified that the guest code is
9868 * allowed to access the I/O port. (The I/O port is in the DX register in the
9869 * guest state.)
9870 *
9871 * @returns Strict VBox status code.
9872 * @param pVCpu The cross context virtual CPU structure.
9873 * @param cbValue The size of the I/O port access (1, 2, or 4).
9874 * @param enmAddrMode The addressing mode.
9875 * @param fRepPrefix Indicates whether a repeat prefix is used
9876 * (doesn't matter which for this instruction).
9877 * @param cbInstr The instruction length in bytes.
9878 * @param iEffSeg The effective segment register.
9879 * @param fIoChecked Whether the access to the I/O port has been
9880 * checked or not. It's typically checked in the
9881 * HM scenario.
9882 */
9883VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
9884 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
9885{
9886 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
9887 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
9888
9889 /*
9890 * State init.
9891 */
9892 iemInitExec(pVCpu, 0 /*fExecOpts*/);
9893
9894 /*
9895 * Switch orgy for getting to the right handler.
9896 */
9897 VBOXSTRICTRC rcStrict;
9898 if (fRepPrefix)
9899 {
9900 switch (enmAddrMode)
9901 {
9902 case IEMMODE_16BIT:
9903 switch (cbValue)
9904 {
9905 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9906 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9907 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9908 default:
9909 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
9910 }
9911 break;
9912
9913 case IEMMODE_32BIT:
9914 switch (cbValue)
9915 {
9916 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9917 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9918 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9919 default:
9920 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
9921 }
9922 break;
9923
9924 case IEMMODE_64BIT:
9925 switch (cbValue)
9926 {
9927 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9928 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9929 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9930 default:
9931 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
9932 }
9933 break;
9934
9935 default:
9936 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
9937 }
9938 }
9939 else
9940 {
9941 switch (enmAddrMode)
9942 {
9943 case IEMMODE_16BIT:
9944 switch (cbValue)
9945 {
9946 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9947 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9948 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9949 default:
9950 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
9951 }
9952 break;
9953
9954 case IEMMODE_32BIT:
9955 switch (cbValue)
9956 {
9957 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9958 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9959 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9960 default:
9961 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
9962 }
9963 break;
9964
9965 case IEMMODE_64BIT:
9966 switch (cbValue)
9967 {
9968 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9969 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9970 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
9971 default:
9972 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
9973 }
9974 break;
9975
9976 default:
9977 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
9978 }
9979 }
9980
9981 if (pVCpu->iem.s.cActiveMappings)
9982 iemMemRollback(pVCpu);
9983
9984 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
9985}
9986
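
/*
 * Editor's note (added illustration, not part of the original sources): a
 * hypothetical HM exit-handler fragment driving IEMExecStringIoWrite() for a
 * "rep outsb" with 32-bit addressing and the default DS source segment.  The
 * wrapper name is invented; cbInstr would normally come from the VM-exit
 * instruction information and fIoChecked reflects whether the I/O permission
 * checks were already done by the caller.
 */
#if 0 /* illustrative sketch only */
static VBOXSTRICTRC hmSketchHandleRepOutsb(PVMCPUCC pVCpu, uint8_t cbInstr)
{
    return IEMExecStringIoWrite(pVCpu, 1 /*cbValue*/, IEMMODE_32BIT, true /*fRepPrefix*/,
                                cbInstr, X86_SREG_DS /*iEffSeg*/, true /*fIoChecked*/);
}
#endif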
9987
9988/**
9989 * Interface for HM and EM for executing string I/O IN (read) instructions.
9990 *
9991 * This API ASSUMES that the caller has already verified that the guest code is
9992 * allowed to access the I/O port. (The I/O port is in the DX register in the
9993 * guest state.)
9994 *
9995 * @returns Strict VBox status code.
9996 * @param pVCpu The cross context virtual CPU structure.
9997 * @param cbValue The size of the I/O port access (1, 2, or 4).
9998 * @param enmAddrMode The addressing mode.
9999 * @param fRepPrefix Indicates whether a repeat prefix is used
10000 * (doesn't matter which for this instruction).
10001 * @param cbInstr The instruction length in bytes.
10002 * @param fIoChecked Whether the access to the I/O port has been
10003 * checked or not. It's typically checked in the
10004 * HM scenario.
10005 */
10006VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
10007 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
10008{
10009 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10010
10011 /*
10012 * State init.
10013 */
10014 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10015
10016 /*
10017 * Switch orgy for getting to the right handler.
10018 */
10019 VBOXSTRICTRC rcStrict;
10020 if (fRepPrefix)
10021 {
10022 switch (enmAddrMode)
10023 {
10024 case IEMMODE_16BIT:
10025 switch (cbValue)
10026 {
10027 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
10028 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
10029 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
10030 default:
10031 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10032 }
10033 break;
10034
10035 case IEMMODE_32BIT:
10036 switch (cbValue)
10037 {
10038 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
10039 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
10040 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
10041 default:
10042 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10043 }
10044 break;
10045
10046 case IEMMODE_64BIT:
10047 switch (cbValue)
10048 {
10049 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
10050 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
10051 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
10052 default:
10053 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10054 }
10055 break;
10056
10057 default:
10058 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10059 }
10060 }
10061 else
10062 {
10063 switch (enmAddrMode)
10064 {
10065 case IEMMODE_16BIT:
10066 switch (cbValue)
10067 {
10068 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
10069 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
10070 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
10071 default:
10072 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10073 }
10074 break;
10075
10076 case IEMMODE_32BIT:
10077 switch (cbValue)
10078 {
10079 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
10080 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
10081 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
10082 default:
10083 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10084 }
10085 break;
10086
10087 case IEMMODE_64BIT:
10088 switch (cbValue)
10089 {
10090 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
10091 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
10092 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
10093 default:
10094 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10095 }
10096 break;
10097
10098 default:
10099 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10100 }
10101 }
10102
10103 if ( pVCpu->iem.s.cActiveMappings == 0
10104 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
10105 { /* likely */ }
10106 else
10107 {
10108 AssertMsg(!IOM_SUCCESS(rcStrict), ("%#x\n", VBOXSTRICTRC_VAL(rcStrict)));
10109 iemMemRollback(pVCpu);
10110 }
10111 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10112}
10113
10114
10115/**
10116 * Interface for rawmode to execute an OUT (write) instruction.
10117 *
10118 * @returns Strict VBox status code.
10119 * @param pVCpu The cross context virtual CPU structure.
10120 * @param cbInstr The instruction length in bytes.
10121 * @param u16Port The port to write to.
10122 * @param fImm Whether the port is specified using an immediate operand or
10123 * using the implicit DX register.
10124 * @param cbReg The register size.
10125 *
10126 * @remarks In ring-0 not all of the state needs to be synced in.
10127 */
10128VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
10129{
10130 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10131 Assert(cbReg <= 4 && cbReg != 3);
10132
10133 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10134 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_out, u16Port, cbReg,
10135 ((uint8_t)fImm << 7) | 0xf /** @todo never worked with intercepts */);
10136 Assert(!pVCpu->iem.s.cActiveMappings);
10137 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10138}
10139
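
/*
 * Editor's note (added illustration, not part of the original sources): a
 * hypothetical call emulating the one byte "out dx, al" instruction (opcode
 * 0xEE) via IEMExecDecodedOut().  The caller supplies the port value (here
 * assumed to have been read from the guest DX or the exit qualification);
 * the wrapper name is invented for illustration.
 */
#if 0 /* illustrative sketch only */
static VBOXSTRICTRC hmSketchEmulateOutDxAl(PVMCPUCC pVCpu, uint16_t u16Port)
{
    return IEMExecDecodedOut(pVCpu, 1 /*cbInstr*/, u16Port, false /*fImm*/, 1 /*cbReg*/);
}
#endif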
10140
10141/**
10142 * Interface for rawmode to execute an IN (read) instruction.
10143 *
10144 * @returns Strict VBox status code.
10145 * @param pVCpu The cross context virtual CPU structure.
10146 * @param cbInstr The instruction length in bytes.
10147 * @param u16Port The port to read.
10148 * @param fImm Whether the port is specified using an immediate operand or
10149 * using the implicit DX register.
10150 * @param cbReg The register size.
10151 */
10152VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
10153{
10154 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10155 Assert(cbReg <= 4 && cbReg != 3);
10156
10157 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10158 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_in, u16Port, cbReg,
10159 ((uint8_t)fImm << 7) | 0xf /** @todo never worked with intercepts */);
10160 Assert(!pVCpu->iem.s.cActiveMappings);
10161 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10162}
10163
10164
10165/**
10166 * Interface for HM and EM to write to a CRx register.
10167 *
10168 * @returns Strict VBox status code.
10169 * @param pVCpu The cross context virtual CPU structure.
10170 * @param cbInstr The instruction length in bytes.
10171 * @param iCrReg The control register number (destination).
10172 * @param iGReg The general purpose register number (source).
10173 *
10174 * @remarks In ring-0 not all of the state needs to be synced in.
10175 */
10176VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
10177{
10178 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10179 Assert(iCrReg < 16);
10180 Assert(iGReg < 16);
10181
10182 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10183 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
10184 Assert(!pVCpu->iem.s.cActiveMappings);
10185 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10186}
10187
10188
10189/**
10190 * Interface for HM and EM to read from a CRx register.
10191 *
10192 * @returns Strict VBox status code.
10193 * @param pVCpu The cross context virtual CPU structure.
10194 * @param cbInstr The instruction length in bytes.
10195 * @param iGReg The general purpose register number (destination).
10196 * @param iCrReg The control register number (source).
10197 *
10198 * @remarks In ring-0 not all of the state needs to be synced in.
10199 */
10200VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
10201{
10202 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10203 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4
10204 | CPUMCTX_EXTRN_APIC_TPR);
10205 Assert(iCrReg < 16);
10206 Assert(iGReg < 16);
10207
10208 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10209 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
10210 Assert(!pVCpu->iem.s.cActiveMappings);
10211 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10212}
10213
10214
10215/**
10216 * Interface for HM and EM to write to a DRx register.
10217 *
10218 * @returns Strict VBox status code.
10219 * @param pVCpu The cross context virtual CPU structure.
10220 * @param cbInstr The instruction length in bytes.
10221 * @param iDrReg The debug register number (destination).
10222 * @param iGReg The general purpose register number (source).
10223 *
10224 * @remarks In ring-0 not all of the state needs to be synced in.
10225 */
10226VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovDRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iDrReg, uint8_t iGReg)
10227{
10228 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10229 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_DR7);
10230 Assert(iDrReg < 8);
10231 Assert(iGReg < 16);
10232
10233 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10234 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Dd_Rd, iDrReg, iGReg);
10235 Assert(!pVCpu->iem.s.cActiveMappings);
10236 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10237}
10238
10239
10240/**
10241 * Interface for HM and EM to read from a DRx register.
10242 *
10243 * @returns Strict VBox status code.
10244 * @param pVCpu The cross context virtual CPU structure.
10245 * @param cbInstr The instruction length in bytes.
10246 * @param iGReg The general purpose register number (destination).
10247 * @param iDrReg The debug register number (source).
10248 *
10249 * @remarks In ring-0 not all of the state needs to be synced in.
10250 */
10251VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovDRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iDrReg)
10252{
10253 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10254 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_DR7);
10255 Assert(iDrReg < 8);
10256 Assert(iGReg < 16);
10257
10258 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10259 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Dd, iGReg, iDrReg);
10260 Assert(!pVCpu->iem.s.cActiveMappings);
10261 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10262}
10263
10264
10265/**
10266 * Interface for HM and EM to clear the CR0[TS] bit.
10267 *
10268 * @returns Strict VBox status code.
10269 * @param pVCpu The cross context virtual CPU structure.
10270 * @param cbInstr The instruction length in bytes.
10271 *
10272 * @remarks In ring-0 not all of the state needs to be synced in.
10273 */
10274VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPUCC pVCpu, uint8_t cbInstr)
10275{
10276 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10277
10278 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10279 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
10280 Assert(!pVCpu->iem.s.cActiveMappings);
10281 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10282}
10283
10284
10285/**
10286 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
10287 *
10288 * @returns Strict VBox status code.
10289 * @param pVCpu The cross context virtual CPU structure.
10290 * @param cbInstr The instruction length in bytes.
10291 * @param uValue The value to load into CR0.
10292 * @param GCPtrEffDst The guest-linear address if the LMSW instruction has a
10293 * memory operand. Otherwise pass NIL_RTGCPTR.
10294 *
10295 * @remarks In ring-0 not all of the state needs to be synced in.
10296 */
10297VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t uValue, RTGCPTR GCPtrEffDst)
10298{
10299 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10300
10301 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10302 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_lmsw, uValue, GCPtrEffDst);
10303 Assert(!pVCpu->iem.s.cActiveMappings);
10304 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10305}
10306
10307
10308/**
10309 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
10310 *
10311 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
10312 *
10313 * @returns Strict VBox status code.
10314 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10315 * @param cbInstr The instruction length in bytes.
10316 * @remarks In ring-0 not all of the state needs to be synced in.
10317 * @thread EMT(pVCpu)
10318 */
10319VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPUCC pVCpu, uint8_t cbInstr)
10320{
10321 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10322
10323 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10324 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
10325 Assert(!pVCpu->iem.s.cActiveMappings);
10326 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10327}
10328
10329
10330/**
10331 * Interface for HM and EM to emulate the WBINVD instruction.
10332 *
10333 * @returns Strict VBox status code.
10334 * @param pVCpu The cross context virtual CPU structure.
10335 * @param cbInstr The instruction length in bytes.
10336 *
10337 * @remarks In ring-0 not all of the state needs to be synced in.
10338 */
10339VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWbinvd(PVMCPUCC pVCpu, uint8_t cbInstr)
10340{
10341 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10342
10343 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10344 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wbinvd);
10345 Assert(!pVCpu->iem.s.cActiveMappings);
10346 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10347}
10348
10349
10350/**
10351 * Interface for HM and EM to emulate the INVD instruction.
10352 *
10353 * @returns Strict VBox status code.
10354 * @param pVCpu The cross context virtual CPU structure.
10355 * @param cbInstr The instruction length in bytes.
10356 *
10357 * @remarks In ring-0 not all of the state needs to be synced in.
10358 */
10359VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvd(PVMCPUCC pVCpu, uint8_t cbInstr)
10360{
10361 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10362
10363 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10364 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invd);
10365 Assert(!pVCpu->iem.s.cActiveMappings);
10366 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10367}
10368
10369
10370/**
10371 * Interface for HM and EM to emulate the INVLPG instruction.
10372 *
10373 * @returns Strict VBox status code.
10374 * @retval VINF_PGM_SYNC_CR3
10375 *
10376 * @param pVCpu The cross context virtual CPU structure.
10377 * @param cbInstr The instruction length in bytes.
10378 * @param GCPtrPage The effective address of the page to invalidate.
10379 *
10380 * @remarks In ring-0 not all of the state needs to be synced in.
10381 */
10382VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpg(PVMCPUCC pVCpu, uint8_t cbInstr, RTGCPTR GCPtrPage)
10383{
10384 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10385
10386 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10387 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_invlpg, GCPtrPage);
10388 Assert(!pVCpu->iem.s.cActiveMappings);
10389 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10390}
10391
10392
10393/**
10394 * Interface for HM and EM to emulate the INVPCID instruction.
10395 *
10396 * @returns Strict VBox status code.
10397 * @retval VINF_PGM_SYNC_CR3
10398 *
10399 * @param pVCpu The cross context virtual CPU structure.
10400 * @param cbInstr The instruction length in bytes.
10401 * @param iEffSeg The effective segment register.
10402 * @param GCPtrDesc The effective address of the INVPCID descriptor.
10403 * @param uType The invalidation type.
10404 *
10405 * @remarks In ring-0 not all of the state needs to be synced in.
10406 */
10407VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvpcid(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPTR GCPtrDesc,
10408 uint64_t uType)
10409{
10410 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 4);
10411
10412 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10413 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_invpcid, iEffSeg, GCPtrDesc, uType);
10414 Assert(!pVCpu->iem.s.cActiveMappings);
10415 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10416}
10417
10418
10419/**
10420 * Interface for HM and EM to emulate the CPUID instruction.
10421 *
10422 * @returns Strict VBox status code.
10423 *
10424 * @param pVCpu The cross context virtual CPU structure.
10425 * @param cbInstr The instruction length in bytes.
10426 *
10427 * @remarks Not all of the state needs to be synced in, the usual plus RAX and RCX.

10428 */
10429VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedCpuid(PVMCPUCC pVCpu, uint8_t cbInstr)
10430{
10431 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10432 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX);
10433
10434 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10435 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_cpuid);
10436 Assert(!pVCpu->iem.s.cActiveMappings);
10437 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10438}
10439
10440
10441/**
10442 * Interface for HM and EM to emulate the RDPMC instruction.
10443 *
10444 * @returns Strict VBox status code.
10445 *
10446 * @param pVCpu The cross context virtual CPU structure.
10447 * @param cbInstr The instruction length in bytes.
10448 *
10449 * @remarks Not all of the state needs to be synced in.
10450 */
10451VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdpmc(PVMCPUCC pVCpu, uint8_t cbInstr)
10452{
10453 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10454 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
10455
10456 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10457 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdpmc);
10458 Assert(!pVCpu->iem.s.cActiveMappings);
10459 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10460}
10461
10462
10463/**
10464 * Interface for HM and EM to emulate the RDTSC instruction.
10465 *
10466 * @returns Strict VBox status code.
10467 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
10468 *
10469 * @param pVCpu The cross context virtual CPU structure.
10470 * @param cbInstr The instruction length in bytes.
10471 *
10472 * @remarks Not all of the state needs to be synced in.
10473 */
10474VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtsc(PVMCPUCC pVCpu, uint8_t cbInstr)
10475{
10476 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10477 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
10478
10479 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10480 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtsc);
10481 Assert(!pVCpu->iem.s.cActiveMappings);
10482 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10483}
10484
10485
10486/**
10487 * Interface for HM and EM to emulate the RDTSCP instruction.
10488 *
10489 * @returns Strict VBox status code.
10490 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
10491 *
10492 * @param pVCpu The cross context virtual CPU structure.
10493 * @param cbInstr The instruction length in bytes.
10494 *
10495 * @remarks Not all of the state needs to be synced in. Recommended
10496 * to include CPUMCTX_EXTRN_TSC_AUX to avoid an extra fetch call.
10497 */
10498VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtscp(PVMCPUCC pVCpu, uint8_t cbInstr)
10499{
10500 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10501 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_TSC_AUX);
10502
10503 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10504 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtscp);
10505 Assert(!pVCpu->iem.s.cActiveMappings);
10506 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10507}
10508
10509
10510/**
10511 * Interface for HM and EM to emulate the RDMSR instruction.
10512 *
10513 * @returns Strict VBox status code.
10514 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
10515 *
10516 * @param pVCpu The cross context virtual CPU structure.
10517 * @param cbInstr The instruction length in bytes.
10518 *
10519 * @remarks Not all of the state needs to be synced in. Requires RCX and
10520 * (currently) all MSRs.
10521 */
10522VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
10523{
10524 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10525 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_ALL_MSRS);
10526
10527 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10528 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdmsr);
10529 Assert(!pVCpu->iem.s.cActiveMappings);
10530 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10531}
10532
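/*
 * Illustrative only: a sketch of the state-sync contract mentioned in the
 * remarks above, assuming the usual pVCpu->cpum.GstCtx.fExtrn convention.  The
 * caller is expected to have RCX and the MSR state imported before handing the
 * instruction to IEM; the assertion below mirrors part of what IEM_CTX_ASSERT
 * checks inside IEMExecDecodedRdmsr itself.
 *
 *      Assert(!(pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_ALL_MSRS)));
 *      VBOXSTRICTRC rcStrict = IEMExecDecodedRdmsr(pVCpu, 2); // RDMSR is 2 bytes long
 */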
10533
10534/**
10535 * Interface for HM and EM to emulate the WRMSR instruction.
10536 *
10537 * @returns Strict VBox status code.
10538 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
10539 *
10540 * @param pVCpu The cross context virtual CPU structure.
10541 * @param cbInstr The instruction length in bytes.
10542 *
10543 * @remarks Not all of the state needs to be synced in. Requires RCX, RAX, RDX,
10544 * and (currently) all MSRs.
10545 */
10546VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWrmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
10547{
10548 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10549 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK
10550 | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_ALL_MSRS);
10551
10552 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10553 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wrmsr);
10554 Assert(!pVCpu->iem.s.cActiveMappings);
10555 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10556}
10557
10558
10559/**
10560 * Interface for HM and EM to emulate the MONITOR instruction.
10561 *
10562 * @returns Strict VBox status code.
10563 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
10564 *
10565 * @param pVCpu The cross context virtual CPU structure.
10566 * @param cbInstr The instruction length in bytes.
10567 *
10568 * @remarks Not all of the state needs to be synced in.
10569 * @remarks ASSUMES the default segment of DS and no segment override prefixes
10570 * are used.
10571 */
10572VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMonitor(PVMCPUCC pVCpu, uint8_t cbInstr)
10573{
10574 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10575 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);
10576
10577 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10578 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_monitor, X86_SREG_DS);
10579 Assert(!pVCpu->iem.s.cActiveMappings);
10580 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10581}
10582
10583
10584/**
10585 * Interface for HM and EM to emulate the MWAIT instruction.
10586 *
10587 * @returns Strict VBox status code.
10588 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
10589 *
10590 * @param pVCpu The cross context virtual CPU structure.
10591 * @param cbInstr The instruction length in bytes.
10592 *
10593 * @remarks Not all of the state needs to be synced in.
10594 */
10595VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMwait(PVMCPUCC pVCpu, uint8_t cbInstr)
10596{
10597 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10598 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX);
10599
10600 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10601 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_mwait);
10602 Assert(!pVCpu->iem.s.cActiveMappings);
10603 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10604}
10605
10606
10607/**
10608 * Interface for HM and EM to emulate the HLT instruction.
10609 *
10610 * @returns Strict VBox status code.
10611 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
10612 *
10613 * @param pVCpu The cross context virtual CPU structure.
10614 * @param cbInstr The instruction length in bytes.
10615 *
10616 * @remarks Not all of the state needs to be synced in.
10617 */
10618VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedHlt(PVMCPUCC pVCpu, uint8_t cbInstr)
10619{
10620 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10621
10622 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10623 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_hlt);
10624 Assert(!pVCpu->iem.s.cActiveMappings);
10625 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10626}
10627
10628
10629/**
10630 * Checks if IEM is in the process of delivering an event (interrupt or
10631 * exception).
10632 *
10633 * @returns true if we're in the process of raising an interrupt or exception,
10634 * false otherwise.
10635 * @param pVCpu The cross context virtual CPU structure.
10636 * @param puVector Where to store the vector associated with the
10637 * currently delivered event, optional.
10638 * @param pfFlags Where to store the event delivery flags (see
10639 * IEM_XCPT_FLAGS_XXX), optional.
10640 * @param puErr Where to store the error code associated with the
10641 * event, optional.
10642 * @param puCr2 Where to store the CR2 associated with the event,
10643 * optional.
10644 * @remarks The caller should check the flags to determine if the error code and
10645 * CR2 are valid for the event.
10646 */
10647VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPUCC pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr, uint64_t *puCr2)
10648{
10649 bool const fRaisingXcpt = pVCpu->iem.s.cXcptRecursions > 0;
10650 if (fRaisingXcpt)
10651 {
10652 if (puVector)
10653 *puVector = pVCpu->iem.s.uCurXcpt;
10654 if (pfFlags)
10655 *pfFlags = pVCpu->iem.s.fCurXcpt;
10656 if (puErr)
10657 *puErr = pVCpu->iem.s.uCurXcptErr;
10658 if (puCr2)
10659 *puCr2 = pVCpu->iem.s.uCurXcptCr2;
10660 }
10661 return fRaisingXcpt;
10662}
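
/*
 * Illustrative only: a sketch of how a caller might query the event IEM is
 * currently delivering.  The IEM_XCPT_FLAGS_ERR test is an assumption based on
 * the IEM_XCPT_FLAGS_XXX remark above; the caller must not trust uErr or uCr2
 * without checking the corresponding flag bits first.
 *
 *      uint8_t  uVector = 0;
 *      uint32_t fFlags  = 0;
 *      uint32_t uErr    = 0;
 *      uint64_t uCr2    = 0;
 *      if (IEMGetCurrentXcpt(pVCpu, &uVector, &fFlags, &uErr, &uCr2))
 *      {
 *          Log(("Delivering vector %#x fFlags=%#x\n", uVector, fFlags));
 *          if (fFlags & IEM_XCPT_FLAGS_ERR)
 *              Log(("  error code %#x\n", uErr));
 *      }
 */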
10663
10664#ifdef IN_RING3
10665
10666/**
10667 * Handles the unlikely and probably fatal merge cases.
10668 *
10669 * @returns Merged status code.
10670 * @param rcStrict Current EM status code.
10671 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
10672 * with @a rcStrict.
10673 * @param iMemMap The memory mapping index. For error reporting only.
10674 * @param pVCpu The cross context virtual CPU structure of the calling
10675 * thread, for error reporting only.
10676 */
10677DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
10678 unsigned iMemMap, PVMCPUCC pVCpu)
10679{
10680 if (RT_FAILURE_NP(rcStrict))
10681 return rcStrict;
10682
10683 if (RT_FAILURE_NP(rcStrictCommit))
10684 return rcStrictCommit;
10685
10686 if (rcStrict == rcStrictCommit)
10687 return rcStrictCommit;
10688
10689 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
10690 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
10691 pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
10692 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
10693 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
10694 return VERR_IOM_FF_STATUS_IPE;
10695}
10696
10697
10698/**
10699 * Helper for IOMR3ProcessForceFlag.
10700 *
10701 * @returns Merged status code.
10702 * @param rcStrict Current EM status code.
10703 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
10704 * with @a rcStrict.
10705 * @param iMemMap The memory mapping index. For error reporting only.
10706 * @param pVCpu The cross context virtual CPU structure of the calling
10707 * thread, for error reporting only.
10708 */
10709DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPUCC pVCpu)
10710{
10711 /* Simple. */
10712 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
10713 return rcStrictCommit;
10714
10715 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
10716 return rcStrict;
10717
10718 /* EM scheduling status codes. */
10719 if (RT_LIKELY( rcStrict >= VINF_EM_FIRST
10720 && rcStrict <= VINF_EM_LAST))
10721 {
10722 if (RT_LIKELY( rcStrictCommit >= VINF_EM_FIRST
10723 && rcStrictCommit <= VINF_EM_LAST))
10724 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
10725 }
10726
10727 /* Unlikely */
10728 return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
10729}
10730
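/*
 * Illustrative merge outcomes, assuming the usual ordering of the VINF_EM_XXX
 * scheduling codes where a numerically lower value has higher priority:
 *
 *      iemR3MergeStatus(VINF_SUCCESS, VINF_EM_RESCHEDULE, 0, pVCpu) -> VINF_EM_RESCHEDULE
 *      iemR3MergeStatus(VINF_EM_HALT, VINF_SUCCESS,       0, pVCpu) -> VINF_EM_HALT
 *      two different VINF_EM_XXX codes                              -> the lower (higher priority) one
 *      a failure status on either side                              -> that failure status is returned
 */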
10731
10732/**
10733 * Called by force-flag handling code when VMCPU_FF_IEM is set.
10734 *
10735 * @returns Merge between @a rcStrict and what the commit operation returned.
10736 * @param pVM The cross context VM structure.
10737 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10738 * @param rcStrict The status code returned by ring-0 or raw-mode.
10739 */
10740VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
10741{
10742 /*
10743 * Reset the pending commit.
10744 */
10745 AssertMsg( (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
10746 & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
10747 ("%#x %#x %#x\n",
10748 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
10749 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
10750
10751 /*
10752 * Commit the pending bounce buffers (usually just one).
10753 */
10754 unsigned cBufs = 0;
10755 unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
10756 while (iMemMap-- > 0)
10757 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
10758 {
10759 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
10760 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
10761 Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
10762
10763 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
10764 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
10765 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
10766
10767 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
10768 {
10769 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
10770 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
10771 pbBuf,
10772 cbFirst,
10773 PGMACCESSORIGIN_IEM);
10774 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
10775 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
10776 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
10777 VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
10778 }
10779
10780 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
10781 {
10782 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
10783 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
10784 pbBuf + cbFirst,
10785 cbSecond,
10786 PGMACCESSORIGIN_IEM);
10787 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
10788 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
10789 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
10790 VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
10791 }
10792 cBufs++;
10793 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
10794 }
10795
10796 AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
10797 ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
10798 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
10799 pVCpu->iem.s.cActiveMappings = 0;
10800 return rcStrict;
10801}
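
/*
 * Illustrative only: the expected caller side.  The real call site lives in
 * EM's force-flag processing; this sketch merely shows the intended pattern
 * once VMCPU_FF_IEM is found set after returning from ring-0/raw-mode.
 *
 *      if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
 *          rcStrict = IEMR3ProcessForceFlag(pVM, pVCpu, rcStrict);
 */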
10802
10803#endif /* IN_RING3 */
10804