VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@101309

Last change on this file since 101309 was 100966, checked in by vboxsync, 15 months ago

VMM/PGM,IEM: Prepare work for write monitoring page containing recompiled code. bugref:10369

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 445.5 KB
1/* $Id: IEMAll.cpp 100966 2023-08-24 23:23:58Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/** @page pg_iem IEM - Interpreted Execution Manager
30 *
31 * The interpreted execution manager (IEM) is for executing short guest code
32 * sequences that are causing too many exits / virtualization traps. It will
33 * also be used to interpret single instructions, thus replacing the selective
34 * interpreters in EM and IOM.
35 *
36 * Design goals:
37 * - Relatively small footprint, although we favour speed and correctness
38 * over size.
39 * - Reasonably fast.
40 * - Correctly handle lock prefixed instructions.
41 * - Complete instruction set - eventually.
42 * - Refactorable into a recompiler, maybe.
43 * - Replace EMInterpret*.
44 *
45 * Using the existing disassembler has been considered; however, this is thought
46 * to conflict with speed as the disassembler chews things a bit too much while
47 * leaving us with a somewhat complicated state to interpret afterwards.
48 *
49 *
50 * The current code is very much work in progress. You've been warned!
51 *
52 *
53 * @section sec_iem_fpu_instr FPU Instructions
54 *
55 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
56 * same or equivalent instructions on the host FPU. To make life easy, we also
57 * let the FPU prioritize the unmasked exceptions for us. This, however, only
58 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
59 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
60 * can trigger spurious FPU exceptions.
61 *
62 * The guest FPU state is not loaded into the host CPU and kept there till we
63 * leave IEM because the calling conventions have declared an all year open
64 * season on much of the FPU state. For instance an innocent looking call to
65 * memcpy might end up using a whole bunch of XMM or MM registers if the
66 * particular implementation finds it worthwhile.
67 *
68 *
69 * @section sec_iem_logging Logging
70 *
71 * The IEM code uses the "IEM" log group for the main logging. The different
72 * logging levels/flags are generally used for the following purposes:
73 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
74 * - Flow (LogFlow) : Basic enter/exit IEM state info.
75 * - Level 2 (Log2) : ?
76 * - Level 3 (Log3) : More detailed enter/exit IEM state info.
77 * - Level 4 (Log4) : Decoding mnemonics w/ EIP.
78 * - Level 5 (Log5) : Decoding details.
79 * - Level 6 (Log6) : Enables/disables the lockstep comparison with REM.
80 * - Level 7 (Log7) : iret++ execution logging.
81 * - Level 8 (Log8) :
82 * - Level 9 (Log9) :
83 * - Level 10 (Log10): TLBs.
84 * - Level 11 (Log11): Unmasked FPU exceptions.
85 *
86 * The \"IEM_MEM\" log group covers most of memory related details logging,
87 * except for errors and exceptions:
88 * - Level 1 (Log) : Reads.
89 * - Level 2 (Log2) : Read fallbacks.
90 * - Level 3 (Log3) : MemMap read.
91 * - Level 4 (Log4) : MemMap read fallbacks.
92 * - Level 5 (Log5) : Writes.
93 * - Level 6 (Log6) : Write fallbacks.
94 * - Level 7 (Log7) : MemMap writes and read-writes.
95 * - Level 8 (Log8) : MemMap write and read-write fallbacks.
96 * - Level 9 (Log9) : Stack reads.
97 * - Level 10 (Log10): Stack read fallbacks.
98 * - Level 11 (Log11): Stack writes.
99 * - Level 12 (Log12): Stack write fallbacks.
100 * - Flow (LogFlow) :
101 *
102 * The SVM (AMD-V) and VMX (VT-x) code has the following assignments:
103 * - Level 1 (Log) : Errors and other major events.
104 * - Flow (LogFlow) : Misc flow stuff (cleanup?)
105 * - Level 2 (Log2) : VM exits.
106 *
107 * The syscall logging level assignments:
108 * - Level 1: DOS and BIOS.
109 * - Level 2: Windows 3.x
110 * - Level 3: Linux.
111 */
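/*
 * Illustrative sketch (assumption, not part of the original sources): roughly
 * how the "IEM" log level assignments documented above map onto the standard
 * VBox logging macros.  The Log/Log4/Log10/LogFlow macros are assumed to come
 * from VBox/log.h with LOG_GROUP_IEM selected (as done further down); the
 * function and the statements themselves are made-up examples.
 */
#if 0 /* example only */
static void iemLogLevelExample(PVMCPUCC pVCpu)
{
    LogFlow(("iemExample: entering IEM\n"));                            /* Flow    : basic enter/exit state info. */
    Log(("iemExample: raising #GP(0)\n"));                              /* Level 1 : errors, exceptions, interrupts. */
    Log4(("iemExample: decoding at %RX64\n", pVCpu->cpum.GstCtx.rip));  /* Level 4 : decoded mnemonics w/ EIP. */
    Log10(("iemExample: TLB miss\n"));                                  /* Level 10: TLBs. */
}
#endif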
112
113/* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
114#ifdef _MSC_VER
115# pragma warning(disable:4505)
116#endif
117
118
119/*********************************************************************************************************************************
120* Header Files *
121*********************************************************************************************************************************/
122#define LOG_GROUP LOG_GROUP_IEM
123#define VMCPU_INCL_CPUM_GST_CTX
124#include <VBox/vmm/iem.h>
125#include <VBox/vmm/cpum.h>
126#include <VBox/vmm/apic.h>
127#include <VBox/vmm/pdm.h>
128#include <VBox/vmm/pgm.h>
129#include <VBox/vmm/iom.h>
130#include <VBox/vmm/em.h>
131#include <VBox/vmm/hm.h>
132#include <VBox/vmm/nem.h>
133#include <VBox/vmm/gim.h>
134#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
135# include <VBox/vmm/em.h>
136# include <VBox/vmm/hm_svm.h>
137#endif
138#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
139# include <VBox/vmm/hmvmxinline.h>
140#endif
141#include <VBox/vmm/tm.h>
142#include <VBox/vmm/dbgf.h>
143#include <VBox/vmm/dbgftrace.h>
144#include "IEMInternal.h"
145#include <VBox/vmm/vmcc.h>
146#include <VBox/log.h>
147#include <VBox/err.h>
148#include <VBox/param.h>
149#include <VBox/dis.h>
150#include <iprt/asm-math.h>
151#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
152# include <iprt/asm-amd64-x86.h>
153#elif defined(RT_ARCH_ARM64) || defined(RT_ARCH_ARM32)
154# include <iprt/asm-arm.h>
155#endif
156#include <iprt/assert.h>
157#include <iprt/string.h>
158#include <iprt/x86.h>
159
160#include "IEMInline.h"
161
162
163/*********************************************************************************************************************************
164* Structures and Typedefs *
165*********************************************************************************************************************************/
166/**
167 * CPU exception classes.
168 */
169typedef enum IEMXCPTCLASS
170{
171 IEMXCPTCLASS_BENIGN,
172 IEMXCPTCLASS_CONTRIBUTORY,
173 IEMXCPTCLASS_PAGE_FAULT,
174 IEMXCPTCLASS_DOUBLE_FAULT
175} IEMXCPTCLASS;
176
177
178/*********************************************************************************************************************************
179* Global Variables *
180*********************************************************************************************************************************/
181#if defined(IEM_LOG_MEMORY_WRITES)
182/** What IEM just wrote. */
183uint8_t g_abIemWrote[256];
184/** How much IEM just wrote. */
185size_t g_cbIemWrote;
186#endif
187
188
189/*********************************************************************************************************************************
190* Internal Functions *
191*********************************************************************************************************************************/
192static VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,
193 uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT;
194
195
196/**
197 * Calculates IEM_F_BRK_PENDING_XXX (IEM_F_PENDING_BRK_MASK) flags, slow code
198 * path.
199 *
200 * @returns IEM_F_BRK_PENDING_XXX or zero.
201 * @param pVCpu The cross context virtual CPU structure of the
202 * calling thread.
203 *
204 * @note Don't call directly, use iemCalcExecDbgFlags instead.
205 */
206uint32_t iemCalcExecDbgFlagsSlow(PVMCPUCC pVCpu)
207{
208 uint32_t fExec = 0;
209
210 /*
211 * Process guest breakpoints.
212 */
213#define PROCESS_ONE_BP(a_fDr7, a_iBp) do { \
214 if (a_fDr7 & X86_DR7_L_G(a_iBp)) \
215 { \
216 switch (X86_DR7_GET_RW(a_fDr7, a_iBp)) \
217 { \
218 case X86_DR7_RW_EO: \
219 fExec |= IEM_F_PENDING_BRK_INSTR; \
220 break; \
221 case X86_DR7_RW_WO: \
222 case X86_DR7_RW_RW: \
223 fExec |= IEM_F_PENDING_BRK_DATA; \
224 break; \
225 case X86_DR7_RW_IO: \
226 fExec |= IEM_F_PENDING_BRK_X86_IO; \
227 break; \
228 } \
229 } \
230 } while (0)
231
232 uint32_t const fGstDr7 = (uint32_t)pVCpu->cpum.GstCtx.dr[7];
233 if (fGstDr7 & X86_DR7_ENABLED_MASK)
234 {
235 PROCESS_ONE_BP(fGstDr7, 0);
236 PROCESS_ONE_BP(fGstDr7, 1);
237 PROCESS_ONE_BP(fGstDr7, 2);
238 PROCESS_ONE_BP(fGstDr7, 3);
239 }
240
241 /*
242 * Process hypervisor breakpoints.
243 */
244 uint32_t const fHyperDr7 = DBGFBpGetDR7(pVCpu->CTX_SUFF(pVM));
245 if (fHyperDr7 & X86_DR7_ENABLED_MASK)
246 {
247 PROCESS_ONE_BP(fHyperDr7, 0);
248 PROCESS_ONE_BP(fHyperDr7, 1);
249 PROCESS_ONE_BP(fHyperDr7, 2);
250 PROCESS_ONE_BP(fHyperDr7, 3);
251 }
252
253 return fExec;
254}
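/* Worked example (illustration, not from the original sources): a guest DR7
 * with L0 set and RW0 = 00 (execute only) makes PROCESS_ONE_BP() above add
 * IEM_F_PENDING_BRK_INSTR, while L1 set with RW1 = 01 (write only) adds
 * IEM_F_PENDING_BRK_DATA. */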
255
256
257/**
258 * Initializes the decoder state.
259 *
260 * iemReInitDecoder is mostly a copy of this function.
261 *
262 * @param pVCpu The cross context virtual CPU structure of the
263 * calling thread.
264 * @param fExecOpts Optional execution flags:
265 * - IEM_F_BYPASS_HANDLERS
266 * - IEM_F_X86_DISREGARD_LOCK
267 */
268DECLINLINE(void) iemInitDecoder(PVMCPUCC pVCpu, uint32_t fExecOpts)
269{
270 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
271 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
272 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
273 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
274 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
275 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
276 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
277 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
278 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
279 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
280
281 /* Execution state: */
282 uint32_t fExec;
283 pVCpu->iem.s.fExec = fExec = iemCalcExecFlags(pVCpu) | fExecOpts;
284
285 /* Decoder state: */
286 pVCpu->iem.s.enmDefAddrMode = fExec & IEM_F_MODE_CPUMODE_MASK; /** @todo check if this is correct... */
287 pVCpu->iem.s.enmEffAddrMode = fExec & IEM_F_MODE_CPUMODE_MASK;
288 if ((fExec & IEM_F_MODE_CPUMODE_MASK) != IEMMODE_64BIT)
289 {
290 pVCpu->iem.s.enmDefOpSize = fExec & IEM_F_MODE_CPUMODE_MASK; /** @todo check if this is correct... */
291 pVCpu->iem.s.enmEffOpSize = fExec & IEM_F_MODE_CPUMODE_MASK;
292 }
293 else
294 {
295 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
296 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
297 }
298 pVCpu->iem.s.fPrefixes = 0;
299 pVCpu->iem.s.uRexReg = 0;
300 pVCpu->iem.s.uRexB = 0;
301 pVCpu->iem.s.uRexIndex = 0;
302 pVCpu->iem.s.idxPrefix = 0;
303 pVCpu->iem.s.uVex3rdReg = 0;
304 pVCpu->iem.s.uVexLength = 0;
305 pVCpu->iem.s.fEvexStuff = 0;
306 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
307#ifdef IEM_WITH_CODE_TLB
308 pVCpu->iem.s.pbInstrBuf = NULL;
309 pVCpu->iem.s.offInstrNextByte = 0;
310 pVCpu->iem.s.offCurInstrStart = 0;
311# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
312 pVCpu->iem.s.offOpcode = 0;
313# endif
314# ifdef VBOX_STRICT
315 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
316 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
317 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
318 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
319# endif
320#else
321 pVCpu->iem.s.offOpcode = 0;
322 pVCpu->iem.s.cbOpcode = 0;
323#endif
324 pVCpu->iem.s.offModRm = 0;
325 pVCpu->iem.s.cActiveMappings = 0;
326 pVCpu->iem.s.iNextMapping = 0;
327 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
328
329#ifdef DBGFTRACE_ENABLED
330 switch (IEM_GET_CPU_MODE(pVCpu))
331 {
332 case IEMMODE_64BIT:
333 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.rip);
334 break;
335 case IEMMODE_32BIT:
336 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
337 break;
338 case IEMMODE_16BIT:
339 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
340 break;
341 }
342#endif
343}
344
345
346/**
347 * Reinitializes the decoder state 2nd+ loop of IEMExecLots.
348 *
349 * This is mostly a copy of iemInitDecoder.
350 *
351 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
352 */
353DECLINLINE(void) iemReInitDecoder(PVMCPUCC pVCpu)
354{
355 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
356 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
357 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
358 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
359 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
360 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
361 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
362 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
363 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
364
365 /* ASSUMES: Anyone changing CPU state affecting the fExec bits will update them! */
366 AssertMsg((pVCpu->iem.s.fExec & ~IEM_F_USER_OPTS) == iemCalcExecFlags(pVCpu),
367 ("fExec=%#x iemCalcExecModeFlags=%#x\n", pVCpu->iem.s.fExec, iemCalcExecFlags(pVCpu)));
368
369 IEMMODE const enmMode = IEM_GET_CPU_MODE(pVCpu);
370 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
371 pVCpu->iem.s.enmEffAddrMode = enmMode;
372 if (enmMode != IEMMODE_64BIT)
373 {
374 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
375 pVCpu->iem.s.enmEffOpSize = enmMode;
376 }
377 else
378 {
379 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
380 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
381 }
382 pVCpu->iem.s.fPrefixes = 0;
383 pVCpu->iem.s.uRexReg = 0;
384 pVCpu->iem.s.uRexB = 0;
385 pVCpu->iem.s.uRexIndex = 0;
386 pVCpu->iem.s.idxPrefix = 0;
387 pVCpu->iem.s.uVex3rdReg = 0;
388 pVCpu->iem.s.uVexLength = 0;
389 pVCpu->iem.s.fEvexStuff = 0;
390 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
391#ifdef IEM_WITH_CODE_TLB
392 if (pVCpu->iem.s.pbInstrBuf)
393 {
394 uint64_t off = (enmMode == IEMMODE_64BIT
395 ? pVCpu->cpum.GstCtx.rip
396 : pVCpu->cpum.GstCtx.eip + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base)
397 - pVCpu->iem.s.uInstrBufPc;
398 if (off < pVCpu->iem.s.cbInstrBufTotal)
399 {
400 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
401 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
402 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
403 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
404 else
405 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
406 }
407 else
408 {
409 pVCpu->iem.s.pbInstrBuf = NULL;
410 pVCpu->iem.s.offInstrNextByte = 0;
411 pVCpu->iem.s.offCurInstrStart = 0;
412 pVCpu->iem.s.cbInstrBuf = 0;
413 pVCpu->iem.s.cbInstrBufTotal = 0;
414 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
415 }
416 }
417 else
418 {
419 pVCpu->iem.s.offInstrNextByte = 0;
420 pVCpu->iem.s.offCurInstrStart = 0;
421 pVCpu->iem.s.cbInstrBuf = 0;
422 pVCpu->iem.s.cbInstrBufTotal = 0;
423# ifdef VBOX_STRICT
424 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
425# endif
426 }
427# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
428 pVCpu->iem.s.offOpcode = 0;
429# endif
430#else /* !IEM_WITH_CODE_TLB */
431 pVCpu->iem.s.cbOpcode = 0;
432 pVCpu->iem.s.offOpcode = 0;
433#endif /* !IEM_WITH_CODE_TLB */
434 pVCpu->iem.s.offModRm = 0;
435 Assert(pVCpu->iem.s.cActiveMappings == 0);
436 pVCpu->iem.s.iNextMapping = 0;
437 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
438 Assert(!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS));
439
440#ifdef DBGFTRACE_ENABLED
441 switch (enmMode)
442 {
443 case IEMMODE_64BIT:
444 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.rip);
445 break;
446 case IEMMODE_32BIT:
447 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
448 break;
449 case IEMMODE_16BIT:
450 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
451 break;
452 }
453#endif
454}
455
456
457
458/**
459 * Prefetches opcodes the first time, when starting execution.
460 *
461 * @returns Strict VBox status code.
462 * @param pVCpu The cross context virtual CPU structure of the
463 * calling thread.
464 * @param fExecOpts Optional execution flags:
465 * - IEM_F_BYPASS_HANDLERS
466 * - IEM_F_X86_DISREGARD_LOCK
467 */
468static VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPUCC pVCpu, uint32_t fExecOpts) RT_NOEXCEPT
469{
470 iemInitDecoder(pVCpu, fExecOpts);
471
472#ifndef IEM_WITH_CODE_TLB
473 /*
474 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
475 *
476 * First translate CS:rIP to a physical address.
477 *
478 * Note! The iemOpcodeFetchMoreBytes code depends on this here code to fetch
479 * all relevant bytes from the first page, as it ASSUMES it's only ever
480 * called for dealing with CS.LIM, page crossing and instructions that
481 * are too long.
482 */
483 uint32_t cbToTryRead;
484 RTGCPTR GCPtrPC;
485 if (IEM_IS_64BIT_CODE(pVCpu))
486 {
487 cbToTryRead = GUEST_PAGE_SIZE;
488 GCPtrPC = pVCpu->cpum.GstCtx.rip;
489 if (IEM_IS_CANONICAL(GCPtrPC))
490 cbToTryRead = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
491 else
492 return iemRaiseGeneralProtectionFault0(pVCpu);
493 }
494 else
495 {
496 uint32_t GCPtrPC32 = pVCpu->cpum.GstCtx.eip;
497 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu), ("%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
498 if (GCPtrPC32 <= pVCpu->cpum.GstCtx.cs.u32Limit)
499 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrPC32 + 1;
500 else
501 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
502 if (cbToTryRead) { /* likely */ }
503 else /* overflowed */
504 {
505 Assert(GCPtrPC32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
506 cbToTryRead = UINT32_MAX;
507 }
508 GCPtrPC = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrPC32;
509 Assert(GCPtrPC <= UINT32_MAX);
510 }
511
512 PGMPTWALK Walk;
513 int rc = PGMGstGetPage(pVCpu, GCPtrPC, &Walk);
514 if (RT_SUCCESS(rc))
515 Assert(Walk.fSucceeded); /* probable. */
516 else
517 {
518 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
519# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
520 if (Walk.fFailed & PGM_WALKFAIL_EPT)
521 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
522# endif
523 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, rc);
524 }
525 if ((Walk.fEffective & X86_PTE_US) || IEM_GET_CPL(pVCpu) != 3) { /* likely */ }
526 else
527 {
528 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
529# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
530 if (Walk.fFailed & PGM_WALKFAIL_EPT)
531 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
532# endif
533 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
534 }
535 if (!(Walk.fEffective & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
536 else
537 {
538 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
539# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
540 if (Walk.fFailed & PGM_WALKFAIL_EPT)
541 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
542# endif
543 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
544 }
545 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
546 /** @todo Check reserved bits and such stuff. PGM is better at doing
547 * that, so do it when implementing the guest virtual address
548 * TLB... */
549
550 /*
551 * Read the bytes at this address.
552 */
553 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
554 if (cbToTryRead > cbLeftOnPage)
555 cbToTryRead = cbLeftOnPage;
556 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
557 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
558
559 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
560 {
561 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
562 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
563 { /* likely */ }
564 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
565 {
566 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
567 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
568 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
569 }
570 else
571 {
572 Log((RT_SUCCESS(rcStrict)
573 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
574 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
575 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
576 return rcStrict;
577 }
578 }
579 else
580 {
581 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
582 if (RT_SUCCESS(rc))
583 { /* likely */ }
584 else
585 {
586 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
587 GCPtrPC, GCPhys, cbToTryRead, rc));
588 return rc;
589 }
590 }
591 pVCpu->iem.s.cbOpcode = cbToTryRead;
592#endif /* !IEM_WITH_CODE_TLB */
593 return VINF_SUCCESS;
594}
595
596
597/**
598 * Invalidates the IEM TLBs.
599 *
600 * This is called internally as well as by PGM when moving GC mappings.
601 *
602 * @param pVCpu The cross context virtual CPU structure of the calling
603 * thread.
604 */
605VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPUCC pVCpu)
606{
607#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
608 Log10(("IEMTlbInvalidateAll\n"));
609# ifdef IEM_WITH_CODE_TLB
610 pVCpu->iem.s.cbInstrBufTotal = 0;
611 pVCpu->iem.s.CodeTlb.uTlbRevision += IEMTLB_REVISION_INCR;
612 if (pVCpu->iem.s.CodeTlb.uTlbRevision != 0)
613 { /* very likely */ }
614 else
615 {
616 pVCpu->iem.s.CodeTlb.uTlbRevision = IEMTLB_REVISION_INCR;
617 unsigned i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
618 while (i-- > 0)
619 pVCpu->iem.s.CodeTlb.aEntries[i].uTag = 0;
620 }
621# endif
622
623# ifdef IEM_WITH_DATA_TLB
624 pVCpu->iem.s.DataTlb.uTlbRevision += IEMTLB_REVISION_INCR;
625 if (pVCpu->iem.s.DataTlb.uTlbRevision != 0)
626 { /* very likely */ }
627 else
628 {
629 pVCpu->iem.s.DataTlb.uTlbRevision = IEMTLB_REVISION_INCR;
630 unsigned i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
631 while (i-- > 0)
632 pVCpu->iem.s.DataTlb.aEntries[i].uTag = 0;
633 }
634# endif
635#else
636 RT_NOREF(pVCpu);
637#endif
638}
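/*
 * Illustrative sketch (assumption, not from the original sources): what the
 * revision bump above achieves.  A TLB entry only counts as a hit when its tag
 * includes the current uTlbRevision, so incrementing the revision invalidates
 * every entry in O(1); the entries are only scrubbed explicitly when the
 * revision counter wraps back to zero.  The helper name below is made up.
 */
#if 0 /* example only */
DECLINLINE(bool) iemTlbIsHitExample(PVMCPUCC pVCpu, RTGCPTR GCPtr)
{
    uint64_t const uTag = IEMTLB_CALC_TAG(&pVCpu->iem.s.DataTlb, GCPtr); /* page tag | current revision */
    return IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag)->uTag == uTag;
}
#endif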
639
640
641/**
642 * Invalidates a page in the TLBs.
643 *
644 * @param pVCpu The cross context virtual CPU structure of the calling
645 * thread.
646 * @param GCPtr The address of the page to invalidate
647 * @thread EMT(pVCpu)
648 */
649VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPUCC pVCpu, RTGCPTR GCPtr)
650{
651#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
652 Log10(("IEMTlbInvalidatePage: GCPtr=%RGv\n", GCPtr));
653 GCPtr = IEMTLB_CALC_TAG_NO_REV(GCPtr);
654 Assert(!(GCPtr >> (48 - X86_PAGE_SHIFT)));
655 uintptr_t const idx = IEMTLB_TAG_TO_INDEX(GCPtr);
656
657# ifdef IEM_WITH_CODE_TLB
658 if (pVCpu->iem.s.CodeTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.CodeTlb.uTlbRevision))
659 {
660 pVCpu->iem.s.CodeTlb.aEntries[idx].uTag = 0;
661 if (GCPtr == IEMTLB_CALC_TAG_NO_REV(pVCpu->iem.s.uInstrBufPc))
662 pVCpu->iem.s.cbInstrBufTotal = 0;
663 }
664# endif
665
666# ifdef IEM_WITH_DATA_TLB
667 if (pVCpu->iem.s.DataTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.DataTlb.uTlbRevision))
668 pVCpu->iem.s.DataTlb.aEntries[idx].uTag = 0;
669# endif
670#else
671 NOREF(pVCpu); NOREF(GCPtr);
672#endif
673}
674
675
676#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
677/**
678 * Invalidates both TLBs the slow way following a rollover.
679 *
680 * Worker for IEMTlbInvalidateAllPhysical,
681 * IEMTlbInvalidateAllPhysicalAllCpus, iemOpcodeFetchBytesJmp, iemMemMap,
682 * iemMemMapJmp and others.
683 *
684 * @thread EMT(pVCpu)
685 */
686static void IEMTlbInvalidateAllPhysicalSlow(PVMCPUCC pVCpu)
687{
688 Log10(("IEMTlbInvalidateAllPhysicalSlow\n"));
689 ASMAtomicWriteU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
690 ASMAtomicWriteU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
691
692 unsigned i;
693# ifdef IEM_WITH_CODE_TLB
694 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
695 while (i-- > 0)
696 {
697 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
698 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~( IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
699 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
700 }
701# endif
702# ifdef IEM_WITH_DATA_TLB
703 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
704 while (i-- > 0)
705 {
706 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
707 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~( IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
708 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
709 }
710# endif
711
712}
713#endif
714
715
716/**
717 * Invalidates the host physical aspects of the IEM TLBs.
718 *
719 * This is called internally as well as by PGM when moving GC mappings.
720 *
721 * @param pVCpu The cross context virtual CPU structure of the calling
722 * thread.
723 * @note Currently not used.
724 */
725VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPUCC pVCpu)
726{
727#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
728 /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
729 Log10(("IEMTlbInvalidateAllPhysical\n"));
730
731# ifdef IEM_WITH_CODE_TLB
732 pVCpu->iem.s.cbInstrBufTotal = 0;
733# endif
734 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
735 if (RT_LIKELY(uTlbPhysRev > IEMTLB_PHYS_REV_INCR * 2))
736 {
737 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
738 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
739 }
740 else
741 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
742#else
743 NOREF(pVCpu);
744#endif
745}
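/*
 * Illustrative sketch (assumption, not from the original sources): how the
 * physical revision is consumed.  The host mapping info cached in a TLB entry
 * is only trusted when the IEMTLBE_F_PHYS_REV bits in fFlagsAndPhysRev match
 * the TLB's current uTlbPhysRev, so the increment above lazily invalidates the
 * physical aspects of all entries (see iemOpcodeFetchBytesJmp below).  The
 * helper name is made up.
 */
#if 0 /* example only */
DECLINLINE(bool) iemTlbPhysInfoStillValidExample(PVMCPUCC pVCpu, PIEMTLBENTRY pTlbe)
{
    return (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev;
}
#endif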
746
747
748/**
749 * Invalidates the host physical aspects of the IEM TLBs.
750 *
751 * This is called internally as well as by PGM when moving GC mappings.
752 *
753 * @param pVM The cross context VM structure.
754 * @param idCpuCaller The ID of the calling EMT if available to the caller,
755 * otherwise NIL_VMCPUID.
756 * @param enmReason The reason we're called.
757 *
758 * @remarks Caller holds the PGM lock.
759 */
760VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVMCC pVM, VMCPUID idCpuCaller, IEMTLBPHYSFLUSHREASON enmReason)
761{
762#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
763 PVMCPUCC const pVCpuCaller = idCpuCaller >= pVM->cCpus ? VMMGetCpu(pVM) : VMMGetCpuById(pVM, idCpuCaller);
764 if (pVCpuCaller)
765 VMCPU_ASSERT_EMT(pVCpuCaller);
766 Log10(("IEMTlbInvalidateAllPhysicalAllCpus: %d\n", enmReason)); RT_NOREF(enmReason);
767
768 VMCC_FOR_EACH_VMCPU(pVM)
769 {
770# ifdef IEM_WITH_CODE_TLB
771 if (pVCpuCaller == pVCpu)
772 pVCpu->iem.s.cbInstrBufTotal = 0;
773# endif
774
775 uint64_t const uTlbPhysRevPrev = ASMAtomicUoReadU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev);
776 uint64_t uTlbPhysRevNew = uTlbPhysRevPrev + IEMTLB_PHYS_REV_INCR;
777 if (RT_LIKELY(uTlbPhysRevNew > IEMTLB_PHYS_REV_INCR * 2))
778 { /* likely */}
779 else if (pVCpuCaller == pVCpu)
780 uTlbPhysRevNew = IEMTLB_PHYS_REV_INCR;
781 else
782 {
783 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
784 continue;
785 }
786 ASMAtomicCmpXchgU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev);
787 ASMAtomicCmpXchgU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev);
788 }
789 VMCC_FOR_EACH_VMCPU_END(pVM);
790
791#else
792 RT_NOREF(pVM, idCpuCaller, enmReason);
793#endif
794}
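/* Note (added clarification, hedged): the ASMAtomicCmpXchgU64 calls above only
 * write the new revision if it still equals the value read beforehand, so a
 * cross-CPU flush does not clobber a concurrent bump performed by the target
 * EMT itself; that case is handled by the slow path instead. */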
795
796
797/**
798 * Flushes the prefetch buffer, light version.
799 */
800void iemOpcodeFlushLight(PVMCPUCC pVCpu, uint8_t cbInstr)
801{
802#ifndef IEM_WITH_CODE_TLB
803 pVCpu->iem.s.cbOpcode = cbInstr;
804#else
805 RT_NOREF(pVCpu, cbInstr);
806#endif
807}
808
809
810/**
811 * Flushes the prefetch buffer, heavy version.
812 */
813void iemOpcodeFlushHeavy(PVMCPUCC pVCpu, uint8_t cbInstr)
814{
815#ifndef IEM_WITH_CODE_TLB
816 pVCpu->iem.s.cbOpcode = cbInstr; /* Note! SVM and VT-x may set this to zero on exit, rather than the instruction length. */
817#elif 1
818 pVCpu->iem.s.pbInstrBuf = NULL;
819 pVCpu->iem.s.cbInstrBufTotal = 0;
820 RT_NOREF(cbInstr);
821#else
822 RT_NOREF(pVCpu, cbInstr);
823#endif
824}
825
826
827
828#ifdef IEM_WITH_CODE_TLB
829
830/**
831 * Tries to fetch @a cbDst opcode bytes, raising the appropriate exception on
832 * failure and jumping.
833 *
834 * We end up here for a number of reasons:
835 * - pbInstrBuf isn't yet initialized.
836 * - Advancing beyond the buffer boundary (e.g. crossing a page).
837 * - Advancing beyond the CS segment limit.
838 * - Fetching from non-mappable page (e.g. MMIO).
839 *
840 * @param pVCpu The cross context virtual CPU structure of the
841 * calling thread.
842 * @param pvDst Where to return the bytes.
843 * @param cbDst Number of bytes to read. A value of zero is
844 * allowed for initializing pbInstrBuf (the
845 * recompiler does this). In this case it is best
846 * to set pbInstrBuf to NULL prior to the call.
847 */
848void iemOpcodeFetchBytesJmp(PVMCPUCC pVCpu, size_t cbDst, void *pvDst) IEM_NOEXCEPT_MAY_LONGJMP
849{
850# ifdef IN_RING3
851 for (;;)
852 {
853 Assert(cbDst <= 8);
854 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
855
856 /*
857 * We might have a partial buffer match, deal with that first to make the
858 * rest simpler. This is the first part of the cross page/buffer case.
859 */
860 if (pVCpu->iem.s.pbInstrBuf != NULL)
861 {
862 if (offBuf < pVCpu->iem.s.cbInstrBuf)
863 {
864 Assert(offBuf + cbDst > pVCpu->iem.s.cbInstrBuf);
865 uint32_t const cbCopy = pVCpu->iem.s.cbInstrBuf - pVCpu->iem.s.offInstrNextByte;
866 memcpy(pvDst, &pVCpu->iem.s.pbInstrBuf[offBuf], cbCopy);
867
868 cbDst -= cbCopy;
869 pvDst = (uint8_t *)pvDst + cbCopy;
870 offBuf += cbCopy;
871 pVCpu->iem.s.offInstrNextByte += offBuf;
872 }
873 }
874
875 /*
876 * Check segment limit, figuring how much we're allowed to access at this point.
877 *
878 * We will fault immediately if RIP is past the segment limit / in non-canonical
879 * territory. If we do continue, there are one or more bytes to read before we
880 * end up in trouble and we need to do that first before faulting.
881 */
882 RTGCPTR GCPtrFirst;
883 uint32_t cbMaxRead;
884 if (IEM_IS_64BIT_CODE(pVCpu))
885 {
886 GCPtrFirst = pVCpu->cpum.GstCtx.rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
887 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
888 { /* likely */ }
889 else
890 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
891 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
892 }
893 else
894 {
895 GCPtrFirst = pVCpu->cpum.GstCtx.eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
896 /* Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu)); - this is allowed */
897 if (RT_LIKELY((uint32_t)GCPtrFirst <= pVCpu->cpum.GstCtx.cs.u32Limit))
898 { /* likely */ }
899 else /** @todo For CPUs older than the 386, we should not necessarily generate \#GP here but wrap around! */
900 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
901 cbMaxRead = pVCpu->cpum.GstCtx.cs.u32Limit - (uint32_t)GCPtrFirst + 1;
902 if (cbMaxRead != 0)
903 { /* likely */ }
904 else
905 {
906 /* Overflowed because address is 0 and limit is max. */
907 Assert(GCPtrFirst == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
908 cbMaxRead = X86_PAGE_SIZE;
909 }
910 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base;
911 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
912 if (cbMaxRead2 < cbMaxRead)
913 cbMaxRead = cbMaxRead2;
914 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
915 }
916
917 /*
918 * Get the TLB entry for this piece of code.
919 */
920 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.CodeTlb, GCPtrFirst);
921 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.CodeTlb, uTag);
922 if (pTlbe->uTag == uTag)
923 {
924 /* likely when executing lots of code, otherwise unlikely */
925# ifdef VBOX_WITH_STATISTICS
926 pVCpu->iem.s.CodeTlb.cTlbHits++;
927# endif
928 }
929 else
930 {
931 pVCpu->iem.s.CodeTlb.cTlbMisses++;
932 PGMPTWALK Walk;
933 int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &Walk);
934 if (RT_FAILURE(rc))
935 {
936#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
937 /** @todo Nested VMX: Need to handle EPT violation/misconfig here? */
938 Assert(!(Walk.fFailed & PGM_WALKFAIL_EPT));
939#endif
940 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
941 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, rc);
942 }
943
944 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
945 Assert(Walk.fSucceeded);
946 pTlbe->uTag = uTag;
947 pTlbe->fFlagsAndPhysRev = (~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A))
948 | (Walk.fEffective >> X86_PTE_PAE_BIT_NX);
949 pTlbe->GCPhys = Walk.GCPhys;
950 pTlbe->pbMappingR3 = NULL;
951 }
952
953 /*
954 * Check TLB page table level access flags.
955 */
956 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
957 {
958 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && IEM_GET_CPL(pVCpu) == 3)
959 {
960 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
961 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
962 }
963 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
964 {
965 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrFirst));
966 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
967 }
968 }
969
970 /*
971 * Look up the physical page info if necessary.
972 */
973 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
974 { /* not necessary */ }
975 else
976 {
977 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
978 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
979 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
980 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
981 AssertCompile(PGMIEMGCPHYS2PTR_F_CODE_PAGE == IEMTLBE_F_PG_CODE_PAGE);
982 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
983 { /* likely */ }
984 else
985 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
986 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
987 | IEMTLBE_F_NO_MAPPINGR3
988 | IEMTLBE_F_PG_NO_READ
989 | IEMTLBE_F_PG_NO_WRITE
990 | IEMTLBE_F_PG_UNASSIGNED
991 | IEMTLBE_F_PG_CODE_PAGE);
992 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
993 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
994 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
995 }
996
997# if defined(IN_RING3) || defined(IN_RING0) /** @todo fixme */
998 /*
999 * Try to do a direct read using the pbMappingR3 pointer.
1000 */
1001 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
1002 == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1003 {
1004 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1005 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
1006 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
1007 {
1008 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
1009 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
1010 }
1011 else
1012 {
1013 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1014 if (cbInstr + (uint32_t)cbDst <= 15)
1015 {
1016 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
1017 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1018 }
1019 else
1020 {
1021 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0)\n",
1022 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, cbDst));
1023 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1024 }
1025 }
1026 if (cbDst <= cbMaxRead)
1027 {
1028 pVCpu->iem.s.fTbCrossedPage |= offPg == 0 || pVCpu->iem.s.fTbBranched != 0; /** @todo Spurious load effect on branch handling? */
1029 pVCpu->iem.s.GCPhysInstrBufPrev = pVCpu->iem.s.GCPhysInstrBuf;
1030
1031 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
1032 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1033 pVCpu->iem.s.GCPhysInstrBuf = pTlbe->GCPhys;
1034 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
1035 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
1036 return;
1037 }
1038 pVCpu->iem.s.pbInstrBuf = NULL;
1039
1040 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
1041 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
1042 }
1043# else
1044# error "refactor as needed"
1045 /*
1046 * There is no special read handling, so we can read a bit more and
1047 * put it in the prefetch buffer.
1048 */
1049 if ( cbDst < cbMaxRead
1050 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1051 {
1052 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
1053 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
1054 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1055 { /* likely */ }
1056 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1057 {
1058 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1059 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1060 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1061 AssertStmt(rcStrict == VINF_SUCCESS, IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict)));
1062 }
1063 else
1064 {
1065 Log((RT_SUCCESS(rcStrict)
1066 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1067 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1068 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1069 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1070 }
1071 }
1072# endif
1073 /*
1074 * Special read handling, so only read exactly what's needed.
1075 * This is a highly unlikely scenario.
1076 */
1077 else
1078 {
1079 pVCpu->iem.s.CodeTlb.cTlbSlowReadPath++;
1080
1081 /* Check instruction length. */
1082 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1083 if (RT_LIKELY(cbInstr + cbDst <= 15))
1084 { /* likely */ }
1085 else
1086 {
1087 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0) [slow]\n",
1088 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, cbDst));
1089 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1090 }
1091
1092 /* Do the reading. */
1093 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
1094 if (cbToRead > 0)
1095 {
1096 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
1097 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
1098 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1099 { /* likely */ }
1100 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1101 {
1102 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1103 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1104 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1105 AssertStmt(rcStrict == VINF_SUCCESS, IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict)));
1106 }
1107 else
1108 {
1109 Log((RT_SUCCESS(rcStrict)
1110 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1111 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1112 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1113 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1114 }
1115 }
1116
1117 /* Update the state and probably return. */
1118 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1119 pVCpu->iem.s.fTbCrossedPage |= offPg == 0 || pVCpu->iem.s.fTbBranched != 0;
1120 pVCpu->iem.s.GCPhysInstrBufPrev = pVCpu->iem.s.GCPhysInstrBuf;
1121
1122 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1123 pVCpu->iem.s.offInstrNextByte = offPg + cbInstr + cbToRead;
1124 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead + cbInstr) - cbToRead - cbInstr;
1125 pVCpu->iem.s.cbInstrBufTotal = X86_PAGE_SIZE; /** @todo ??? */
1126 pVCpu->iem.s.GCPhysInstrBuf = pTlbe->GCPhys;
1127 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1128 pVCpu->iem.s.pbInstrBuf = NULL;
1129 if (cbToRead == cbDst)
1130 return;
1131 }
1132
1133 /*
1134 * More to read, loop.
1135 */
1136 cbDst -= cbMaxRead;
1137 pvDst = (uint8_t *)pvDst + cbMaxRead;
1138 }
1139# else /* !IN_RING3 */
1140 RT_NOREF(pvDst, cbDst);
1141 if (pvDst || cbDst)
1142 IEM_DO_LONGJMP(pVCpu, VERR_INTERNAL_ERROR);
1143# endif /* !IN_RING3 */
1144}
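/*
 * Illustrative sketch (assumption): how a caller primes the instruction buffer
 * in the zero-byte mode mentioned in the function documentation above -
 * pbInstrBuf is cleared first so the TLB lookup / mapping path is taken.  The
 * exact call site in the recompiler may differ.
 */
#if 0 /* example only */
    pVCpu->iem.s.pbInstrBuf = NULL;
    iemOpcodeFetchBytesJmp(pVCpu, 0 /*cbDst*/, NULL /*pvDst*/);
#endif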
1145
1146#else /* !IEM_WITH_CODE_TLB */
1147
1148/**
1149 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
1150 * exception if it fails.
1151 *
1152 * @returns Strict VBox status code.
1153 * @param pVCpu The cross context virtual CPU structure of the
1154 * calling thread.
1155 * @param cbMin The minimum number of bytes relative to offOpcode
1156 * that must be read.
1157 */
1158VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPUCC pVCpu, size_t cbMin) RT_NOEXCEPT
1159{
1160 /*
1161 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1162 *
1163 * First translate CS:rIP to a physical address.
1164 */
1165 uint8_t const cbOpcode = pVCpu->iem.s.cbOpcode;
1166 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
1167 uint8_t const cbLeft = cbOpcode - offOpcode;
1168 Assert(cbLeft < cbMin);
1169 Assert(cbOpcode <= sizeof(pVCpu->iem.s.abOpcode));
1170
1171 uint32_t cbToTryRead;
1172 RTGCPTR GCPtrNext;
1173 if (IEM_IS_64BIT_CODE(pVCpu))
1174 {
1175 GCPtrNext = pVCpu->cpum.GstCtx.rip + cbOpcode;
1176 if (!IEM_IS_CANONICAL(GCPtrNext))
1177 return iemRaiseGeneralProtectionFault0(pVCpu);
1178 cbToTryRead = GUEST_PAGE_SIZE - (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1179 }
1180 else
1181 {
1182 uint32_t GCPtrNext32 = pVCpu->cpum.GstCtx.eip;
1183 /* Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu)); - this is allowed */
1184 GCPtrNext32 += cbOpcode;
1185 if (GCPtrNext32 > pVCpu->cpum.GstCtx.cs.u32Limit)
1186 /** @todo For CPUs older than the 386, we should not generate \#GP here but wrap around! */
1187 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1188 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrNext32 + 1;
1189 if (!cbToTryRead) /* overflowed */
1190 {
1191 Assert(GCPtrNext32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1192 cbToTryRead = UINT32_MAX;
1193 /** @todo check out wrapping around the code segment. */
1194 }
1195 if (cbToTryRead < cbMin - cbLeft)
1196 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1197 GCPtrNext = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrNext32;
1198
1199 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1200 if (cbToTryRead > cbLeftOnPage)
1201 cbToTryRead = cbLeftOnPage;
1202 }
1203
1204 /* Restrict to opcode buffer space.
1205
1206 We're making ASSUMPTIONS here based on work done previously in
1207 iemInitDecoderAndPrefetchOpcodes, where bytes from the first page will
1208 be fetched in case of an instruction crossing two pages. */
1209 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - cbOpcode)
1210 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - cbOpcode;
1211 if (RT_LIKELY(cbToTryRead + cbLeft >= cbMin))
1212 { /* likely */ }
1213 else
1214 {
1215 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0)\n",
1216 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, offOpcode, cbMin));
1217 return iemRaiseGeneralProtectionFault0(pVCpu);
1218 }
1219
1220 PGMPTWALK Walk;
1221 int rc = PGMGstGetPage(pVCpu, GCPtrNext, &Walk);
1222 if (RT_FAILURE(rc))
1223 {
1224 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
1225#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1226 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1227 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
1228#endif
1229 return iemRaisePageFault(pVCpu, GCPtrNext, 1, IEM_ACCESS_INSTRUCTION, rc);
1230 }
1231 if (!(Walk.fEffective & X86_PTE_US) && IEM_GET_CPL(pVCpu) == 3)
1232 {
1233 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
1234#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1235 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1236 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
1237#endif
1238 return iemRaisePageFault(pVCpu, GCPtrNext, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1239 }
1240 if ((Walk.fEffective & X86_PTE_PAE_NX) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
1241 {
1242 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
1243#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1244 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1245 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
1246#endif
1247 return iemRaisePageFault(pVCpu, GCPtrNext, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1248 }
1249 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1250 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, cbOpcode));
1251 /** @todo Check reserved bits and such stuff. PGM is better at doing
1252 * that, so do it when implementing the guest virtual address
1253 * TLB... */
1254
1255 /*
1256 * Read the bytes at this address.
1257 *
1258 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
1259 * and since PATM should only patch the start of an instruction there
1260 * should be no need to check again here.
1261 */
1262 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
1263 {
1264 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[cbOpcode],
1265 cbToTryRead, PGMACCESSORIGIN_IEM);
1266 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1267 { /* likely */ }
1268 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1269 {
1270 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1271 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1272 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1273 }
1274 else
1275 {
1276 Log((RT_SUCCESS(rcStrict)
1277 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1278 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1279 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1280 return rcStrict;
1281 }
1282 }
1283 else
1284 {
1285 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[cbOpcode], GCPhys, cbToTryRead);
1286 if (RT_SUCCESS(rc))
1287 { /* likely */ }
1288 else
1289 {
1290 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
1291 return rc;
1292 }
1293 }
1294 pVCpu->iem.s.cbOpcode = cbOpcode + cbToTryRead;
1295 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
1296
1297 return VINF_SUCCESS;
1298}
1299
1300#endif /* !IEM_WITH_CODE_TLB */
1301#ifndef IEM_WITH_SETJMP
1302
1303/**
1304 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
1305 *
1306 * @returns Strict VBox status code.
1307 * @param pVCpu The cross context virtual CPU structure of the
1308 * calling thread.
1309 * @param pb Where to return the opcode byte.
1310 */
1311VBOXSTRICTRC iemOpcodeGetNextU8Slow(PVMCPUCC pVCpu, uint8_t *pb) RT_NOEXCEPT
1312{
1313 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1314 if (rcStrict == VINF_SUCCESS)
1315 {
1316 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1317 *pb = pVCpu->iem.s.abOpcode[offOpcode];
1318 pVCpu->iem.s.offOpcode = offOpcode + 1;
1319 }
1320 else
1321 *pb = 0;
1322 return rcStrict;
1323}
1324
1325#else /* IEM_WITH_SETJMP */
1326
1327/**
1328 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
1329 *
1330 * @returns The opcode byte.
1331 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1332 */
1333uint8_t iemOpcodeGetNextU8SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1334{
1335# ifdef IEM_WITH_CODE_TLB
1336 uint8_t u8;
1337 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
1338 return u8;
1339# else
1340 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1341 if (rcStrict == VINF_SUCCESS)
1342 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
1343 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1344# endif
1345}
1346
1347#endif /* IEM_WITH_SETJMP */
1348
1349#ifndef IEM_WITH_SETJMP
1350
1351/**
1352 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
1353 *
1354 * @returns Strict VBox status code.
1355 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1356 * @param pu16 Where to return the opcode word.
1357 */
1358VBOXSTRICTRC iemOpcodeGetNextS8SxU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
1359{
1360 uint8_t u8;
1361 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1362 if (rcStrict == VINF_SUCCESS)
1363 *pu16 = (int8_t)u8;
1364 return rcStrict;
1365}
1366
1367
1368/**
1369 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
1370 *
1371 * @returns Strict VBox status code.
1372 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1373 * @param pu32 Where to return the opcode dword.
1374 */
1375VBOXSTRICTRC iemOpcodeGetNextS8SxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1376{
1377 uint8_t u8;
1378 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1379 if (rcStrict == VINF_SUCCESS)
1380 *pu32 = (int8_t)u8;
1381 return rcStrict;
1382}
1383
1384
1385/**
1386 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
1387 *
1388 * @returns Strict VBox status code.
1389 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1390 * @param pu64 Where to return the opcode qword.
1391 */
1392VBOXSTRICTRC iemOpcodeGetNextS8SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1393{
1394 uint8_t u8;
1395 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1396 if (rcStrict == VINF_SUCCESS)
1397 *pu64 = (int8_t)u8;
1398 return rcStrict;
1399}
1400
1401#endif /* !IEM_WITH_SETJMP */
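/* Worked example (illustration) for the three sign-extending helpers above:
 * the (int8_t) cast performs the sign extension, so an opcode byte of 0xFE
 * (-2) is returned as 0xFFFE, 0xFFFFFFFE or 0xFFFFFFFFFFFFFFFE respectively. */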
1402
1403
1404#ifndef IEM_WITH_SETJMP
1405
1406/**
1407 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
1408 *
1409 * @returns Strict VBox status code.
1410 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1411 * @param pu16 Where to return the opcode word.
1412 */
1413VBOXSTRICTRC iemOpcodeGetNextU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
1414{
1415 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1416 if (rcStrict == VINF_SUCCESS)
1417 {
1418 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1419# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1420 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1421# else
1422 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1423# endif
1424 pVCpu->iem.s.offOpcode = offOpcode + 2;
1425 }
1426 else
1427 *pu16 = 0;
1428 return rcStrict;
1429}
1430
1431#else /* IEM_WITH_SETJMP */
1432
1433/**
1434 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error.
1435 *
1436 * @returns The opcode word.
1437 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1438 */
1439uint16_t iemOpcodeGetNextU16SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1440{
1441# ifdef IEM_WITH_CODE_TLB
1442 uint16_t u16;
1443 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
1444 return u16;
1445# else
1446 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1447 if (rcStrict == VINF_SUCCESS)
1448 {
1449 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1450 pVCpu->iem.s.offOpcode += 2;
1451# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1452 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1453# else
1454 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1455# endif
1456 }
1457 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1458# endif
1459}
1460
1461#endif /* IEM_WITH_SETJMP */
1462
1463#ifndef IEM_WITH_SETJMP
1464
1465/**
1466 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
1467 *
1468 * @returns Strict VBox status code.
1469 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1470 * @param pu32 Where to return the opcode double word.
1471 */
1472VBOXSTRICTRC iemOpcodeGetNextU16ZxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1473{
1474 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1475 if (rcStrict == VINF_SUCCESS)
1476 {
1477 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1478 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1479 pVCpu->iem.s.offOpcode = offOpcode + 2;
1480 }
1481 else
1482 *pu32 = 0;
1483 return rcStrict;
1484}
1485
1486
1487/**
1488 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
1489 *
1490 * @returns Strict VBox status code.
1491 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1492 * @param pu64 Where to return the opcode quad word.
1493 */
1494VBOXSTRICTRC iemOpcodeGetNextU16ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1495{
1496 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1497 if (rcStrict == VINF_SUCCESS)
1498 {
1499 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1500 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1501 pVCpu->iem.s.offOpcode = offOpcode + 2;
1502 }
1503 else
1504 *pu64 = 0;
1505 return rcStrict;
1506}
1507
1508#endif /* !IEM_WITH_SETJMP */
1509
1510#ifndef IEM_WITH_SETJMP
1511
1512/**
1513 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
1514 *
1515 * @returns Strict VBox status code.
1516 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1517 * @param pu32 Where to return the opcode dword.
1518 */
1519VBOXSTRICTRC iemOpcodeGetNextU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1520{
1521 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1522 if (rcStrict == VINF_SUCCESS)
1523 {
1524 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1525# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1526 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1527# else
1528 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1529 pVCpu->iem.s.abOpcode[offOpcode + 1],
1530 pVCpu->iem.s.abOpcode[offOpcode + 2],
1531 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1532# endif
1533 pVCpu->iem.s.offOpcode = offOpcode + 4;
1534 }
1535 else
1536 *pu32 = 0;
1537 return rcStrict;
1538}
1539
1540#else /* IEM_WITH_SETJMP */
1541
1542/**
1543 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
1544 *
1545 * @returns The opcode dword.
1546 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1547 */
1548uint32_t iemOpcodeGetNextU32SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1549{
1550# ifdef IEM_WITH_CODE_TLB
1551 uint32_t u32;
1552 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
1553 return u32;
1554# else
1555 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1556 if (rcStrict == VINF_SUCCESS)
1557 {
1558 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1559 pVCpu->iem.s.offOpcode = offOpcode + 4;
1560# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1561 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1562# else
1563 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1564 pVCpu->iem.s.abOpcode[offOpcode + 1],
1565 pVCpu->iem.s.abOpcode[offOpcode + 2],
1566 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1567# endif
1568 }
1569 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1570# endif
1571}
1572
1573#endif /* IEM_WITH_SETJMP */
1574
1575#ifndef IEM_WITH_SETJMP
1576
1577/**
1578 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
1579 *
1580 * @returns Strict VBox status code.
1581 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1582 * @param pu64 Where to return the zero-extended opcode dword.
1583 */
1584VBOXSTRICTRC iemOpcodeGetNextU32ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1585{
1586 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1587 if (rcStrict == VINF_SUCCESS)
1588 {
1589 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1590 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1591 pVCpu->iem.s.abOpcode[offOpcode + 1],
1592 pVCpu->iem.s.abOpcode[offOpcode + 2],
1593 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1594 pVCpu->iem.s.offOpcode = offOpcode + 4;
1595 }
1596 else
1597 *pu64 = 0;
1598 return rcStrict;
1599}
1600
1601
1602/**
1603 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
1604 *
1605 * @returns Strict VBox status code.
1606 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1607 * @param pu64 Where to return the sign-extended opcode dword.
1608 */
1609VBOXSTRICTRC iemOpcodeGetNextS32SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1610{
1611 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1612 if (rcStrict == VINF_SUCCESS)
1613 {
1614 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1615 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1616 pVCpu->iem.s.abOpcode[offOpcode + 1],
1617 pVCpu->iem.s.abOpcode[offOpcode + 2],
1618 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1619 pVCpu->iem.s.offOpcode = offOpcode + 4;
1620 }
1621 else
1622 *pu64 = 0;
1623 return rcStrict;
1624}
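/* Note (illustrative): the S32SxU64 variant above differs from the U32ZxU64 one only in the
   cast to int32_t before the assignment, which sign-extends the dword. E.g. the opcode bytes
   FC FF FF FF form the dword 0xfffffffc (-4), so *pu64 becomes UINT64_C(0xfffffffffffffffc),
   whereas the zero-extending variant would store UINT64_C(0x00000000fffffffc). */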
1625
1626#endif /* !IEM_WITH_SETJMP */
1627
1628#ifndef IEM_WITH_SETJMP
1629
1630/**
1631 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
1632 *
1633 * @returns Strict VBox status code.
1634 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1635 * @param pu64 Where to return the opcode qword.
1636 */
1637VBOXSTRICTRC iemOpcodeGetNextU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1638{
1639 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
1640 if (rcStrict == VINF_SUCCESS)
1641 {
1642 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1643# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1644 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1645# else
1646 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1647 pVCpu->iem.s.abOpcode[offOpcode + 1],
1648 pVCpu->iem.s.abOpcode[offOpcode + 2],
1649 pVCpu->iem.s.abOpcode[offOpcode + 3],
1650 pVCpu->iem.s.abOpcode[offOpcode + 4],
1651 pVCpu->iem.s.abOpcode[offOpcode + 5],
1652 pVCpu->iem.s.abOpcode[offOpcode + 6],
1653 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1654# endif
1655 pVCpu->iem.s.offOpcode = offOpcode + 8;
1656 }
1657 else
1658 *pu64 = 0;
1659 return rcStrict;
1660}
1661
1662#else /* IEM_WITH_SETJMP */
1663
1664/**
1665 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
1666 *
1667 * @returns The opcode qword.
1668 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1669 */
1670uint64_t iemOpcodeGetNextU64SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1671{
1672# ifdef IEM_WITH_CODE_TLB
1673 uint64_t u64;
1674 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
1675 return u64;
1676# else
1677 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
1678 if (rcStrict == VINF_SUCCESS)
1679 {
1680 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1681 pVCpu->iem.s.offOpcode = offOpcode + 8;
1682# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1683 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1684# else
1685 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1686 pVCpu->iem.s.abOpcode[offOpcode + 1],
1687 pVCpu->iem.s.abOpcode[offOpcode + 2],
1688 pVCpu->iem.s.abOpcode[offOpcode + 3],
1689 pVCpu->iem.s.abOpcode[offOpcode + 4],
1690 pVCpu->iem.s.abOpcode[offOpcode + 5],
1691 pVCpu->iem.s.abOpcode[offOpcode + 6],
1692 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1693# endif
1694 }
1695 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1696# endif
1697}
1698
1699#endif /* IEM_WITH_SETJMP */
1700
1701
1702
1703/** @name Misc Worker Functions.
1704 * @{
1705 */
1706
1707/**
1708 * Gets the exception class for the specified exception vector.
1709 *
1710 * @returns The class of the specified exception.
1711 * @param uVector The exception vector.
1712 */
1713static IEMXCPTCLASS iemGetXcptClass(uint8_t uVector) RT_NOEXCEPT
1714{
1715 Assert(uVector <= X86_XCPT_LAST);
1716 switch (uVector)
1717 {
1718 case X86_XCPT_DE:
1719 case X86_XCPT_TS:
1720 case X86_XCPT_NP:
1721 case X86_XCPT_SS:
1722 case X86_XCPT_GP:
1723 case X86_XCPT_SX: /* AMD only */
1724 return IEMXCPTCLASS_CONTRIBUTORY;
1725
1726 case X86_XCPT_PF:
1727 case X86_XCPT_VE: /* Intel only */
1728 return IEMXCPTCLASS_PAGE_FAULT;
1729
1730 case X86_XCPT_DF:
1731 return IEMXCPTCLASS_DOUBLE_FAULT;
1732 }
1733 return IEMXCPTCLASS_BENIGN;
1734}
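/* Note (summary of the classification above, per the Intel/AMD exception references):
   #DE, #TS, #NP, #SS, #GP and #SX (AMD only) are contributory, #PF and #VE (Intel only) form
   the page-fault class, #DF is its own class, and everything else (e.g. #UD, #NM, #MF) is
   benign. How two classes combine into #DF or a triple fault is decided in
   IEMEvaluateRecursiveXcpt below. */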
1735
1736
1737/**
1738 * Evaluates how to handle an exception caused during delivery of another event
1739 * (exception / interrupt).
1740 *
1741 * @returns How to handle the recursive exception.
1742 * @param pVCpu The cross context virtual CPU structure of the
1743 * calling thread.
1744 * @param fPrevFlags The flags of the previous event.
1745 * @param uPrevVector The vector of the previous event.
1746 * @param fCurFlags The flags of the current exception.
1747 * @param uCurVector The vector of the current exception.
1748 * @param pfXcptRaiseInfo Where to store additional information about the
1749 * exception condition. Optional.
1750 */
1751VMM_INT_DECL(IEMXCPTRAISE) IEMEvaluateRecursiveXcpt(PVMCPUCC pVCpu, uint32_t fPrevFlags, uint8_t uPrevVector, uint32_t fCurFlags,
1752 uint8_t uCurVector, PIEMXCPTRAISEINFO pfXcptRaiseInfo)
1753{
1754 /*
1755 * Only CPU exceptions can be raised while delivering other events; software interrupt
1756 * (INTn/INT3/INTO/ICEBP) generated exceptions cannot occur as the current (second) exception.
1757 */
1758 AssertReturn(fCurFlags & IEM_XCPT_FLAGS_T_CPU_XCPT, IEMXCPTRAISE_INVALID);
1759 Assert(pVCpu); RT_NOREF(pVCpu);
1760 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x\n", uPrevVector, uCurVector));
1761
1762 IEMXCPTRAISE enmRaise = IEMXCPTRAISE_CURRENT_XCPT;
1763 IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
1764 if (fPrevFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
1765 {
1766 IEMXCPTCLASS enmPrevXcptClass = iemGetXcptClass(uPrevVector);
1767 if (enmPrevXcptClass != IEMXCPTCLASS_BENIGN)
1768 {
1769 IEMXCPTCLASS enmCurXcptClass = iemGetXcptClass(uCurVector);
1770 if ( enmPrevXcptClass == IEMXCPTCLASS_PAGE_FAULT
1771 && ( enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT
1772 || enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY))
1773 {
1774 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
1775 fRaiseInfo = enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT ? IEMXCPTRAISEINFO_PF_PF
1776 : IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT;
1777 Log2(("IEMEvaluateRecursiveXcpt: Vectoring page fault. uPrevVector=%#x uCurVector=%#x uCr2=%#RX64\n", uPrevVector,
1778 uCurVector, pVCpu->cpum.GstCtx.cr2));
1779 }
1780 else if ( enmPrevXcptClass == IEMXCPTCLASS_CONTRIBUTORY
1781 && enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY)
1782 {
1783 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
1784 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x -> #DF\n", uPrevVector, uCurVector));
1785 }
1786 else if ( enmPrevXcptClass == IEMXCPTCLASS_DOUBLE_FAULT
1787 && ( enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY
1788 || enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT))
1789 {
1790 enmRaise = IEMXCPTRAISE_TRIPLE_FAULT;
1791 Log2(("IEMEvaluateRecursiveXcpt: #DF handler raised a %#x exception -> triple fault\n", uCurVector));
1792 }
1793 }
1794 else
1795 {
1796 if (uPrevVector == X86_XCPT_NMI)
1797 {
1798 fRaiseInfo = IEMXCPTRAISEINFO_NMI_XCPT;
1799 if (uCurVector == X86_XCPT_PF)
1800 {
1801 fRaiseInfo |= IEMXCPTRAISEINFO_NMI_PF;
1802 Log2(("IEMEvaluateRecursiveXcpt: NMI delivery caused a page fault\n"));
1803 }
1804 }
1805 else if ( uPrevVector == X86_XCPT_AC
1806 && uCurVector == X86_XCPT_AC)
1807 {
1808 enmRaise = IEMXCPTRAISE_CPU_HANG;
1809 fRaiseInfo = IEMXCPTRAISEINFO_AC_AC;
1810 Log2(("IEMEvaluateRecursiveXcpt: Recursive #AC - Bad guest\n"));
1811 }
1812 }
1813 }
1814 else if (fPrevFlags & IEM_XCPT_FLAGS_T_EXT_INT)
1815 {
1816 fRaiseInfo = IEMXCPTRAISEINFO_EXT_INT_XCPT;
1817 if (uCurVector == X86_XCPT_PF)
1818 fRaiseInfo |= IEMXCPTRAISEINFO_EXT_INT_PF;
1819 }
1820 else
1821 {
1822 Assert(fPrevFlags & IEM_XCPT_FLAGS_T_SOFT_INT);
1823 fRaiseInfo = IEMXCPTRAISEINFO_SOFT_INT_XCPT;
1824 }
1825
1826 if (pfXcptRaiseInfo)
1827 *pfXcptRaiseInfo = fRaiseInfo;
1828 return enmRaise;
1829}
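/*
 * Illustrative call sketch for IEMEvaluateRecursiveXcpt above (not actual caller code): a #NP
 * raised while delivering a #GP is a contributory-on-contributory combination and therefore
 * escalates to a double fault:
 *
 *     IEMXCPTRAISEINFO fRaiseInfo;
 *     IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu,
 *                                                      IEM_XCPT_FLAGS_T_CPU_XCPT, X86_XCPT_GP,
 *                                                      IEM_XCPT_FLAGS_T_CPU_XCPT, X86_XCPT_NP,
 *                                                      &fRaiseInfo);
 *     Assert(enmRaise == IEMXCPTRAISE_DOUBLE_FAULT);
 */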
1830
1831
1832/**
1833 * Enters the CPU shutdown state initiated by a triple fault or other
1834 * unrecoverable conditions.
1835 *
1836 * @returns Strict VBox status code.
1837 * @param pVCpu The cross context virtual CPU structure of the
1838 * calling thread.
1839 */
1840static VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPUCC pVCpu) RT_NOEXCEPT
1841{
1842 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
1843 IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(pVCpu, VMX_EXIT_TRIPLE_FAULT, 0 /* u64ExitQual */);
1844
1845 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
1846 {
1847 Log2(("shutdown: Guest intercept -> #VMEXIT\n"));
1848 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
1849 }
1850
1851 RT_NOREF(pVCpu);
1852 return VINF_EM_TRIPLE_FAULT;
1853}
1854
1855
1856/**
1857 * Validates a new SS segment.
1858 *
1859 * @returns VBox strict status code.
1860 * @param pVCpu The cross context virtual CPU structure of the
1861 * calling thread.
1862 * @param NewSS The new SS selector.
1863 * @param uCpl The CPL to load the stack for.
1864 * @param pDesc Where to return the descriptor.
1865 */
1866static VBOXSTRICTRC iemMiscValidateNewSS(PVMCPUCC pVCpu, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc) RT_NOEXCEPT
1867{
1868 /* Null selectors are not allowed (we're not called for dispatching
1869 interrupts with SS=0 in long mode). */
1870 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
1871 {
1872 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
1873 return iemRaiseTaskSwitchFault0(pVCpu);
1874 }
1875
1876 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
1877 if ((NewSS & X86_SEL_RPL) != uCpl)
1878 {
1879 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl));
1880 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1881 }
1882
1883 /*
1884 * Read the descriptor.
1885 */
1886 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
1887 if (rcStrict != VINF_SUCCESS)
1888 return rcStrict;
1889
1890 /*
1891 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
1892 */
1893 if (!pDesc->Legacy.Gen.u1DescType)
1894 {
1895 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
1896 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1897 }
1898
1899 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
1900 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
1901 {
1902 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
1903 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1904 }
1905 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
1906 {
1907 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
1908 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1909 }
1910
1911 /* Is it there? */
1912 /** @todo testcase: Is this checked before the canonical / limit check below? */
1913 if (!pDesc->Legacy.Gen.u1Present)
1914 {
1915 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
1916 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
1917 }
1918
1919 return VINF_SUCCESS;
1920}
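/* Note (illustrative): the checks above follow the LSS/POP SS/MOV SS descriptor rules but
   raise #TS/#NP rather than #GP. E.g. a null SS raises #TS(0), an SS whose RPL or DPL differs
   from uCpl raises #TS(SS), a system, code or read-only segment raises #TS(SS), and a
   not-present segment raises #NP(SS). */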
1921
1922/** @} */
1923
1924
1925/** @name Raising Exceptions.
1926 *
1927 * @{
1928 */
1929
1930
1931/**
1932 * Loads the specified stack far pointer from the TSS.
1933 *
1934 * @returns VBox strict status code.
1935 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1936 * @param uCpl The CPL to load the stack for.
1937 * @param pSelSS Where to return the new stack segment.
1938 * @param puEsp Where to return the new stack pointer.
1939 */
1940static VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPUCC pVCpu, uint8_t uCpl, PRTSEL pSelSS, uint32_t *puEsp) RT_NOEXCEPT
1941{
1942 VBOXSTRICTRC rcStrict;
1943 Assert(uCpl < 4);
1944
1945 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
1946 switch (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type)
1947 {
1948 /*
1949 * 16-bit TSS (X86TSS16).
1950 */
1951 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
1952 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
1953 {
1954 uint32_t off = uCpl * 4 + 2;
1955 if (off + 4 <= pVCpu->cpum.GstCtx.tr.u32Limit)
1956 {
1957 /** @todo check actual access pattern here. */
1958 uint32_t u32Tmp = 0; /* gcc maybe... */
1959 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
1960 if (rcStrict == VINF_SUCCESS)
1961 {
1962 *puEsp = RT_LOWORD(u32Tmp);
1963 *pSelSS = RT_HIWORD(u32Tmp);
1964 return VINF_SUCCESS;
1965 }
1966 }
1967 else
1968 {
1969 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
1970 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
1971 }
1972 break;
1973 }
1974
1975 /*
1976 * 32-bit TSS (X86TSS32).
1977 */
1978 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
1979 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
1980 {
1981 uint32_t off = uCpl * 8 + 4;
1982 if (off + 7 <= pVCpu->cpum.GstCtx.tr.u32Limit)
1983 {
1984/** @todo check actual access pattern here. */
1985 uint64_t u64Tmp;
1986 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
1987 if (rcStrict == VINF_SUCCESS)
1988 {
1989 *puEsp = u64Tmp & UINT32_MAX;
1990 *pSelSS = (RTSEL)(u64Tmp >> 32);
1991 return VINF_SUCCESS;
1992 }
1993 }
1994 else
1995 {
1996 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
1997 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
1998 }
1999 break;
2000 }
2001
2002 default:
2003 AssertFailed();
2004 rcStrict = VERR_IEM_IPE_4;
2005 break;
2006 }
2007
2008 *puEsp = 0; /* make gcc happy */
2009 *pSelSS = 0; /* make gcc happy */
2010 return rcStrict;
2011}
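/* Illustrative offset math for the lookup above: in a 32-bit TSS the SS:ESP pairs start at
   offset 4 and are 8 bytes apart, so uCpl=1 gives off = 1 * 8 + 4 = 0x0c, i.e. ESP1 with SS1
   in the high dword at 0x10. In a 16-bit TSS the SS:SP pairs start at offset 2 and are 4
   bytes apart, so uCpl=0 gives off = 2, i.e. SP0 with SS0 in the high word. */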
2012
2013
2014/**
2015 * Loads the specified stack pointer from the 64-bit TSS.
2016 *
2017 * @returns VBox strict status code.
2018 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2019 * @param uCpl The CPL to load the stack for.
2020 * @param uIst The interrupt stack table index, or 0 to use uCpl.
2021 * @param puRsp Where to return the new stack pointer.
2022 */
2023static VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPUCC pVCpu, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp) RT_NOEXCEPT
2024{
2025 Assert(uCpl < 4);
2026 Assert(uIst < 8);
2027 *puRsp = 0; /* make gcc happy */
2028
2029 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
2030 AssertReturn(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
2031
2032 uint32_t off;
2033 if (uIst)
2034 off = (uIst - 1) * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, ist1);
2035 else
2036 off = uCpl * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, rsp0);
2037 if (off + sizeof(uint64_t) > pVCpu->cpum.GstCtx.tr.u32Limit)
2038 {
2039 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pVCpu->cpum.GstCtx.tr.u32Limit));
2040 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
2041 }
2042
2043 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
2044}
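/* Illustrative offset math for the 64-bit TSS lookup above: rsp0/rsp1/rsp2 start at offset 4
   and ist1..ist7 start at offset 0x24, each entry being 8 bytes. So uIst=0, uCpl=2 reads RSP2
   at offset 2 * 8 + 4 = 0x14, while uIst=3 reads IST3 at offset (3 - 1) * 8 + 0x24 = 0x34. */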
2045
2046
2047/**
2048 * Adjust the CPU state according to the exception being raised.
2049 *
2050 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2051 * @param u8Vector The exception that has been raised.
2052 */
2053DECLINLINE(void) iemRaiseXcptAdjustState(PVMCPUCC pVCpu, uint8_t u8Vector)
2054{
2055 switch (u8Vector)
2056 {
2057 case X86_XCPT_DB:
2058 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);
2059 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
2060 break;
2061 /** @todo Read the AMD and Intel exception reference... */
2062 }
2063}
2064
2065
2066/**
2067 * Implements exceptions and interrupts for real mode.
2068 *
2069 * @returns VBox strict status code.
2070 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2071 * @param cbInstr The number of bytes to offset rIP by in the return
2072 * address.
2073 * @param u8Vector The interrupt / exception vector number.
2074 * @param fFlags The flags.
2075 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2076 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2077 */
2078static VBOXSTRICTRC
2079iemRaiseXcptOrIntInRealMode(PVMCPUCC pVCpu,
2080 uint8_t cbInstr,
2081 uint8_t u8Vector,
2082 uint32_t fFlags,
2083 uint16_t uErr,
2084 uint64_t uCr2) RT_NOEXCEPT
2085{
2086 NOREF(uErr); NOREF(uCr2);
2087 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2088
2089 /*
2090 * Read the IDT entry.
2091 */
2092 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
2093 {
2094 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
2095 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2096 }
2097 RTFAR16 Idte;
2098 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(4) * u8Vector);
2099 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2100 {
2101 Log(("iemRaiseXcptOrIntInRealMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
2102 return rcStrict;
2103 }
2104
2105 /*
2106 * Push the stack frame.
2107 */
2108 uint16_t *pu16Frame;
2109 uint64_t uNewRsp;
2110 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, 3, (void **)&pu16Frame, &uNewRsp);
2111 if (rcStrict != VINF_SUCCESS)
2112 return rcStrict;
2113
2114 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
2115#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
2116 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
2117 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
2118 fEfl |= UINT16_C(0xf000);
2119#endif
2120 pu16Frame[2] = (uint16_t)fEfl;
2121 pu16Frame[1] = (uint16_t)pVCpu->cpum.GstCtx.cs.Sel;
2122 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
2123 rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu16Frame, uNewRsp);
2124 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2125 return rcStrict;
2126
2127 /*
2128 * Load the vector address into cs:ip and make exception specific state
2129 * adjustments.
2130 */
2131 pVCpu->cpum.GstCtx.cs.Sel = Idte.sel;
2132 pVCpu->cpum.GstCtx.cs.ValidSel = Idte.sel;
2133 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
2134 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)Idte.sel << 4;
2135 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
2136 pVCpu->cpum.GstCtx.rip = Idte.off;
2137 fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC);
2138 IEMMISC_SET_EFL(pVCpu, fEfl);
2139
2140 /** @todo do we actually do this in real mode? */
2141 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
2142 iemRaiseXcptAdjustState(pVCpu, u8Vector);
2143
2144 /* The IEM_F_MODE_XXX and IEM_F_X86_CPL_MASK don't really change here,
2145 so best leave them alone in case we're in a weird kind of real mode... */
2146
2147 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
2148}
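/* Illustrative example of the real-mode dispatch above: for u8Vector = 0x10 with IDTR base 0
   (the classic IVT layout) the entry is the far pointer at linear address 0x40. If that dword
   happened to be 0xf000f065, then Idte.off = 0xf065 and Idte.sel = 0xf000, so after pushing
   FLAGS, CS and the return IP (in that order, at decreasing addresses) execution continues at
   F000:F065 with IF, TF and AC cleared. */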
2149
2150
2151/**
2152 * Loads a NULL data selector into a segment register when coming from V8086 mode.
2153 *
2154 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2155 * @param pSReg Pointer to the segment register.
2156 */
2157DECLINLINE(void) iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPUCC pVCpu, PCPUMSELREG pSReg)
2158{
2159 pSReg->Sel = 0;
2160 pSReg->ValidSel = 0;
2161 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2162 {
2163 /* VT-x (Intel 3960x) doesn't change the base and limit, clears and sets the following attributes */
2164 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
2165 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
2166 }
2167 else
2168 {
2169 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2170 /** @todo check this on AMD-V */
2171 pSReg->u64Base = 0;
2172 pSReg->u32Limit = 0;
2173 }
2174}
2175
2176
2177/**
2178 * Loads a segment selector during a task switch in V8086 mode.
2179 *
2180 * @param pSReg Pointer to the segment register.
2181 * @param uSel The selector value to load.
2182 */
2183DECLINLINE(void) iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
2184{
2185 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
2186 pSReg->Sel = uSel;
2187 pSReg->ValidSel = uSel;
2188 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2189 pSReg->u64Base = uSel << 4;
2190 pSReg->u32Limit = 0xffff;
2191 pSReg->Attr.u = 0xf3;
2192}
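/* Illustrative example of the V8086 load above: uSel = 0x1234 gives u64Base = 0x12340,
   u32Limit = 0xffff and Attr.u = 0xf3, i.e. a present, DPL=3, accessed read/write data
   segment -- exactly what real-mode style segmentation expects. */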
2193
2194
2195/**
2196 * Loads a segment selector during a task switch in protected mode.
2197 *
2198 * In this task switch scenario, we would throw \#TS exceptions rather than
2199 * \#GPs.
2200 *
2201 * @returns VBox strict status code.
2202 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2203 * @param pSReg Pointer to the segment register.
2204 * @param uSel The new selector value.
2205 *
2206 * @remarks This does _not_ handle CS or SS.
2207 * @remarks This expects IEM_GET_CPL(pVCpu) to return an up to date value.
2208 */
2209static VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPUCC pVCpu, PCPUMSELREG pSReg, uint16_t uSel) RT_NOEXCEPT
2210{
2211 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2212
2213 /* Null data selector. */
2214 if (!(uSel & X86_SEL_MASK_OFF_RPL))
2215 {
2216 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
2217 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
2218 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2219 return VINF_SUCCESS;
2220 }
2221
2222 /* Fetch the descriptor. */
2223 IEMSELDESC Desc;
2224 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
2225 if (rcStrict != VINF_SUCCESS)
2226 {
2227 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
2228 VBOXSTRICTRC_VAL(rcStrict)));
2229 return rcStrict;
2230 }
2231
2232 /* Must be a data segment or readable code segment. */
2233 if ( !Desc.Legacy.Gen.u1DescType
2234 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
2235 {
2236 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
2237 Desc.Legacy.Gen.u4Type));
2238 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2239 }
2240
2241 /* Check privileges for data segments and non-conforming code segments. */
2242 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2243 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2244 {
2245 /* The RPL and the new CPL must be less than or equal to the DPL. */
2246 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
2247 || (IEM_GET_CPL(pVCpu) > Desc.Legacy.Gen.u2Dpl))
2248 {
2249 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
2250 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
2251 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2252 }
2253 }
2254
2255 /* Is it there? */
2256 if (!Desc.Legacy.Gen.u1Present)
2257 {
2258 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
2259 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2260 }
2261
2262 /* The base and limit. */
2263 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
2264 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
2265
2266 /*
2267 * Ok, everything checked out fine. Now set the accessed bit before
2268 * committing the result into the registers.
2269 */
2270 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2271 {
2272 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
2273 if (rcStrict != VINF_SUCCESS)
2274 return rcStrict;
2275 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2276 }
2277
2278 /* Commit */
2279 pSReg->Sel = uSel;
2280 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
2281 pSReg->u32Limit = cbLimit;
2282 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
2283 pSReg->ValidSel = uSel;
2284 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2285 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2286 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
2287
2288 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
2289 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2290 return VINF_SUCCESS;
2291}
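/* Note (illustrative): a concrete failure case for the helper above -- loading DS = 0x002b
   (RPL=3) while the descriptor's DPL is 0 trips the privilege check and raises #TS with the
   selector (RPL masked off, here 0x0028) as the error code. */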
2292
2293
2294/**
2295 * Performs a task switch.
2296 *
2297 * If the task switch is the result of a JMP, CALL or IRET instruction, the
2298 * caller is responsible for performing the necessary checks (like DPL, TSS
2299 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
2300 * reference for JMP, CALL, IRET.
2301 *
2302 * If the task switch is due to a software interrupt or hardware exception,
2303 * the caller is responsible for validating the TSS selector and descriptor. See
2304 * Intel Instruction reference for INT n.
2305 *
2306 * @returns VBox strict status code.
2307 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2308 * @param enmTaskSwitch The cause of the task switch.
2309 * @param uNextEip The EIP effective after the task switch.
2310 * @param fFlags The flags, see IEM_XCPT_FLAGS_XXX.
2311 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2312 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2313 * @param SelTSS The TSS selector of the new task.
2314 * @param pNewDescTSS Pointer to the new TSS descriptor.
2315 */
2316VBOXSTRICTRC
2317iemTaskSwitch(PVMCPUCC pVCpu,
2318 IEMTASKSWITCH enmTaskSwitch,
2319 uint32_t uNextEip,
2320 uint32_t fFlags,
2321 uint16_t uErr,
2322 uint64_t uCr2,
2323 RTSEL SelTSS,
2324 PIEMSELDESC pNewDescTSS) RT_NOEXCEPT
2325{
2326 Assert(!IEM_IS_REAL_MODE(pVCpu));
2327 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2328 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2329
2330 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
2331 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2332 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2333 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2334 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2335
2336 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2337 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2338
2339 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTSS,
2340 fIsNewTSS386, pVCpu->cpum.GstCtx.eip, uNextEip));
2341
2342 /* Update CR2 in case it's a page-fault. */
2343 /** @todo This should probably be done much earlier in IEM/PGM. See
2344 * @bugref{5653#c49}. */
2345 if (fFlags & IEM_XCPT_FLAGS_CR2)
2346 pVCpu->cpum.GstCtx.cr2 = uCr2;
2347
2348 /*
2349 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
2350 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
2351 */
2352 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
2353 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
2354 if (uNewTSSLimit < uNewTSSLimitMin)
2355 {
2356 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
2357 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
2358 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
2359 }
2360
2361 /*
2362 * Task switches in VMX non-root mode always cause VM-exits.
2363 * The new TSS must have been read and validated (DPL, limits etc.) before a
2364 * task-switch VM-exit commences.
2365 *
2366 * See Intel spec. 25.4.2 "Treatment of Task Switches".
2367 */
2368 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
2369 {
2370 Log(("iemTaskSwitch: Guest intercept (source=%u, sel=%#x) -> VM-exit.\n", enmTaskSwitch, SelTSS));
2371 IEM_VMX_VMEXIT_TASK_SWITCH_RET(pVCpu, enmTaskSwitch, SelTSS, uNextEip - pVCpu->cpum.GstCtx.eip);
2372 }
2373
2374 /*
2375 * The SVM nested-guest intercept for task-switch takes priority over all exceptions
2376 * after validating the incoming (new) TSS, see AMD spec. 15.14.1 "Task Switch Intercept".
2377 */
2378 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TASK_SWITCH))
2379 {
2380 uint32_t const uExitInfo1 = SelTSS;
2381 uint32_t uExitInfo2 = uErr;
2382 switch (enmTaskSwitch)
2383 {
2384 case IEMTASKSWITCH_JUMP: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_JUMP; break;
2385 case IEMTASKSWITCH_IRET: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_IRET; break;
2386 default: break;
2387 }
2388 if (fFlags & IEM_XCPT_FLAGS_ERR)
2389 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_HAS_ERROR_CODE;
2390 if (pVCpu->cpum.GstCtx.eflags.Bits.u1RF)
2391 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_EFLAGS_RF;
2392
2393 Log(("iemTaskSwitch: Guest intercept -> #VMEXIT. uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uExitInfo1, uExitInfo2));
2394 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_TASK_SWITCH, uExitInfo1, uExitInfo2);
2395 RT_NOREF2(uExitInfo1, uExitInfo2);
2396 }
2397
2398 /*
2399 * Check the current TSS limit. The last written byte to the current TSS during the
2400 * task switch will be 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
2401 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2402 *
2403 * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
2404 * end up with smaller than "legal" TSS limits.
2405 */
2406 uint32_t const uCurTSSLimit = pVCpu->cpum.GstCtx.tr.u32Limit;
2407 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
2408 if (uCurTSSLimit < uCurTSSLimitMin)
2409 {
2410 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
2411 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
2412 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
2413 }
2414
2415 /*
2416 * Verify that the new TSS can be accessed and map it. Map only the required contents
2417 * and not the entire TSS.
2418 */
2419 void *pvNewTSS;
2420 uint32_t const cbNewTSS = uNewTSSLimitMin + 1;
2421 RTGCPTR const GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
2422 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
2423 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
2424 * not perform correct translation if this happens. See Intel spec. 7.2.1
2425 * "Task-State Segment". */
2426 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW, 0);
2427 if (rcStrict != VINF_SUCCESS)
2428 {
2429 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
2430 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
2431 return rcStrict;
2432 }
2433
2434 /*
2435 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
2436 */
2437 uint32_t fEFlags = pVCpu->cpum.GstCtx.eflags.u;
2438 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
2439 || enmTaskSwitch == IEMTASKSWITCH_IRET)
2440 {
2441 PX86DESC pDescCurTSS;
2442 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
2443 pVCpu->cpum.GstCtx.gdtr.pGdt + (pVCpu->cpum.GstCtx.tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0);
2444 if (rcStrict != VINF_SUCCESS)
2445 {
2446 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2447 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2448 return rcStrict;
2449 }
2450
2451 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2452 rcStrict = iemMemCommitAndUnmap(pVCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
2453 if (rcStrict != VINF_SUCCESS)
2454 {
2455 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2456 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2457 return rcStrict;
2458 }
2459
2460 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
2461 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
2462 {
2463 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2464 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2465 fEFlags &= ~X86_EFL_NT;
2466 }
2467 }
2468
2469 /*
2470 * Save the CPU state into the current TSS.
2471 */
2472 RTGCPTR const GCPtrCurTSS = pVCpu->cpum.GstCtx.tr.u64Base;
2473 if (GCPtrNewTSS == GCPtrCurTSS)
2474 {
2475 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
2476 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
2477 pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.u, pVCpu->cpum.GstCtx.eax,
2478 pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel,
2479 pVCpu->cpum.GstCtx.ldtr.Sel));
2480 }
2481 if (fIsNewTSS386)
2482 {
2483 /*
2484 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
2485 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2486 */
2487 void *pvCurTSS32;
2488 uint32_t const offCurTSS = RT_UOFFSETOF(X86TSS32, eip);
2489 uint32_t const cbCurTSS = RT_UOFFSETOF(X86TSS32, selLdt) - RT_UOFFSETOF(X86TSS32, eip);
2490 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
2491 rcStrict = iemMemMap(pVCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW, 0);
2492 if (rcStrict != VINF_SUCCESS)
2493 {
2494 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
2495 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
2496 return rcStrict;
2497 }
2498
2499 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS, offCurTSS + cbCurTSS). */
2500 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
2501 pCurTSS32->eip = uNextEip;
2502 pCurTSS32->eflags = fEFlags;
2503 pCurTSS32->eax = pVCpu->cpum.GstCtx.eax;
2504 pCurTSS32->ecx = pVCpu->cpum.GstCtx.ecx;
2505 pCurTSS32->edx = pVCpu->cpum.GstCtx.edx;
2506 pCurTSS32->ebx = pVCpu->cpum.GstCtx.ebx;
2507 pCurTSS32->esp = pVCpu->cpum.GstCtx.esp;
2508 pCurTSS32->ebp = pVCpu->cpum.GstCtx.ebp;
2509 pCurTSS32->esi = pVCpu->cpum.GstCtx.esi;
2510 pCurTSS32->edi = pVCpu->cpum.GstCtx.edi;
2511 pCurTSS32->es = pVCpu->cpum.GstCtx.es.Sel;
2512 pCurTSS32->cs = pVCpu->cpum.GstCtx.cs.Sel;
2513 pCurTSS32->ss = pVCpu->cpum.GstCtx.ss.Sel;
2514 pCurTSS32->ds = pVCpu->cpum.GstCtx.ds.Sel;
2515 pCurTSS32->fs = pVCpu->cpum.GstCtx.fs.Sel;
2516 pCurTSS32->gs = pVCpu->cpum.GstCtx.gs.Sel;
2517
2518 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
2519 if (rcStrict != VINF_SUCCESS)
2520 {
2521 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2522 VBOXSTRICTRC_VAL(rcStrict)));
2523 return rcStrict;
2524 }
2525 }
2526 else
2527 {
2528 /*
2529 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
2530 */
2531 void *pvCurTSS16;
2532 uint32_t const offCurTSS = RT_UOFFSETOF(X86TSS16, ip);
2533 uint32_t const cbCurTSS = RT_UOFFSETOF(X86TSS16, selLdt) - RT_UOFFSETOF(X86TSS16, ip);
2534 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
2535 rcStrict = iemMemMap(pVCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW, 0);
2536 if (rcStrict != VINF_SUCCESS)
2537 {
2538 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
2539 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
2540 return rcStrict;
2541 }
2542
2543 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS, offCurTSS + cbCurTSS). */
2544 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
2545 pCurTSS16->ip = uNextEip;
2546 pCurTSS16->flags = (uint16_t)fEFlags;
2547 pCurTSS16->ax = pVCpu->cpum.GstCtx.ax;
2548 pCurTSS16->cx = pVCpu->cpum.GstCtx.cx;
2549 pCurTSS16->dx = pVCpu->cpum.GstCtx.dx;
2550 pCurTSS16->bx = pVCpu->cpum.GstCtx.bx;
2551 pCurTSS16->sp = pVCpu->cpum.GstCtx.sp;
2552 pCurTSS16->bp = pVCpu->cpum.GstCtx.bp;
2553 pCurTSS16->si = pVCpu->cpum.GstCtx.si;
2554 pCurTSS16->di = pVCpu->cpum.GstCtx.di;
2555 pCurTSS16->es = pVCpu->cpum.GstCtx.es.Sel;
2556 pCurTSS16->cs = pVCpu->cpum.GstCtx.cs.Sel;
2557 pCurTSS16->ss = pVCpu->cpum.GstCtx.ss.Sel;
2558 pCurTSS16->ds = pVCpu->cpum.GstCtx.ds.Sel;
2559
2560 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
2561 if (rcStrict != VINF_SUCCESS)
2562 {
2563 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2564 VBOXSTRICTRC_VAL(rcStrict)));
2565 return rcStrict;
2566 }
2567 }
2568
2569 /*
2570 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
2571 */
2572 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2573 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2574 {
2575 /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */
2576 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
2577 pNewTSS->selPrev = pVCpu->cpum.GstCtx.tr.Sel;
2578 }
2579
2580 /*
2581 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky,
2582 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
2583 */
2584 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
2585 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
2586 bool fNewDebugTrap;
2587 if (fIsNewTSS386)
2588 {
2589 PCX86TSS32 pNewTSS32 = (PCX86TSS32)pvNewTSS;
2590 uNewCr3 = (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
2591 uNewEip = pNewTSS32->eip;
2592 uNewEflags = pNewTSS32->eflags;
2593 uNewEax = pNewTSS32->eax;
2594 uNewEcx = pNewTSS32->ecx;
2595 uNewEdx = pNewTSS32->edx;
2596 uNewEbx = pNewTSS32->ebx;
2597 uNewEsp = pNewTSS32->esp;
2598 uNewEbp = pNewTSS32->ebp;
2599 uNewEsi = pNewTSS32->esi;
2600 uNewEdi = pNewTSS32->edi;
2601 uNewES = pNewTSS32->es;
2602 uNewCS = pNewTSS32->cs;
2603 uNewSS = pNewTSS32->ss;
2604 uNewDS = pNewTSS32->ds;
2605 uNewFS = pNewTSS32->fs;
2606 uNewGS = pNewTSS32->gs;
2607 uNewLdt = pNewTSS32->selLdt;
2608 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
2609 }
2610 else
2611 {
2612 PCX86TSS16 pNewTSS16 = (PCX86TSS16)pvNewTSS;
2613 uNewCr3 = 0;
2614 uNewEip = pNewTSS16->ip;
2615 uNewEflags = pNewTSS16->flags;
2616 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax;
2617 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx;
2618 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx;
2619 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx;
2620 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp;
2621 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp;
2622 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si;
2623 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di;
2624 uNewES = pNewTSS16->es;
2625 uNewCS = pNewTSS16->cs;
2626 uNewSS = pNewTSS16->ss;
2627 uNewDS = pNewTSS16->ds;
2628 uNewFS = 0;
2629 uNewGS = 0;
2630 uNewLdt = pNewTSS16->selLdt;
2631 fNewDebugTrap = false;
2632 }
2633
2634 if (GCPtrNewTSS == GCPtrCurTSS)
2635 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
2636 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
2637
2638 /*
2639 * We're done accessing the new TSS.
2640 */
2641 rcStrict = iemMemCommitAndUnmap(pVCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
2642 if (rcStrict != VINF_SUCCESS)
2643 {
2644 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
2645 return rcStrict;
2646 }
2647
2648 /*
2649 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
2650 */
2651 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
2652 {
2653 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
2654 pVCpu->cpum.GstCtx.gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0);
2655 if (rcStrict != VINF_SUCCESS)
2656 {
2657 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2658 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2659 return rcStrict;
2660 }
2661
2662 /* Check that the descriptor indicates the new TSS is available (not busy). */
2663 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2664 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
2665 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
2666
2667 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2668 rcStrict = iemMemCommitAndUnmap(pVCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
2669 if (rcStrict != VINF_SUCCESS)
2670 {
2671 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2672 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2673 return rcStrict;
2674 }
2675 }
2676
2677 /*
2678 * From this point on, we're technically in the new task. Any exception raised from here on
2679 * is considered to occur after the task switch has completed but before any instruction in the new task executes.
2680 */
2681 pVCpu->cpum.GstCtx.tr.Sel = SelTSS;
2682 pVCpu->cpum.GstCtx.tr.ValidSel = SelTSS;
2683 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
2684 pVCpu->cpum.GstCtx.tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
2685 pVCpu->cpum.GstCtx.tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
2686 pVCpu->cpum.GstCtx.tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy);
2687 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
2688
2689 /* Set the busy bit in TR. */
2690 pVCpu->cpum.GstCtx.tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2691
2692 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
2693 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2694 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2695 {
2696 uNewEflags |= X86_EFL_NT;
2697 }
2698
2699 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
2700 pVCpu->cpum.GstCtx.cr0 |= X86_CR0_TS;
2701 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
2702
2703 pVCpu->cpum.GstCtx.eip = uNewEip;
2704 pVCpu->cpum.GstCtx.eax = uNewEax;
2705 pVCpu->cpum.GstCtx.ecx = uNewEcx;
2706 pVCpu->cpum.GstCtx.edx = uNewEdx;
2707 pVCpu->cpum.GstCtx.ebx = uNewEbx;
2708 pVCpu->cpum.GstCtx.esp = uNewEsp;
2709 pVCpu->cpum.GstCtx.ebp = uNewEbp;
2710 pVCpu->cpum.GstCtx.esi = uNewEsi;
2711 pVCpu->cpum.GstCtx.edi = uNewEdi;
2712
2713 uNewEflags &= X86_EFL_LIVE_MASK;
2714 uNewEflags |= X86_EFL_RA1_MASK;
2715 IEMMISC_SET_EFL(pVCpu, uNewEflags);
2716
2717 /*
2718 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
2719 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
2720 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
2721 */
2722 pVCpu->cpum.GstCtx.es.Sel = uNewES;
2723 pVCpu->cpum.GstCtx.es.Attr.u &= ~X86DESCATTR_P;
2724
2725 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
2726 pVCpu->cpum.GstCtx.cs.Attr.u &= ~X86DESCATTR_P;
2727
2728 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
2729 pVCpu->cpum.GstCtx.ss.Attr.u &= ~X86DESCATTR_P;
2730
2731 pVCpu->cpum.GstCtx.ds.Sel = uNewDS;
2732 pVCpu->cpum.GstCtx.ds.Attr.u &= ~X86DESCATTR_P;
2733
2734 pVCpu->cpum.GstCtx.fs.Sel = uNewFS;
2735 pVCpu->cpum.GstCtx.fs.Attr.u &= ~X86DESCATTR_P;
2736
2737 pVCpu->cpum.GstCtx.gs.Sel = uNewGS;
2738 pVCpu->cpum.GstCtx.gs.Attr.u &= ~X86DESCATTR_P;
2739 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2740
2741 pVCpu->cpum.GstCtx.ldtr.Sel = uNewLdt;
2742 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
2743 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_P;
2744 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
2745
2746 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2747 {
2748 pVCpu->cpum.GstCtx.es.Attr.u |= X86DESCATTR_UNUSABLE;
2749 pVCpu->cpum.GstCtx.cs.Attr.u |= X86DESCATTR_UNUSABLE;
2750 pVCpu->cpum.GstCtx.ss.Attr.u |= X86DESCATTR_UNUSABLE;
2751 pVCpu->cpum.GstCtx.ds.Attr.u |= X86DESCATTR_UNUSABLE;
2752 pVCpu->cpum.GstCtx.fs.Attr.u |= X86DESCATTR_UNUSABLE;
2753 pVCpu->cpum.GstCtx.gs.Attr.u |= X86DESCATTR_UNUSABLE;
2754 pVCpu->cpum.GstCtx.ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
2755 }
2756
2757 /*
2758 * Switch CR3 for the new task.
2759 */
2760 if ( fIsNewTSS386
2761 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG))
2762 {
2763 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
2764 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
2765 AssertRCSuccessReturn(rc, rc);
2766
2767 /* Inform PGM. */
2768 /** @todo Should we raise \#GP(0) here when PAE PDPEs are invalid? */
2769 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PGE));
2770 AssertRCReturn(rc, rc);
2771 /* ignore informational status codes */
2772
2773 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
2774 }
2775
2776 /*
2777 * Switch LDTR for the new task.
2778 */
2779 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
2780 iemHlpLoadNullDataSelectorProt(pVCpu, &pVCpu->cpum.GstCtx.ldtr, uNewLdt);
2781 else
2782 {
2783 Assert(!pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
2784
2785 IEMSELDESC DescNewLdt;
2786 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
2787 if (rcStrict != VINF_SUCCESS)
2788 {
2789 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
2790 uNewLdt, pVCpu->cpum.GstCtx.gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
2791 return rcStrict;
2792 }
2793 if ( !DescNewLdt.Legacy.Gen.u1Present
2794 || DescNewLdt.Legacy.Gen.u1DescType
2795 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
2796 {
2797 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
2798 uNewLdt, DescNewLdt.Legacy.u));
2799 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
2800 }
2801
2802 pVCpu->cpum.GstCtx.ldtr.ValidSel = uNewLdt;
2803 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
2804 pVCpu->cpum.GstCtx.ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
2805 pVCpu->cpum.GstCtx.ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
2806 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
2807 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2808 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
2809 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
2810 }
2811
2812 IEMSELDESC DescSS;
2813 if (IEM_IS_V86_MODE(pVCpu))
2814 {
2815 IEM_SET_CPL(pVCpu, 3);
2816 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.es, uNewES);
2817 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.cs, uNewCS);
2818 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ss, uNewSS);
2819 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ds, uNewDS);
2820 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.fs, uNewFS);
2821 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.gs, uNewGS);
2822
2823 /* Quick fix: fake DescSS. */ /** @todo fix the code further down? */
2824 DescSS.Legacy.u = 0;
2825 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u32Limit;
2826 DescSS.Legacy.Gen.u4LimitHigh = pVCpu->cpum.GstCtx.ss.u32Limit >> 16;
2827 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u64Base;
2828 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 16);
2829 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 24);
2830 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
2831 DescSS.Legacy.Gen.u2Dpl = 3;
2832 }
2833 else
2834 {
2835 uint8_t const uNewCpl = (uNewCS & X86_SEL_RPL);
2836
2837 /*
2838 * Load the stack segment for the new task.
2839 */
2840 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
2841 {
2842 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
2843 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2844 }
2845
2846 /* Fetch the descriptor. */
2847 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
2848 if (rcStrict != VINF_SUCCESS)
2849 {
2850 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
2851 VBOXSTRICTRC_VAL(rcStrict)));
2852 return rcStrict;
2853 }
2854
2855 /* SS must be a data segment and writable. */
2856 if ( !DescSS.Legacy.Gen.u1DescType
2857 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
2858 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
2859 {
2860 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
2861 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
2862 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2863 }
2864
2865 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
2866 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
2867 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
2868 {
2869 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
2870 uNewCpl));
2871 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2872 }
2873
2874 /* Is it there? */
2875 if (!DescSS.Legacy.Gen.u1Present)
2876 {
2877 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
2878 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2879 }
2880
2881 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
2882 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
2883
2884 /* Set the accessed bit before committing the result into SS. */
2885 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2886 {
2887 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
2888 if (rcStrict != VINF_SUCCESS)
2889 return rcStrict;
2890 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2891 }
2892
2893 /* Commit SS. */
2894 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
2895 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSS;
2896 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
2897 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimit;
2898 pVCpu->cpum.GstCtx.ss.u64Base = u64Base;
2899 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
2900 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
2901
2902 /* CPL has changed, update IEM before loading rest of segments. */
2903 IEM_SET_CPL(pVCpu, uNewCpl);
2904
2905 /*
2906 * Load the data segments for the new task.
2907 */
2908 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.es, uNewES);
2909 if (rcStrict != VINF_SUCCESS)
2910 return rcStrict;
2911 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.ds, uNewDS);
2912 if (rcStrict != VINF_SUCCESS)
2913 return rcStrict;
2914 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.fs, uNewFS);
2915 if (rcStrict != VINF_SUCCESS)
2916 return rcStrict;
2917 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.gs, uNewGS);
2918 if (rcStrict != VINF_SUCCESS)
2919 return rcStrict;
2920
2921 /*
2922 * Load the code segment for the new task.
2923 */
2924 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
2925 {
2926 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
2927 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2928 }
2929
2930 /* Fetch the descriptor. */
2931 IEMSELDESC DescCS;
2932 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
2933 if (rcStrict != VINF_SUCCESS)
2934 {
2935 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
2936 return rcStrict;
2937 }
2938
2939 /* CS must be a code segment. */
2940 if ( !DescCS.Legacy.Gen.u1DescType
2941 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
2942 {
2943 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
2944 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
2945 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2946 }
2947
2948 /* For conforming CS, DPL must be less than or equal to the RPL. */
2949 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2950 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
2951 {
2952 Log(("iemTaskSwitch: conforming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
2953 DescCS.Legacy.Gen.u2Dpl));
2954 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2955 }
2956
2957 /* For non-conforming CS, DPL must match RPL. */
2958 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2959 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
2960 {
2961 Log(("iemTaskSwitch: non-conforming CS DPL RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
2962 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
2963 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2964 }
2965
2966 /* Is it there? */
2967 if (!DescCS.Legacy.Gen.u1Present)
2968 {
2969 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
2970 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2971 }
2972
2973 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
2974 u64Base = X86DESC_BASE(&DescCS.Legacy);
2975
2976 /* Set the accessed bit before committing the result into CS. */
2977 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2978 {
2979 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
2980 if (rcStrict != VINF_SUCCESS)
2981 return rcStrict;
2982 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2983 }
2984
2985 /* Commit CS. */
2986 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
2987 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCS;
2988 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
2989 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
2990 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
2991 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
2992 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
2993 }
2994
2995 /* Make sure the CPU mode is correct. */
2996 uint32_t const fExecNew = iemCalcExecFlags(pVCpu) | (pVCpu->iem.s.fExec & IEM_F_USER_OPTS);
2997 if (fExecNew != pVCpu->iem.s.fExec)
2998 Log(("iemTaskSwitch: fExec %#x -> %#x (xor %#x)\n", pVCpu->iem.s.fExec, fExecNew, pVCpu->iem.s.fExec ^ fExecNew));
2999 pVCpu->iem.s.fExec = fExecNew;
3000
3001 /** @todo Debug trap. */
3002 if (fIsNewTSS386 && fNewDebugTrap)
3003 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
3004
3005 /*
3006 * Construct the error code masks based on what caused this task switch.
3007 * See Intel Instruction reference for INT.
3008 */
3009 uint16_t uExt;
3010 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
3011 && ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3012 || (fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)))
3013 uExt = 1;
3014 else
3015 uExt = 0;
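/* uExt=1 flags the error code as caused by an external event (hardware interrupt, exception or ICEBP); it stays zero for software INTs and for CALL/JMP/IRET initiated switches. */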
3016
3017 /*
3018 * Push any error code onto the new stack.
3019 */
3020 if (fFlags & IEM_XCPT_FLAGS_ERR)
3021 {
3022 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
3023 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
3024 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
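/* The error code is pushed as a dword on a 386 TSS and as a word on a 286 TSS. */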
3025
3026 /* Check that there is sufficient space on the stack. */
3027 /** @todo Factor out segment limit checking for normal/expand down segments
3028 * into a separate function. */
3029 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3030 {
3031 if ( pVCpu->cpum.GstCtx.esp - 1 > cbLimitSS
3032 || pVCpu->cpum.GstCtx.esp < cbStackFrame)
3033 {
3034 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
3035 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
3036 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
3037 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
3038 }
3039 }
3040 else
3041 {
3042 if ( pVCpu->cpum.GstCtx.esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
3043 || pVCpu->cpum.GstCtx.esp - cbStackFrame < cbLimitSS + UINT32_C(1))
3044 {
3045 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
3046 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
3047 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
3048 }
3049 }
3050
3051
3052 if (fIsNewTSS386)
3053 rcStrict = iemMemStackPushU32(pVCpu, uErr);
3054 else
3055 rcStrict = iemMemStackPushU16(pVCpu, uErr);
3056 if (rcStrict != VINF_SUCCESS)
3057 {
3058 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
3059 fIsNewTSS386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
3060 return rcStrict;
3061 }
3062 }
3063
3064 /* Check the new EIP against the new CS limit. */
3065 if (pVCpu->cpum.GstCtx.eip > pVCpu->cpum.GstCtx.cs.u32Limit)
3066 {
3067 Log(("iemTaskSwitch: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
3068 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.cs.u32Limit));
3069 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
3070 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
3071 }
3072
3073 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip,
3074 pVCpu->cpum.GstCtx.ss.Sel));
3075 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3076}
3077
3078
3079/**
3080 * Implements exceptions and interrupts for protected mode.
3081 *
3082 * @returns VBox strict status code.
3083 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3084 * @param cbInstr The number of bytes to offset rIP by in the return
3085 * address.
3086 * @param u8Vector The interrupt / exception vector number.
3087 * @param fFlags The flags.
3088 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3089 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3090 */
3091static VBOXSTRICTRC
3092iemRaiseXcptOrIntInProtMode(PVMCPUCC pVCpu,
3093 uint8_t cbInstr,
3094 uint8_t u8Vector,
3095 uint32_t fFlags,
3096 uint16_t uErr,
3097 uint64_t uCr2) RT_NOEXCEPT
3098{
3099 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3100
3101 /*
3102 * Read the IDT entry.
3103 */
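/* Each protected mode IDT entry (gate descriptor) is 8 bytes, so the IDT limit must cover 8 * vector + 7. */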
3104 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
3105 {
3106 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3107 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3108 }
3109 X86DESC Idte;
3110 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
3111 pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(8) * u8Vector);
3112 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3113 {
3114 Log(("iemRaiseXcptOrIntInProtMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3115 return rcStrict;
3116 }
3117 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x - from %04x:%08RX64 efl=%#x depth=%d\n",
3118 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3119 Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow,
3120 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.eflags.u, pVCpu->iem.s.cXcptRecursions));
3121
3122 /*
3123 * Check the descriptor type, DPL and such.
3124 * ASSUMES this is done in the same order as described for call-gate calls.
3125 */
3126 if (Idte.Gate.u1DescType)
3127 {
3128 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3129 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3130 }
3131 bool fTaskGate = false;
3132 uint8_t f32BitGate = true;
3133 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
3134 switch (Idte.Gate.u4Type)
3135 {
3136 case X86_SEL_TYPE_SYS_UNDEFINED:
3137 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
3138 case X86_SEL_TYPE_SYS_LDT:
3139 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3140 case X86_SEL_TYPE_SYS_286_CALL_GATE:
3141 case X86_SEL_TYPE_SYS_UNDEFINED2:
3142 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
3143 case X86_SEL_TYPE_SYS_UNDEFINED3:
3144 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3145 case X86_SEL_TYPE_SYS_386_CALL_GATE:
3146 case X86_SEL_TYPE_SYS_UNDEFINED4:
3147 {
3148 /** @todo check what actually happens when the type is wrong...
3149 * esp. call gates. */
3150 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3151 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3152 }
3153
3154 case X86_SEL_TYPE_SYS_286_INT_GATE:
3155 f32BitGate = false;
3156 RT_FALL_THRU();
3157 case X86_SEL_TYPE_SYS_386_INT_GATE:
3158 fEflToClear |= X86_EFL_IF;
3159 break;
3160
3161 case X86_SEL_TYPE_SYS_TASK_GATE:
3162 fTaskGate = true;
3163#ifndef IEM_IMPLEMENTS_TASKSWITCH
3164 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
3165#endif
3166 break;
3167
3168 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
3169 f32BitGate = false;
RT_FALL_THRU();
3170 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
3171 break;
3172
3173 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3174 }
3175
3176 /* Check DPL against CPL if applicable. */
3177 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
3178 {
3179 if (IEM_GET_CPL(pVCpu) > Idte.Gate.u2Dpl)
3180 {
3181 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, IEM_GET_CPL(pVCpu), Idte.Gate.u2Dpl));
3182 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3183 }
3184 }
3185
3186 /* Is it there? */
3187 if (!Idte.Gate.u1Present)
3188 {
3189 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
3190 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3191 }
3192
3193 /* Is it a task-gate? */
3194 if (fTaskGate)
3195 {
3196 /*
3197 * Construct the error code masks based on what caused this task switch.
3198 * See Intel Instruction reference for INT.
3199 */
3200 uint16_t const uExt = ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3201 && !(fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)) ? 0 : 1;
3202 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
3203 RTSEL SelTSS = Idte.Gate.u16Sel;
3204
3205 /*
3206 * Fetch the TSS descriptor in the GDT.
3207 */
3208 IEMSELDESC DescTSS;
3209 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
3210 if (rcStrict != VINF_SUCCESS)
3211 {
3212 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
3213 VBOXSTRICTRC_VAL(rcStrict)));
3214 return rcStrict;
3215 }
3216
3217 /* The TSS descriptor must be a system segment and be available (not busy). */
3218 if ( DescTSS.Legacy.Gen.u1DescType
3219 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
3220 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
3221 {
3222 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
3223 u8Vector, SelTSS, DescTSS.Legacy.au64));
3224 return iemRaiseGeneralProtectionFault(pVCpu, (SelTSS & uSelMask) | uExt);
3225 }
3226
3227 /* The TSS must be present. */
3228 if (!DescTSS.Legacy.Gen.u1Present)
3229 {
3230 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
3231 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTSS & uSelMask) | uExt);
3232 }
3233
3234 /* Do the actual task switch. */
3235 return iemTaskSwitch(pVCpu, IEMTASKSWITCH_INT_XCPT,
3236 (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip,
3237 fFlags, uErr, uCr2, SelTSS, &DescTSS);
3238 }
3239
3240 /* A null CS is bad. */
3241 RTSEL NewCS = Idte.Gate.u16Sel;
3242 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3243 {
3244 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3245 return iemRaiseGeneralProtectionFault0(pVCpu);
3246 }
3247
3248 /* Fetch the descriptor for the new CS. */
3249 IEMSELDESC DescCS;
3250 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
3251 if (rcStrict != VINF_SUCCESS)
3252 {
3253 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3254 return rcStrict;
3255 }
3256
3257 /* Must be a code segment. */
3258 if (!DescCS.Legacy.Gen.u1DescType)
3259 {
3260 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3261 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3262 }
3263 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3264 {
3265 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3266 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3267 }
3268
3269 /* Don't allow lowering the privilege level. */
3270 /** @todo Does the lowering of privileges apply to software interrupts
3271 * only? This has bearings on the more-privileged or
3272 * same-privilege stack behavior further down. A testcase would
3273 * be nice. */
3274 if (DescCS.Legacy.Gen.u2Dpl > IEM_GET_CPL(pVCpu))
3275 {
3276 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3277 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
3278 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3279 }
3280
3281 /* Make sure the selector is present. */
3282 if (!DescCS.Legacy.Gen.u1Present)
3283 {
3284 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3285 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
3286 }
3287
3288#ifdef LOG_ENABLED
3289 /* If software interrupt, try to decode it if logging is enabled and such. */
3290 if ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3291 && LogIsItEnabled(RTLOGGRPFLAGS_ENABLED, LOG_GROUP_IEM_SYSCALL))
3292 iemLogSyscallProtModeInt(pVCpu, u8Vector, cbInstr);
3293#endif
3294
3295 /* Check the new EIP against the new CS limit. */
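/* A 286 gate only provides a 16-bit offset; a 386 gate also supplies the high 16 bits. */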
3296 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
3297 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
3298 ? Idte.Gate.u16OffsetLow
3299 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
3300 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
3301 if (uNewEip > cbLimitCS)
3302 {
3303 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
3304 u8Vector, uNewEip, cbLimitCS, NewCS));
3305 return iemRaiseGeneralProtectionFault(pVCpu, 0);
3306 }
3307 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
3308
3309 /* Calc the flag image to push. */
3310 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3311 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3312 fEfl &= ~X86_EFL_RF;
3313 else
3314 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3315
3316 /* From V8086 mode only go to CPL 0. */
3317 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3318 ? IEM_GET_CPL(pVCpu) : DescCS.Legacy.Gen.u2Dpl;
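/* (A conforming CS keeps the current CPL; a non-conforming CS runs at its DPL.) */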
3319 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
3320 {
3321 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
3322 return iemRaiseGeneralProtectionFault(pVCpu, 0);
3323 }
3324
3325 /*
3326 * This in turn means validating the new SS and ESP...
3327 * This in turns means validating the new SS and ESP...
3328 */
3329 if (uNewCpl != IEM_GET_CPL(pVCpu))
3330 {
3331 RTSEL NewSS;
3332 uint32_t uNewEsp;
3333 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, uNewCpl, &NewSS, &uNewEsp);
3334 if (rcStrict != VINF_SUCCESS)
3335 return rcStrict;
3336
3337 IEMSELDESC DescSS;
3338 rcStrict = iemMiscValidateNewSS(pVCpu, NewSS, uNewCpl, &DescSS);
3339 if (rcStrict != VINF_SUCCESS)
3340 return rcStrict;
3341 /* If the new SS is 16-bit, we are only going to use SP, not ESP. */
3342 if (!DescSS.Legacy.Gen.u1DefBig)
3343 {
3344 Log(("iemRaiseXcptOrIntInProtMode: Forcing ESP=%#x to 16 bits\n", uNewEsp));
3345 uNewEsp = (uint16_t)uNewEsp;
3346 }
3347
3348 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
3349
3350 /* Check that there is sufficient space for the stack frame. */
3351 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
3352 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
3353 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
3354 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
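/* The frame holds [error code,] EIP, CS, EFLAGS, ESP and SS, plus ES, DS, FS and GS when coming from V86 mode; each entry is 2 or 4 bytes, so f32BitGate doubles the byte counts above. */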
3355
3356 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3357 {
3358 if ( uNewEsp - 1 > cbLimitSS
3359 || uNewEsp < cbStackFrame)
3360 {
3361 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
3362 u8Vector, NewSS, uNewEsp, cbStackFrame));
3363 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
3364 }
3365 }
3366 else
3367 {
3368 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT16_MAX)
3369 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
3370 {
3371 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
3372 u8Vector, NewSS, uNewEsp, cbStackFrame));
3373 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
3374 }
3375 }
3376
3377 /*
3378 * Start making changes.
3379 */
3380
3381 /* Set the new CPL so that stack accesses use it. */
3382 uint8_t const uOldCpl = IEM_GET_CPL(pVCpu);
3383 IEM_SET_CPL(pVCpu, uNewCpl);
3384
3385 /* Create the stack frame. */
3386 RTPTRUNION uStackFrame;
3387 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
3388 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy),
3389 IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */
3390 if (rcStrict != VINF_SUCCESS)
3391 return rcStrict;
3392 void * const pvStackFrame = uStackFrame.pv;
3393 if (f32BitGate)
3394 {
3395 if (fFlags & IEM_XCPT_FLAGS_ERR)
3396 *uStackFrame.pu32++ = uErr;
3397 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3398 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
3399 uStackFrame.pu32[2] = fEfl;
3400 uStackFrame.pu32[3] = pVCpu->cpum.GstCtx.esp;
3401 uStackFrame.pu32[4] = pVCpu->cpum.GstCtx.ss.Sel;
3402 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
3403 if (fEfl & X86_EFL_VM)
3404 {
3405 uStackFrame.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel;
3406 uStackFrame.pu32[5] = pVCpu->cpum.GstCtx.es.Sel;
3407 uStackFrame.pu32[6] = pVCpu->cpum.GstCtx.ds.Sel;
3408 uStackFrame.pu32[7] = pVCpu->cpum.GstCtx.fs.Sel;
3409 uStackFrame.pu32[8] = pVCpu->cpum.GstCtx.gs.Sel;
3410 }
3411 }
3412 else
3413 {
3414 if (fFlags & IEM_XCPT_FLAGS_ERR)
3415 *uStackFrame.pu16++ = uErr;
3416 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
3417 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
3418 uStackFrame.pu16[2] = fEfl;
3419 uStackFrame.pu16[3] = pVCpu->cpum.GstCtx.sp;
3420 uStackFrame.pu16[4] = pVCpu->cpum.GstCtx.ss.Sel;
3421 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.sp));
3422 if (fEfl & X86_EFL_VM)
3423 {
3424 uStackFrame.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
3425 uStackFrame.pu16[5] = pVCpu->cpum.GstCtx.es.Sel;
3426 uStackFrame.pu16[6] = pVCpu->cpum.GstCtx.ds.Sel;
3427 uStackFrame.pu16[7] = pVCpu->cpum.GstCtx.fs.Sel;
3428 uStackFrame.pu16[8] = pVCpu->cpum.GstCtx.gs.Sel;
3429 }
3430 }
3431 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
3432 if (rcStrict != VINF_SUCCESS)
3433 return rcStrict;
3434
3435 /* Mark the selectors 'accessed' (hope this is the correct time). */
3436 /** @todo testcase: exactly _when_ are the accessed bits set - before or
3437 * after pushing the stack frame? (Write protect the gdt + stack to
3438 * find out.) */
3439 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3440 {
3441 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3442 if (rcStrict != VINF_SUCCESS)
3443 return rcStrict;
3444 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3445 }
3446
3447 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3448 {
3449 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
3450 if (rcStrict != VINF_SUCCESS)
3451 return rcStrict;
3452 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3453 }
3454
3455 /*
3456 * Start committing the register changes (joins with the DPL=CPL branch).
3457 */
3458 pVCpu->cpum.GstCtx.ss.Sel = NewSS;
3459 pVCpu->cpum.GstCtx.ss.ValidSel = NewSS;
3460 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3461 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimitSS;
3462 pVCpu->cpum.GstCtx.ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
3463 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3464 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
3465 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
3466 * SP is loaded).
3467 * Need to check the other combinations too:
3468 * - 16-bit TSS, 32-bit handler
3469 * - 32-bit TSS, 16-bit handler */
3470 if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
3471 pVCpu->cpum.GstCtx.sp = (uint16_t)(uNewEsp - cbStackFrame);
3472 else
3473 pVCpu->cpum.GstCtx.rsp = uNewEsp - cbStackFrame;
3474
3475 if (fEfl & X86_EFL_VM)
3476 {
3477 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.gs);
3478 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.fs);
3479 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.es);
3480 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.ds);
3481 }
3482 }
3483 /*
3484 * Same privilege, no stack change and smaller stack frame.
3485 */
3486 else
3487 {
3488 uint64_t uNewRsp;
3489 RTPTRUNION uStackFrame;
3490 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
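/* Only [error code,] EIP, CS and EFLAGS are pushed here; 2 or 4 bytes each depending on the gate size. */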
3491 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, f32BitGate ? 3 : 1, &uStackFrame.pv, &uNewRsp);
3492 if (rcStrict != VINF_SUCCESS)
3493 return rcStrict;
3494 void * const pvStackFrame = uStackFrame.pv;
3495
3496 if (f32BitGate)
3497 {
3498 if (fFlags & IEM_XCPT_FLAGS_ERR)
3499 *uStackFrame.pu32++ = uErr;
3500 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3501 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | IEM_GET_CPL(pVCpu);
3502 uStackFrame.pu32[2] = fEfl;
3503 }
3504 else
3505 {
3506 if (fFlags & IEM_XCPT_FLAGS_ERR)
3507 *uStackFrame.pu16++ = uErr;
3508 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3509 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | IEM_GET_CPL(pVCpu);
3510 uStackFrame.pu16[2] = fEfl;
3511 }
3512 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
3513 if (rcStrict != VINF_SUCCESS)
3514 return rcStrict;
3515
3516 /* Mark the CS selector as 'accessed'. */
3517 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3518 {
3519 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3520 if (rcStrict != VINF_SUCCESS)
3521 return rcStrict;
3522 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3523 }
3524
3525 /*
3526 * Start committing the register changes (joins with the other branch).
3527 */
3528 pVCpu->cpum.GstCtx.rsp = uNewRsp;
3529 }
3530
3531 /* ... register committing continues. */
3532 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3533 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3534 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3535 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCS;
3536 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3537 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3538
3539 pVCpu->cpum.GstCtx.rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
3540 fEfl &= ~fEflToClear;
3541 IEMMISC_SET_EFL(pVCpu, fEfl);
3542
3543 if (fFlags & IEM_XCPT_FLAGS_CR2)
3544 pVCpu->cpum.GstCtx.cr2 = uCr2;
3545
3546 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3547 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3548
3549 /* Make sure the execution flags are correct. */
3550 uint32_t const fExecNew = iemCalcExecFlags(pVCpu) | (pVCpu->iem.s.fExec & IEM_F_USER_OPTS);
3551 if (fExecNew != pVCpu->iem.s.fExec)
3552 Log(("iemRaiseXcptOrIntInProtMode: fExec %#x -> %#x (xor %#x)\n",
3553 pVCpu->iem.s.fExec, fExecNew, pVCpu->iem.s.fExec ^ fExecNew));
3554 pVCpu->iem.s.fExec = fExecNew;
3555 Assert(IEM_GET_CPL(pVCpu) == uNewCpl);
3556
3557 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3558}
3559
3560
3561/**
3562 * Implements exceptions and interrupts for long mode.
3563 *
3564 * @returns VBox strict status code.
3565 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3566 * @param cbInstr The number of bytes to offset rIP by in the return
3567 * address.
3568 * @param u8Vector The interrupt / exception vector number.
3569 * @param fFlags The flags.
3570 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3571 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3572 */
3573static VBOXSTRICTRC
3574iemRaiseXcptOrIntInLongMode(PVMCPUCC pVCpu,
3575 uint8_t cbInstr,
3576 uint8_t u8Vector,
3577 uint32_t fFlags,
3578 uint16_t uErr,
3579 uint64_t uCr2) RT_NOEXCEPT
3580{
3581 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3582
3583 /*
3584 * Read the IDT entry.
3585 */
3586 uint16_t offIdt = (uint16_t)u8Vector << 4;
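/* Long mode IDT entries are 16 bytes wide, fetched as two 8-byte halves below. */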
3587 if (pVCpu->cpum.GstCtx.idtr.cbIdt < offIdt + 7)
3588 {
3589 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3590 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3591 }
3592 X86DESC64 Idte;
3593#ifdef _MSC_VER /* Shut up silly compiler warning. */
3594 Idte.au64[0] = 0;
3595 Idte.au64[1] = 0;
3596#endif
3597 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt);
3598 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3599 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt + 8);
3600 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3601 {
3602 Log(("iemRaiseXcptOrIntInLongMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3603 return rcStrict;
3604 }
3605 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
3606 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3607 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
3608
3609 /*
3610 * Check the descriptor type, DPL and such.
3611 * ASSUMES this is done in the same order as described for call-gate calls.
3612 */
3613 if (Idte.Gate.u1DescType)
3614 {
3615 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3616 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3617 }
3618 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
3619 switch (Idte.Gate.u4Type)
3620 {
3621 case AMD64_SEL_TYPE_SYS_INT_GATE:
3622 fEflToClear |= X86_EFL_IF;
3623 break;
3624 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
3625 break;
3626
3627 default:
3628 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3629 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3630 }
3631
3632 /* Check DPL against CPL if applicable. */
3633 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
3634 {
3635 if (IEM_GET_CPL(pVCpu) > Idte.Gate.u2Dpl)
3636 {
3637 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, IEM_GET_CPL(pVCpu), Idte.Gate.u2Dpl));
3638 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3639 }
3640 }
3641
3642 /* Is it there? */
3643 if (!Idte.Gate.u1Present)
3644 {
3645 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
3646 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3647 }
3648
3649 /* A null CS is bad. */
3650 RTSEL NewCS = Idte.Gate.u16Sel;
3651 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3652 {
3653 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3654 return iemRaiseGeneralProtectionFault0(pVCpu);
3655 }
3656
3657 /* Fetch the descriptor for the new CS. */
3658 IEMSELDESC DescCS;
3659 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
3660 if (rcStrict != VINF_SUCCESS)
3661 {
3662 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3663 return rcStrict;
3664 }
3665
3666 /* Must be a 64-bit code segment. */
3667 if (!DescCS.Long.Gen.u1DescType)
3668 {
3669 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3670 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3671 }
3672 if ( !DescCS.Long.Gen.u1Long
3673 || DescCS.Long.Gen.u1DefBig
3674 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
3675 {
3676 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
3677 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
3678 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3679 }
3680
3681 /* Don't allow lowering the privilege level. For non-conforming CS
3682 selectors, the CS.DPL sets the privilege level the trap/interrupt
3683 handler runs at. For conforming CS selectors, the CPL remains
3684 unchanged, but the CS.DPL must be <= CPL. */
3685 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
3686 * when CPU in Ring-0. Result \#GP? */
3687 if (DescCS.Legacy.Gen.u2Dpl > IEM_GET_CPL(pVCpu))
3688 {
3689 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3690 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
3691 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3692 }
3693
3694
3695 /* Make sure the selector is present. */
3696 if (!DescCS.Legacy.Gen.u1Present)
3697 {
3698 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3699 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
3700 }
3701
3702 /* Check that the new RIP is canonical. */
3703 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
3704 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
3705 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
3706 if (!IEM_IS_CANONICAL(uNewRip))
3707 {
3708 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
3709 return iemRaiseGeneralProtectionFault0(pVCpu);
3710 }
3711
3712 /*
3713 * If the privilege level changes or if the IST isn't zero, we need to get
3714 * a new stack from the TSS.
3715 */
3716 uint64_t uNewRsp;
3717 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3718 ? IEM_GET_CPL(pVCpu) : DescCS.Legacy.Gen.u2Dpl;
3719 if ( uNewCpl != IEM_GET_CPL(pVCpu)
3720 || Idte.Gate.u3IST != 0)
3721 {
3722 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
3723 if (rcStrict != VINF_SUCCESS)
3724 return rcStrict;
3725 }
3726 else
3727 uNewRsp = pVCpu->cpum.GstCtx.rsp;
3728 uNewRsp &= ~(uint64_t)0xf;
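/* In long mode the CPU aligns the stack on a 16 byte boundary before pushing the frame. */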
3729
3730 /*
3731 * Calc the flag image to push.
3732 */
3733 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3734 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3735 fEfl &= ~X86_EFL_RF;
3736 else
3737 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3738
3739 /*
3740 * Start making changes.
3741 */
3742 /* Set the new CPL so that stack accesses use it. */
3743 uint8_t const uOldCpl = IEM_GET_CPL(pVCpu);
3744 IEM_SET_CPL(pVCpu, uNewCpl);
3745/** @todo Setting CPL this early seems wrong as it would affect any errors we
3746 * raise accessing the stack and (?) GDT/LDT... */
3747
3748 /* Create the stack frame. */
3749 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
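/* Frame: [error code,] RIP, CS, RFLAGS, RSP and SS - all pushed as qwords in long mode. */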
3750 RTPTRUNION uStackFrame;
3751 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
3752 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */
3753 if (rcStrict != VINF_SUCCESS)
3754 return rcStrict;
3755 void * const pvStackFrame = uStackFrame.pv;
3756
3757 if (fFlags & IEM_XCPT_FLAGS_ERR)
3758 *uStackFrame.pu64++ = uErr;
3759 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.rip + cbInstr : pVCpu->cpum.GstCtx.rip;
3760 uStackFrame.pu64[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
3761 uStackFrame.pu64[2] = fEfl;
3762 uStackFrame.pu64[3] = pVCpu->cpum.GstCtx.rsp;
3763 uStackFrame.pu64[4] = pVCpu->cpum.GstCtx.ss.Sel;
3764 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
3765 if (rcStrict != VINF_SUCCESS)
3766 return rcStrict;
3767
3768 /* Mark the CS selector 'accessed' (hope this is the correct time). */
3769 /** @todo testcase: exactly _when_ are the accessed bits set - before or
3770 * after pushing the stack frame? (Write protect the gdt + stack to
3771 * find out.) */
3772 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3773 {
3774 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3775 if (rcStrict != VINF_SUCCESS)
3776 return rcStrict;
3777 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3778 }
3779
3780 /*
3781 * Start committing the register changes.
3782 */
3783 /** @todo research/testcase: Figure out what VT-x and AMD-V loads into the
3784 * hidden registers when interrupting 32-bit or 16-bit code! */
3785 if (uNewCpl != uOldCpl)
3786 {
3787 pVCpu->cpum.GstCtx.ss.Sel = 0 | uNewCpl;
3788 pVCpu->cpum.GstCtx.ss.ValidSel = 0 | uNewCpl;
3789 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3790 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
3791 pVCpu->cpum.GstCtx.ss.u64Base = 0;
3792 pVCpu->cpum.GstCtx.ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
3793 }
3794 pVCpu->cpum.GstCtx.rsp = uNewRsp - cbStackFrame;
3795 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3796 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3797 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3798 pVCpu->cpum.GstCtx.cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
3799 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3800 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3801 pVCpu->cpum.GstCtx.rip = uNewRip;
3802
3803 fEfl &= ~fEflToClear;
3804 IEMMISC_SET_EFL(pVCpu, fEfl);
3805
3806 if (fFlags & IEM_XCPT_FLAGS_CR2)
3807 pVCpu->cpum.GstCtx.cr2 = uCr2;
3808
3809 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3810 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3811
3812 iemRecalcExecModeAndCplFlags(pVCpu);
3813
3814 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3815}
3816
3817
3818/**
3819 * Implements exceptions and interrupts.
3820 *
3821 * All exceptions and interrupts go through this function!
3822 *
3823 * @returns VBox strict status code.
3824 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3825 * @param cbInstr The number of bytes to offset rIP by in the return
3826 * address.
3827 * @param u8Vector The interrupt / exception vector number.
3828 * @param fFlags The flags.
3829 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3830 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3831 */
3832VBOXSTRICTRC
3833iemRaiseXcptOrInt(PVMCPUCC pVCpu,
3834 uint8_t cbInstr,
3835 uint8_t u8Vector,
3836 uint32_t fFlags,
3837 uint16_t uErr,
3838 uint64_t uCr2) RT_NOEXCEPT
3839{
3840 /*
3841 * Get all the state that we might need here.
3842 */
3843 IEM_CTX_IMPORT_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3844 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3845
3846#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
3847 /*
3848 * Flush prefetch buffer
3849 */
3850 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
3851#endif
3852
3853 /*
3854 * Perform the V8086 IOPL check and upgrade the fault without nesting.
3855 */
3856 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1VM
3857 && pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL != 3
3858 && (fFlags & ( IEM_XCPT_FLAGS_T_SOFT_INT
3859 | IEM_XCPT_FLAGS_BP_INSTR
3860 | IEM_XCPT_FLAGS_ICEBP_INSTR
3861 | IEM_XCPT_FLAGS_OF_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
3862 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) )
3863 {
3864 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
3865 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
3866 u8Vector = X86_XCPT_GP;
3867 uErr = 0;
3868 }
3869#ifdef DBGFTRACE_ENABLED
3870 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
3871 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
3872 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp);
3873#endif
3874
3875 /*
3876 * Evaluate whether NMI blocking should be in effect.
3877 * Normally, NMI blocking is in effect whenever we inject an NMI.
3878 */
3879 bool fBlockNmi = u8Vector == X86_XCPT_NMI
3880 && (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT);
3881
3882#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3883 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3884 {
3885 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEvent(pVCpu, u8Vector, fFlags, uErr, uCr2, cbInstr);
3886 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
3887 return rcStrict0;
3888
3889 /* If virtual-NMI blocking is in effect for the nested-guest, guest NMIs are not blocked. */
3890 if (pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking)
3891 {
3892 Assert(CPUMIsGuestVmxPinCtlsSet(&pVCpu->cpum.GstCtx, VMX_PIN_CTLS_VIRT_NMI));
3893 fBlockNmi = false;
3894 }
3895 }
3896#endif
3897
3898#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
3899 if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
3900 {
3901 /*
3902 * If the event is being injected as part of VMRUN, it isn't subject to event
3903 * intercepts in the nested-guest. However, secondary exceptions that occur
3904 * during injection of any event -are- subject to exception intercepts.
3905 *
3906 * See AMD spec. 15.20 "Event Injection".
3907 */
3908 if (!pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents)
3909 pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents = true;
3910 else
3911 {
3912 /*
3913 * Check and handle if the event being raised is intercepted.
3914 */
3915 VBOXSTRICTRC rcStrict0 = iemHandleSvmEventIntercept(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
3916 if (rcStrict0 != VINF_SVM_INTERCEPT_NOT_ACTIVE)
3917 return rcStrict0;
3918 }
3919 }
3920#endif
3921
3922 /*
3923 * Set NMI blocking if necessary.
3924 */
3925 if (fBlockNmi)
3926 CPUMSetInterruptInhibitingByNmi(&pVCpu->cpum.GstCtx);
3927
3928 /*
3929 * Do recursion accounting.
3930 */
3931 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
3932 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
3933 if (pVCpu->iem.s.cXcptRecursions == 0)
3934 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
3935 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2));
3936 else
3937 {
3938 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
3939 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt,
3940 pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
3941
3942 if (pVCpu->iem.s.cXcptRecursions >= 4)
3943 {
3944#ifdef DEBUG_bird
3945 AssertFailed();
3946#endif
3947 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
3948 }
3949
3950 /*
3951 * Evaluate the sequence of recurring events.
3952 */
3953 IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevXcpt, uPrevXcpt, fFlags, u8Vector,
3954 NULL /* pXcptRaiseInfo */);
3955 if (enmRaise == IEMXCPTRAISE_CURRENT_XCPT)
3956 { /* likely */ }
3957 else if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
3958 {
3959 Log2(("iemRaiseXcptOrInt: Raising double fault. uPrevXcpt=%#x\n", uPrevXcpt));
3960 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
3961 u8Vector = X86_XCPT_DF;
3962 uErr = 0;
3963#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3964 /* VMX nested-guest #DF intercept needs to be checked here. */
3965 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3966 {
3967 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEventDoubleFault(pVCpu);
3968 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
3969 return rcStrict0;
3970 }
3971#endif
3972 /* SVM nested-guest #DF intercepts need to be checked now. See AMD spec. 15.12 "Exception Intercepts". */
3973 if (IEM_SVM_IS_XCPT_INTERCEPT_SET(pVCpu, X86_XCPT_DF))
3974 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
3975 }
3976 else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)
3977 {
3978 Log2(("iemRaiseXcptOrInt: Raising triple fault. uPrevXcpt=%#x\n", uPrevXcpt));
3979 return iemInitiateCpuShutdown(pVCpu);
3980 }
3981 else if (enmRaise == IEMXCPTRAISE_CPU_HANG)
3982 {
3983 /* If a nested-guest enters an endless CPU loop condition, we'll emulate it; otherwise guru. */
3984 Log2(("iemRaiseXcptOrInt: CPU hang condition detected\n"));
3985 if ( !CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu))
3986 && !CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
3987 return VERR_EM_GUEST_CPU_HANG;
3988 }
3989 else
3990 {
3991 AssertMsgFailed(("Unexpected condition! enmRaise=%#x uPrevXcpt=%#x fPrevXcpt=%#x, u8Vector=%#x fFlags=%#x\n",
3992 enmRaise, uPrevXcpt, fPrevXcpt, u8Vector, fFlags));
3993 return VERR_IEM_IPE_9;
3994 }
3995
3996 /*
3997 * The 'EXT' bit is set when an exception occurs during delivery of an external
3998 * event (such as an interrupt or an earlier exception)[1]. The privileged software
3999 * exception (INT1/ICEBP) also sets the EXT bit[2]. For exceptions generated by
4000 * software interrupts and the INTO and INT3 instructions, the 'EXT' bit is not set[3].
4001 *
4002 * [1] - Intel spec. 6.13 "Error Code"
4003 * [2] - Intel spec. 26.5.1.1 "Details of Vectored-Event Injection".
4004 * [3] - Intel Instruction reference for INT n.
4005 */
4006 if ( (fPrevXcpt & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR))
4007 && (fFlags & IEM_XCPT_FLAGS_ERR)
4008 && u8Vector != X86_XCPT_PF
4009 && u8Vector != X86_XCPT_DF)
4010 {
4011 uErr |= X86_TRAP_ERR_EXTERNAL;
4012 }
4013 }
4014
4015 pVCpu->iem.s.cXcptRecursions++;
4016 pVCpu->iem.s.uCurXcpt = u8Vector;
4017 pVCpu->iem.s.fCurXcpt = fFlags;
4018 pVCpu->iem.s.uCurXcptErr = uErr;
4019 pVCpu->iem.s.uCurXcptCr2 = uCr2;
4020
4021 /*
4022 * Extensive logging.
4023 */
4024#if defined(LOG_ENABLED) && defined(IN_RING3)
4025 if (LogIs3Enabled())
4026 {
4027 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR_MASK);
4028 PVM pVM = pVCpu->CTX_SUFF(pVM);
4029 char szRegs[4096];
4030 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
4031 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
4032 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
4033 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
4034 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
4035 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
4036 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
4037 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
4038 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
4039 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
4040 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
4041 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4042 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4043 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4044 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4045 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4046 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4047 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4048 " efer=%016VR{efer}\n"
4049 " pat=%016VR{pat}\n"
4050 " sf_mask=%016VR{sf_mask}\n"
4051 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4052 " lstar=%016VR{lstar}\n"
4053 " star=%016VR{star} cstar=%016VR{cstar}\n"
4054 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4055 );
4056
4057 char szInstr[256];
4058 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4059 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4060 szInstr, sizeof(szInstr), NULL);
4061 Log3(("%s%s\n", szRegs, szInstr));
4062 }
4063#endif /* LOG_ENABLED */
4064
4065 /*
4066 * Stats.
4067 */
4068 if (!(fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT))
4069 STAM_REL_STATS({ pVCpu->iem.s.aStatInts[u8Vector] += 1; });
4070 else if (u8Vector <= X86_XCPT_LAST)
4071 {
4072 STAM_REL_COUNTER_INC(&pVCpu->iem.s.aStatXcpts[u8Vector]);
4073 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_XCPT, u8Vector),
4074 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base, ASMReadTSC());
4075 }
4076
4077 /*
4078 * A #PF implies an INVLPG for the CR2 value (see 4.10.1.1 in Intel SDM Vol 3)
4079 * to ensure that a stale TLB or paging cache entry will only cause one
4080 * spurious #PF.
4081 */
4082 if ( u8Vector == X86_XCPT_PF
4083 && (fFlags & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_CR2)) == (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_CR2))
4084 IEMTlbInvalidatePage(pVCpu, uCr2);
4085
4086 /*
4087 * Call the mode specific worker function.
4088 */
4089 VBOXSTRICTRC rcStrict;
4090 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
4091 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4092 else if (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LMA)
4093 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4094 else
4095 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4096
4097 /* Flush the prefetch buffer. */
4098 iemOpcodeFlushHeavy(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
4099
4100 /*
4101 * Unwind.
4102 */
4103 pVCpu->iem.s.cXcptRecursions--;
4104 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
4105 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
4106 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u depth=%d\n",
4107 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel,
4108 pVCpu->cpum.GstCtx.esp, IEM_GET_CPL(pVCpu), pVCpu->iem.s.cXcptRecursions + 1));
4109 return rcStrict;
4110}
4111
4112#ifdef IEM_WITH_SETJMP
4113/**
4114 * See iemRaiseXcptOrInt. Will not return.
4115 */
4116DECL_NO_RETURN(void)
4117iemRaiseXcptOrIntJmp(PVMCPUCC pVCpu,
4118 uint8_t cbInstr,
4119 uint8_t u8Vector,
4120 uint32_t fFlags,
4121 uint16_t uErr,
4122 uint64_t uCr2) IEM_NOEXCEPT_MAY_LONGJMP
4123{
4124 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4125 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
4126}
4127#endif
4128
4129
4130/** \#DE - 00. */
4131VBOXSTRICTRC iemRaiseDivideError(PVMCPUCC pVCpu) RT_NOEXCEPT
4132{
4133 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4134}
4135
4136
4137/** \#DB - 01.
4138 * @note This automatically clears DR7.GD. */
4139VBOXSTRICTRC iemRaiseDebugException(PVMCPUCC pVCpu) RT_NOEXCEPT
4140{
4141 /* This always clears RF (via IEM_XCPT_FLAGS_DRx_INSTR_BP). */
4142 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
4143 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_DRx_INSTR_BP, 0, 0);
4144}
4145
4146
4147/** \#BR - 05. */
4148VBOXSTRICTRC iemRaiseBoundRangeExceeded(PVMCPUCC pVCpu) RT_NOEXCEPT
4149{
4150 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_BR, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4151}
4152
4153
4154/** \#UD - 06. */
4155VBOXSTRICTRC iemRaiseUndefinedOpcode(PVMCPUCC pVCpu) RT_NOEXCEPT
4156{
4157 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4158}
4159
4160
4161/** \#NM - 07. */
4162VBOXSTRICTRC iemRaiseDeviceNotAvailable(PVMCPUCC pVCpu) RT_NOEXCEPT
4163{
4164 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4165}
4166
4167
4168/** \#TS(err) - 0a. */
4169VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4170{
4171 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4172}
4173
4174
4175/** \#TS(tr) - 0a. */
4176VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPUCC pVCpu) RT_NOEXCEPT
4177{
4178 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4179 pVCpu->cpum.GstCtx.tr.Sel, 0);
4180}
4181
4182
4183/** \#TS(0) - 0a. */
4184VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPUCC pVCpu) RT_NOEXCEPT
4185{
4186 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4187 0, 0);
4188}
4189
4190
4191/** \#TS(err) - 0a. */
4192VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4193{
4194 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4195 uSel & X86_SEL_MASK_OFF_RPL, 0);
4196}
4197
4198
4199/** \#NP(err) - 0b. */
4200VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4201{
4202 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4203}
4204
4205
4206/** \#NP(sel) - 0b. */
4207VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4208{
4209 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4210 uSel & ~X86_SEL_RPL, 0);
4211}
4212
4213
4214/** \#SS(seg) - 0c. */
4215VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4216{
4217 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4218 uSel & ~X86_SEL_RPL, 0);
4219}
4220
4221
4222/** \#SS(err) - 0c. */
4223VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4224{
4225 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4226}
4227
4228
4229/** \#GP(n) - 0d. */
4230VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4231{
4232 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4233}
4234
4235
4236/** \#GP(0) - 0d. */
4237VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPUCC pVCpu) RT_NOEXCEPT
4238{
4239 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4240}
4241
4242#ifdef IEM_WITH_SETJMP
4243/** \#GP(0) - 0d. */
4244DECL_NO_RETURN(void) iemRaiseGeneralProtectionFault0Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4245{
4246 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4247}
4248#endif
4249
4250
4251/** \#GP(sel) - 0d. */
4252VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
4253{
4254 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4255 Sel & ~X86_SEL_RPL, 0);
4256}
4257
4258
4259/** \#GP(0) - 0d. */
4260VBOXSTRICTRC iemRaiseNotCanonical(PVMCPUCC pVCpu) RT_NOEXCEPT
4261{
4262 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4263}
4264
4265
4266/** \#GP(sel) - 0d. */
4267VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4268{
4269 NOREF(iSegReg); NOREF(fAccess);
4270 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4271 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4272}
4273
4274#ifdef IEM_WITH_SETJMP
4275/** \#GP(sel) - 0d, longjmp. */
4276DECL_NO_RETURN(void) iemRaiseSelectorBoundsJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP
4277{
4278 NOREF(iSegReg); NOREF(fAccess);
4279 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4280 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4281}
4282#endif
4283
4284/** \#GP(sel) - 0d. */
4285VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
4286{
4287 NOREF(Sel);
4288 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4289}
4290
4291#ifdef IEM_WITH_SETJMP
4292/** \#GP(sel) - 0d, longjmp. */
4293DECL_NO_RETURN(void) iemRaiseSelectorBoundsBySelectorJmp(PVMCPUCC pVCpu, RTSEL Sel) IEM_NOEXCEPT_MAY_LONGJMP
4294{
4295 NOREF(Sel);
4296 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4297}
4298#endif
4299
4300
4301/** \#GP(sel) - 0d. */
4302VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4303{
4304 NOREF(iSegReg); NOREF(fAccess);
4305 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4306}
4307
4308#ifdef IEM_WITH_SETJMP
4309/** \#GP(sel) - 0d, longjmp. */
4310DECL_NO_RETURN(void) iemRaiseSelectorInvalidAccessJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP
4311{
4312 NOREF(iSegReg); NOREF(fAccess);
4313 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4314}
4315#endif
4316
4317
4318/** \#PF(n) - 0e. */
4319VBOXSTRICTRC iemRaisePageFault(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess, uint32_t fAccess, int rc) RT_NOEXCEPT
4320{
4321 uint16_t uErr;
4322 switch (rc)
4323 {
4324 case VERR_PAGE_NOT_PRESENT:
4325 case VERR_PAGE_TABLE_NOT_PRESENT:
4326 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
4327 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
4328 uErr = 0;
4329 break;
4330
4331 default:
4332 AssertMsgFailed(("%Rrc\n", rc));
4333 RT_FALL_THRU();
4334 case VERR_ACCESS_DENIED:
4335 uErr = X86_TRAP_PF_P;
4336 break;
4337
4338 /** @todo reserved */
4339 }
4340
4341 if (IEM_GET_CPL(pVCpu) == 3)
4342 uErr |= X86_TRAP_PF_US;
4343
4344 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
4345 && ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
4346 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) ) )
4347 uErr |= X86_TRAP_PF_ID;
4348
4349#if 0 /* This is so much nonsense, really. Why was it done like that? */
4350 /* Note! RW access callers reporting a WRITE protection fault, will clear
4351 the READ flag before calling. So, read-modify-write accesses (RW)
4352 can safely be reported as READ faults. */
4353 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
4354 uErr |= X86_TRAP_PF_RW;
4355#else
4356 if (fAccess & IEM_ACCESS_TYPE_WRITE)
4357 {
4358 /// @todo r=bird: bs3-cpu-basic-2 wants X86_TRAP_PF_RW for xchg and cmpxchg
4359 /// (regardless of outcome of the comparison in the latter case).
4360 //if (!(fAccess & IEM_ACCESS_TYPE_READ))
4361 uErr |= X86_TRAP_PF_RW;
4362 }
4363#endif
4364
4365 /* For FXSAVE and FRSTOR the #PF is typically reported at the max address
4366 of the memory operand rather than at the start of it. (Not sure what
4367 happens if it crosses a page boundary.) The current heuristic for
4368 this is to report the #PF for the last byte if the access is more than
4369 64 bytes. This is probably not correct, but we can work that out later,
4370 main objective now is to get FXSAVE to work like for real hardware and
4371 make bs3-cpu-basic2 work. */
4372 if (cbAccess <= 64)
4373 { /* likely */ }
4374 else
4375 GCPtrWhere += cbAccess - 1;
4376
4377 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
4378 uErr, GCPtrWhere);
4379}
4380
4381#ifdef IEM_WITH_SETJMP
4382/** \#PF(n) - 0e, longjmp. */
4383DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess,
4384 uint32_t fAccess, int rc) IEM_NOEXCEPT_MAY_LONGJMP
4385{
4386 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, cbAccess, fAccess, rc)));
4387}
4388#endif
4389
4390
4391/** \#MF(0) - 10. */
4392VBOXSTRICTRC iemRaiseMathFault(PVMCPUCC pVCpu) RT_NOEXCEPT
4393{
4394 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_NE)
4395 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4396
4397 /* Convert a #MF into a FERR -> IRQ 13. See @bugref{6117}. */
4398 PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13 /* u8Irq */, 1 /* u8Level */, 0 /* uTagSrc */);
4399 return iemRegUpdateRipAndFinishClearingRF(pVCpu);
4400}
4401
4402
4403/** \#AC(0) - 11. */
4404VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPUCC pVCpu) RT_NOEXCEPT
4405{
4406 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4407}
4408
4409#ifdef IEM_WITH_SETJMP
4410/** \#AC(0) - 11, longjmp. */
4411DECL_NO_RETURN(void) iemRaiseAlignmentCheckExceptionJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4412{
4413 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaiseAlignmentCheckException(pVCpu)));
4414}
4415#endif
4416
4417
4418/** \#XF(0)/\#XM(0) - 19. */
4419VBOXSTRICTRC iemRaiseSimdFpException(PVMCPUCC pVCpu) RT_NOEXCEPT
4420{
4421 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_XF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4422}
4423
4424
4425/** Accessed via IEMOP_RAISE_DIVIDE_ERROR. */
4426IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
4427{
4428 NOREF(cbInstr);
4429 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4430}
4431
4432
4433/** Accessed via IEMOP_RAISE_INVALID_LOCK_PREFIX. */
4434IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
4435{
4436 NOREF(cbInstr);
4437 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4438}
4439
4440
4441/** Accessed via IEMOP_RAISE_INVALID_OPCODE. */
4442IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
4443{
4444 NOREF(cbInstr);
4445 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4446}
4447
4448
4449/** @} */
4450
4451/** @name Common opcode decoders.
4452 * @{
4453 */
4454//#include <iprt/mem.h>
4455
4456/**
4457 * Used to add extra details about a stub case.
4458 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4459 */
4460void iemOpStubMsg2(PVMCPUCC pVCpu) RT_NOEXCEPT
4461{
4462#if defined(LOG_ENABLED) && defined(IN_RING3)
4463 PVM pVM = pVCpu->CTX_SUFF(pVM);
4464 char szRegs[4096];
4465 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
4466 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
4467 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
4468 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
4469 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
4470 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
4471 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
4472 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
4473 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
4474 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
4475 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
4476 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4477 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4478 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4479 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4480 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4481 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4482 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4483 " efer=%016VR{efer}\n"
4484 " pat=%016VR{pat}\n"
4485 " sf_mask=%016VR{sf_mask}\n"
4486 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4487 " lstar=%016VR{lstar}\n"
4488 " star=%016VR{star} cstar=%016VR{cstar}\n"
4489 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4490 );
4491
4492 char szInstr[256];
4493 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4494 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4495 szInstr, sizeof(szInstr), NULL);
4496
4497 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
4498#else
4499    RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip);
4500#endif
4501}
4502
4503/** @} */
4504
4505
4506
4507/** @name Register Access.
4508 * @{
4509 */
4510
4511/**
4512 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
4513 *
4514 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4515 * segment limit.
4516 *
4517 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4518 * @param cbInstr Instruction size.
4519 * @param offNextInstr The offset of the next instruction.
4520 * @param enmEffOpSize Effective operand size.
4521 */
4522VBOXSTRICTRC iemRegRipRelativeJumpS8AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int8_t offNextInstr,
4523 IEMMODE enmEffOpSize) RT_NOEXCEPT
4524{
4525 switch (enmEffOpSize)
4526 {
4527 case IEMMODE_16BIT:
4528 {
4529 uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + (int16_t)offNextInstr;
4530 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
4531 || IEM_IS_64BIT_CODE(pVCpu) /* no CS limit checks in 64-bit mode */))
4532 pVCpu->cpum.GstCtx.rip = uNewIp;
4533 else
4534 return iemRaiseGeneralProtectionFault0(pVCpu);
4535 break;
4536 }
4537
4538 case IEMMODE_32BIT:
4539 {
4540 Assert(!IEM_IS_64BIT_CODE(pVCpu));
4541 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
4542
4543 uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + (int32_t)offNextInstr;
4544 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
4545 pVCpu->cpum.GstCtx.rip = uNewEip;
4546 else
4547 return iemRaiseGeneralProtectionFault0(pVCpu);
4548 break;
4549 }
4550
4551 case IEMMODE_64BIT:
4552 {
4553 Assert(IEM_IS_64BIT_CODE(pVCpu));
4554
4555 uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
4556 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
4557 pVCpu->cpum.GstCtx.rip = uNewRip;
4558 else
4559 return iemRaiseGeneralProtectionFault0(pVCpu);
4560 break;
4561 }
4562
4563 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4564 }
4565
4566#ifndef IEM_WITH_CODE_TLB
4567 /* Flush the prefetch buffer. */
4568 pVCpu->iem.s.cbOpcode = cbInstr;
4569#endif
4570
4571 /*
4572 * Clear RF and finish the instruction (maybe raise #DB).
4573 */
4574 return iemRegFinishClearingRF(pVCpu);
4575}
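/* Worked example for the 16-bit case above: with IP=0xfff0, cbInstr=2 and
   offNextInstr=+0x10 the sum is 0x10002, which the uint16_t truncates to
   0x0002; the truncated value is then checked against the CS limit unless
   we're executing 64-bit code. */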
4576
4577
4578/**
4579 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
4580 *
4581 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4582 * segment limit.
4583 *
4584 * @returns Strict VBox status code.
4585 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4586 * @param cbInstr Instruction size.
4587 * @param offNextInstr The offset of the next instruction.
4588 */
4589VBOXSTRICTRC iemRegRipRelativeJumpS16AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int16_t offNextInstr) RT_NOEXCEPT
4590{
4591 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
4592
4593 uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + offNextInstr;
4594 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
4595 || IEM_IS_64BIT_CODE(pVCpu) /* no limit checking in 64-bit mode */))
4596 pVCpu->cpum.GstCtx.rip = uNewIp;
4597 else
4598 return iemRaiseGeneralProtectionFault0(pVCpu);
4599
4600#ifndef IEM_WITH_CODE_TLB
4601 /* Flush the prefetch buffer. */
4602 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4603#endif
4604
4605 /*
4606 * Clear RF and finish the instruction (maybe raise #DB).
4607 */
4608 return iemRegFinishClearingRF(pVCpu);
4609}
4610
4611
4612/**
4613 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
4614 *
4615 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4616 * segment limit.
4617 *
4618 * @returns Strict VBox status code.
4619 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4620 * @param cbInstr Instruction size.
4621 * @param offNextInstr The offset of the next instruction.
4622 * @param enmEffOpSize Effective operand size.
4623 */
4624VBOXSTRICTRC iemRegRipRelativeJumpS32AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int32_t offNextInstr,
4625 IEMMODE enmEffOpSize) RT_NOEXCEPT
4626{
4627 if (enmEffOpSize == IEMMODE_32BIT)
4628 {
4629 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX); Assert(!IEM_IS_64BIT_CODE(pVCpu));
4630
4631 uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + offNextInstr;
4632 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
4633 pVCpu->cpum.GstCtx.rip = uNewEip;
4634 else
4635 return iemRaiseGeneralProtectionFault0(pVCpu);
4636 }
4637 else
4638 {
4639 Assert(enmEffOpSize == IEMMODE_64BIT);
4640
4641 uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
4642 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
4643 pVCpu->cpum.GstCtx.rip = uNewRip;
4644 else
4645 return iemRaiseGeneralProtectionFault0(pVCpu);
4646 }
4647
4648#ifndef IEM_WITH_CODE_TLB
4649 /* Flush the prefetch buffer. */
4650 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4651#endif
4652
4653 /*
4654 * Clear RF and finish the instruction (maybe raise #DB).
4655 */
4656 return iemRegFinishClearingRF(pVCpu);
4657}
4658
4659
4660/**
4661 * Performs a near jump to the specified address.
4662 *
4663 * May raise a \#GP(0) if the new IP is outside the code segment limit.
4664 *
4665 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4666 * @param uNewIp The new IP value.
4667 */
4668VBOXSTRICTRC iemRegRipJumpU16AndFinishClearningRF(PVMCPUCC pVCpu, uint16_t uNewIp) RT_NOEXCEPT
4669{
4670 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
4671 || IEM_IS_64BIT_CODE(pVCpu) /* no limit checks in 64-bit mode */))
4672 pVCpu->cpum.GstCtx.rip = uNewIp;
4673 else
4674 return iemRaiseGeneralProtectionFault0(pVCpu);
4675 /** @todo Test 16-bit jump in 64-bit mode. */
4676
4677#ifndef IEM_WITH_CODE_TLB
4678 /* Flush the prefetch buffer. */
4679 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4680#endif
4681
4682 /*
4683 * Clear RF and finish the instruction (maybe raise #DB).
4684 */
4685 return iemRegFinishClearingRF(pVCpu);
4686}
4687
4688
4689/**
4690 * Performs a near jump to the specified address.
4691 *
4692 * May raise a \#GP(0) if the new RIP is outside the code segment limit.
4693 *
4694 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4695 * @param uNewEip The new EIP value.
4696 */
4697VBOXSTRICTRC iemRegRipJumpU32AndFinishClearningRF(PVMCPUCC pVCpu, uint32_t uNewEip) RT_NOEXCEPT
4698{
4699 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
4700 Assert(!IEM_IS_64BIT_CODE(pVCpu));
4701
4702 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
4703 pVCpu->cpum.GstCtx.rip = uNewEip;
4704 else
4705 return iemRaiseGeneralProtectionFault0(pVCpu);
4706
4707#ifndef IEM_WITH_CODE_TLB
4708 /* Flush the prefetch buffer. */
4709 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4710#endif
4711
4712 /*
4713 * Clear RF and finish the instruction (maybe raise #DB).
4714 */
4715 return iemRegFinishClearingRF(pVCpu);
4716}
4717
4718
4719/**
4720 * Performs a near jump to the specified address.
4721 *
4722 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4723 * segment limit.
4724 *
4725 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4726 * @param uNewRip The new RIP value.
4727 */
4728VBOXSTRICTRC iemRegRipJumpU64AndFinishClearningRF(PVMCPUCC pVCpu, uint64_t uNewRip) RT_NOEXCEPT
4729{
4730 Assert(IEM_IS_64BIT_CODE(pVCpu));
4731
4732 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
4733 pVCpu->cpum.GstCtx.rip = uNewRip;
4734 else
4735 return iemRaiseGeneralProtectionFault0(pVCpu);
4736
4737#ifndef IEM_WITH_CODE_TLB
4738 /* Flush the prefetch buffer. */
4739 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4740#endif
4741
4742 /*
4743 * Clear RF and finish the instruction (maybe raise #DB).
4744 */
4745 return iemRegFinishClearingRF(pVCpu);
4746}
4747
4748/** @} */
4749
4750
4751/** @name FPU access and helpers.
4752 *
4753 * @{
4754 */
4755
4756/**
4757 * Updates the x87.DS and FPUDP registers.
4758 *
4759 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4760 * @param pFpuCtx The FPU context.
4761 * @param iEffSeg The effective segment register.
4762 * @param GCPtrEff The effective address relative to @a iEffSeg.
4763 */
4764DECLINLINE(void) iemFpuUpdateDP(PVMCPUCC pVCpu, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
4765{
4766 RTSEL sel;
4767 switch (iEffSeg)
4768 {
4769 case X86_SREG_DS: sel = pVCpu->cpum.GstCtx.ds.Sel; break;
4770 case X86_SREG_SS: sel = pVCpu->cpum.GstCtx.ss.Sel; break;
4771 case X86_SREG_CS: sel = pVCpu->cpum.GstCtx.cs.Sel; break;
4772 case X86_SREG_ES: sel = pVCpu->cpum.GstCtx.es.Sel; break;
4773 case X86_SREG_FS: sel = pVCpu->cpum.GstCtx.fs.Sel; break;
4774 case X86_SREG_GS: sel = pVCpu->cpum.GstCtx.gs.Sel; break;
4775 default:
4776 AssertMsgFailed(("%d\n", iEffSeg));
4777 sel = pVCpu->cpum.GstCtx.ds.Sel;
4778 }
4779    /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
4780 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
4781 {
4782 pFpuCtx->DS = 0;
4783 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
4784 }
4785 else if (!IEM_IS_LONG_MODE(pVCpu)) /** @todo this is weird. explain. */
4786 {
4787 pFpuCtx->DS = sel;
4788 pFpuCtx->FPUDP = GCPtrEff;
4789 }
4790 else
4791 *(uint64_t *)&pFpuCtx->FPUDP = GCPtrEff;
4792}
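/* Example of the real/V86 mode branch above: with DS=0x1234 and
   GCPtrEff=0x0056 the stored FPUDP is the linear address
   (0x1234 << 4) + 0x56 = 0x12396, and the DS field is cleared. */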
4793
4794
4795/**
4796 * Rotates the stack registers in the push direction.
4797 *
4798 * @param pFpuCtx The FPU context.
4799 * @remarks This is a complete waste of time, but fxsave stores the registers in
4800 * stack order.
4801 */
4802DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
4803{
4804 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
4805 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
4806 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
4807 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
4808 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
4809 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
4810 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
4811 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
4812 pFpuCtx->aRegs[0].r80 = r80Tmp;
4813}
4814
4815
4816/**
4817 * Rotates the stack registers in the pop direction.
4818 *
4819 * @param pFpuCtx The FPU context.
4820 * @remarks This is a complete waste of time, but fxsave stores the registers in
4821 * stack order.
4822 */
4823DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
4824{
4825 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
4826 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
4827 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
4828 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
4829 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
4830 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
4831 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
4832 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
4833 pFpuCtx->aRegs[7].r80 = r80Tmp;
4834}
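/* The two rotations above keep aRegs[0] corresponding to ST(0) after TOP has
   changed: a push moves aRegs[i] to aRegs[i + 1] with aRegs[7] wrapping to
   aRegs[0], while a pop moves aRegs[i] to aRegs[i - 1] with aRegs[0] wrapping
   to aRegs[7]. */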
4835
4836
4837/**
4838 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
4839 * exception prevents it.
4840 *
4841 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4842 * @param pResult The FPU operation result to push.
4843 * @param pFpuCtx The FPU context.
4844 */
4845static void iemFpuMaybePushResult(PVMCPU pVCpu, PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx) RT_NOEXCEPT
4846{
4847 /* Update FSW and bail if there are pending exceptions afterwards. */
4848 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
4849 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
4850 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4851 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4852 {
4853         if ((fFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4854 Log11(("iemFpuMaybePushResult: %04x:%08RX64: FSW %#x -> %#x\n",
4855 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fFsw));
4856 pFpuCtx->FSW = fFsw;
4857 return;
4858 }
4859
4860 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
4861 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
4862 {
4863 /* All is fine, push the actual value. */
4864 pFpuCtx->FTW |= RT_BIT(iNewTop);
4865 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
4866 }
4867 else if (pFpuCtx->FCW & X86_FCW_IM)
4868 {
4869 /* Masked stack overflow, push QNaN. */
4870 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
4871 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
4872 }
4873 else
4874 {
4875 /* Raise stack overflow, don't push anything. */
4876 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
4877 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
4878 Log11(("iemFpuMaybePushResult: %04x:%08RX64: stack overflow (FSW=%#x)\n",
4879 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
4880 return;
4881 }
4882
4883 fFsw &= ~X86_FSW_TOP_MASK;
4884 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
4885 pFpuCtx->FSW = fFsw;
4886
4887 iemFpuRotateStackPush(pFpuCtx);
4888 RT_NOREF(pVCpu);
4889}
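/*
 * A minimal sketch of the TOP arithmetic used above (iemFpuSketchNewTopAfterPush
 * is a hypothetical helper name, not part of IEM): pushing decrements TOP
 * modulo 8, which the code expresses as adding 7 and masking:
 *
 *      static uint16_t iemFpuSketchNewTopAfterPush(uint16_t fFsw)
 *      {
 *          return (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK; // TOP=0 -> 7, TOP=3 -> 2
 *      }
 *
 * The pending exception test relies on the IE/DE/ZE flag bits in FSW occupying
 * the same bit positions (0..2) as the IM/DM/ZM mask bits in FCW, so ANDing the
 * flags with the inverted mask bits leaves exactly the unmasked exceptions.
 */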
4890
4891
4892/**
4893 * Stores a result in a FPU register and updates the FSW and FTW.
4894 *
4895 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4896 * @param pFpuCtx The FPU context.
4897 * @param pResult The result to store.
4898 * @param iStReg Which FPU register to store it in.
4899 */
4900static void iemFpuStoreResultOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg) RT_NOEXCEPT
4901{
4902 Assert(iStReg < 8);
4903 uint16_t fNewFsw = pFpuCtx->FSW;
4904 uint16_t const iReg = (X86_FSW_TOP_GET(fNewFsw) + iStReg) & X86_FSW_TOP_SMASK;
4905 fNewFsw &= ~X86_FSW_C_MASK;
4906 fNewFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
4907 if ((fNewFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4908 Log11(("iemFpuStoreResultOnly: %04x:%08RX64: FSW %#x -> %#x\n",
4909 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fNewFsw));
4910 pFpuCtx->FSW = fNewFsw;
4911 pFpuCtx->FTW |= RT_BIT(iReg);
4912 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
4913 RT_NOREF(pVCpu);
4914}
4915
4916
4917/**
4918 * Only updates the FPU status word (FSW) with the result of the current
4919 * instruction.
4920 *
4921 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4922 * @param pFpuCtx The FPU context.
4923 * @param u16FSW The FSW output of the current instruction.
4924 */
4925static void iemFpuUpdateFSWOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint16_t u16FSW) RT_NOEXCEPT
4926{
4927 uint16_t fNewFsw = pFpuCtx->FSW;
4928 fNewFsw &= ~X86_FSW_C_MASK;
4929 fNewFsw |= u16FSW & ~X86_FSW_TOP_MASK;
4930 if ((fNewFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4931         Log11(("iemFpuUpdateFSWOnly: %04x:%08RX64: FSW %#x -> %#x\n",
4932 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fNewFsw));
4933 pFpuCtx->FSW = fNewFsw;
4934 RT_NOREF(pVCpu);
4935}
4936
4937
4938/**
4939 * Pops one item off the FPU stack if no pending exception prevents it.
4940 *
4941 * @param pFpuCtx The FPU context.
4942 */
4943static void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx) RT_NOEXCEPT
4944{
4945 /* Check pending exceptions. */
4946 uint16_t uFSW = pFpuCtx->FSW;
4947 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4948 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4949 return;
4950
4951 /* TOP--. */
4952 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
4953 uFSW &= ~X86_FSW_TOP_MASK;
4954 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
4955 pFpuCtx->FSW = uFSW;
4956
4957 /* Mark the previous ST0 as empty. */
4958 iOldTop >>= X86_FSW_TOP_SHIFT;
4959 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
4960
4961 /* Rotate the registers. */
4962 iemFpuRotateStackPop(pFpuCtx);
4963}
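/* Note on the TOP increment above: 9 is added rather than 1, but since only
   the three TOP bits survive the masking, 9 is congruent to 1 modulo 8; e.g.
   TOP=7 wraps around to 0. */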
4964
4965
4966/**
4967 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
4968 *
4969 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4970 * @param pResult The FPU operation result to push.
4971 * @param uFpuOpcode The FPU opcode value.
4972 */
4973void iemFpuPushResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint16_t uFpuOpcode) RT_NOEXCEPT
4974{
4975 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4976 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
4977 iemFpuMaybePushResult(pVCpu, pResult, pFpuCtx);
4978}
4979
4980
4981/**
4982 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
4983 * and sets FPUDP and FPUDS.
4984 *
4985 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4986 * @param pResult The FPU operation result to push.
4987 * @param iEffSeg The effective segment register.
4988 * @param GCPtrEff The effective address relative to @a iEffSeg.
4989 * @param uFpuOpcode The FPU opcode value.
4990 */
4991void iemFpuPushResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff,
4992 uint16_t uFpuOpcode) RT_NOEXCEPT
4993{
4994 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4995 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
4996 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
4997 iemFpuMaybePushResult(pVCpu, pResult, pFpuCtx);
4998}
4999
5000
5001/**
5002 * Replaces ST0 with the first value and pushes the second onto the FPU stack,
5003 * unless a pending exception prevents it.
5004 *
5005 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5006 * @param pResult The FPU operation result to store and push.
5007 * @param uFpuOpcode The FPU opcode value.
5008 */
5009void iemFpuPushResultTwo(PVMCPUCC pVCpu, PIEMFPURESULTTWO pResult, uint16_t uFpuOpcode) RT_NOEXCEPT
5010{
5011 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5012 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5013
5014 /* Update FSW and bail if there are pending exceptions afterwards. */
5015 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
5016 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
5017 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
5018 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
5019 {
5020 if ((fFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
5021 Log11(("iemFpuPushResultTwo: %04x:%08RX64: FSW %#x -> %#x\n",
5022 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fFsw));
5023 pFpuCtx->FSW = fFsw;
5024 return;
5025 }
5026
5027 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
5028 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
5029 {
5030 /* All is fine, push the actual value. */
5031 pFpuCtx->FTW |= RT_BIT(iNewTop);
5032 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
5033 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
5034 }
5035 else if (pFpuCtx->FCW & X86_FCW_IM)
5036 {
5037 /* Masked stack overflow, push QNaN. */
5038 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
5039 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
5040 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5041 }
5042 else
5043 {
5044 /* Raise stack overflow, don't push anything. */
5045 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
5046 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
5047 Log11(("iemFpuPushResultTwo: %04x:%08RX64: stack overflow (FSW=%#x)\n",
5048 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5049 return;
5050 }
5051
5052 fFsw &= ~X86_FSW_TOP_MASK;
5053 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
5054 pFpuCtx->FSW = fFsw;
5055
5056 iemFpuRotateStackPush(pFpuCtx);
5057}
5058
5059
5060/**
5061 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
5062 * FOP.
5063 *
5064 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5065 * @param pResult The result to store.
5066 * @param iStReg Which FPU register to store it in.
5067 * @param uFpuOpcode The FPU opcode value.
5068 */
5069void iemFpuStoreResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5070{
5071 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5072 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5073 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5074}
5075
5076
5077/**
5078 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
5079 * FOP, and then pops the stack.
5080 *
5081 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5082 * @param pResult The result to store.
5083 * @param iStReg Which FPU register to store it in.
5084 * @param uFpuOpcode The FPU opcode value.
5085 */
5086void iemFpuStoreResultThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5087{
5088 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5089 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5090 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5091 iemFpuMaybePopOne(pFpuCtx);
5092}
5093
5094
5095/**
5096 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
5097 * FPUDP, and FPUDS.
5098 *
5099 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5100 * @param pResult The result to store.
5101 * @param iStReg Which FPU register to store it in.
5102 * @param iEffSeg The effective memory operand selector register.
5103 * @param GCPtrEff The effective memory operand offset.
5104 * @param uFpuOpcode The FPU opcode value.
5105 */
5106void iemFpuStoreResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
5107 uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5108{
5109 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5110 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5111 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5112 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5113}
5114
5115
5116/**
5117 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
5118 * FPUDP, and FPUDS, and then pops the stack.
5119 *
5120 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5121 * @param pResult The result to store.
5122 * @param iStReg Which FPU register to store it in.
5123 * @param iEffSeg The effective memory operand selector register.
5124 * @param GCPtrEff The effective memory operand offset.
5125 * @param uFpuOpcode The FPU opcode value.
5126 */
5127void iemFpuStoreResultWithMemOpThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult,
5128 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5129{
5130 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5131 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5132 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5133 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5134 iemFpuMaybePopOne(pFpuCtx);
5135}
5136
5137
5138/**
5139 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
5140 *
5141 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5142 * @param uFpuOpcode The FPU opcode value.
5143 */
5144void iemFpuUpdateOpcodeAndIp(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5145{
5146 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5147 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5148}
5149
5150
5151/**
5152 * Updates the FSW, FOP, FPUIP, and FPUCS.
5153 *
5154 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5155 * @param u16FSW The FSW from the current instruction.
5156 * @param uFpuOpcode The FPU opcode value.
5157 */
5158void iemFpuUpdateFSW(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT
5159{
5160 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5161 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5162 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5163}
5164
5165
5166/**
5167 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
5168 *
5169 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5170 * @param u16FSW The FSW from the current instruction.
5171 * @param uFpuOpcode The FPU opcode value.
5172 */
5173void iemFpuUpdateFSWThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT
5174{
5175 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5176 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5177 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5178 iemFpuMaybePopOne(pFpuCtx);
5179}
5180
5181
5182/**
5183 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
5184 *
5185 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5186 * @param u16FSW The FSW from the current instruction.
5187 * @param iEffSeg The effective memory operand selector register.
5188 * @param GCPtrEff The effective memory operand offset.
5189 * @param uFpuOpcode The FPU opcode value.
5190 */
5191void iemFpuUpdateFSWWithMemOp(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5192{
5193 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5194 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5195 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5196 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5197}
5198
5199
5200/**
5201 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
5202 *
5203 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5204 * @param u16FSW The FSW from the current instruction.
5205 * @param uFpuOpcode The FPU opcode value.
5206 */
5207void iemFpuUpdateFSWThenPopPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT
5208{
5209 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5210 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5211 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5212 iemFpuMaybePopOne(pFpuCtx);
5213 iemFpuMaybePopOne(pFpuCtx);
5214}
5215
5216
5217/**
5218 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
5219 *
5220 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5221 * @param u16FSW The FSW from the current instruction.
5222 * @param iEffSeg The effective memory operand selector register.
5223 * @param GCPtrEff The effective memory operand offset.
5224 * @param uFpuOpcode The FPU opcode value.
5225 */
5226void iemFpuUpdateFSWWithMemOpThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff,
5227 uint16_t uFpuOpcode) RT_NOEXCEPT
5228{
5229 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5230 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5231 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5232 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5233 iemFpuMaybePopOne(pFpuCtx);
5234}
5235
5236
5237/**
5238 * Worker routine for raising an FPU stack underflow exception.
5239 *
5240 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5241 * @param pFpuCtx The FPU context.
5242 * @param iStReg The stack register being accessed.
5243 */
5244static void iemFpuStackUnderflowOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint8_t iStReg)
5245{
5246 Assert(iStReg < 8 || iStReg == UINT8_MAX);
5247 if (pFpuCtx->FCW & X86_FCW_IM)
5248 {
5249 /* Masked underflow. */
5250 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5251 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5252 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5253 if (iStReg != UINT8_MAX)
5254 {
5255 pFpuCtx->FTW |= RT_BIT(iReg);
5256 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
5257 }
5258 }
5259 else
5260 {
5261 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5262 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5263 Log11(("iemFpuStackUnderflowOnly: %04x:%08RX64: underflow (FSW=%#x)\n",
5264 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5265 }
5266 RT_NOREF(pVCpu);
5267}
5268
5269
5270/**
5271 * Raises a FPU stack underflow exception.
5272 *
5273 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5274 * @param iStReg The destination register that should be loaded
5275 * with QNaN if \#IS is not masked. Specify
5276 * UINT8_MAX if none (like for fcom).
5277 * @param uFpuOpcode The FPU opcode value.
5278 */
5279void iemFpuStackUnderflow(PVMCPUCC pVCpu, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5280{
5281 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5282 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5283 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5284}
5285
5286
5287void iemFpuStackUnderflowWithMemOp(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5288{
5289 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5290 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5291 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5292 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5293}
5294
5295
5296void iemFpuStackUnderflowThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5297{
5298 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5299 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5300 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5301 iemFpuMaybePopOne(pFpuCtx);
5302}
5303
5304
5305void iemFpuStackUnderflowWithMemOpThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff,
5306 uint16_t uFpuOpcode) RT_NOEXCEPT
5307{
5308 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5309 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5310 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5311 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5312 iemFpuMaybePopOne(pFpuCtx);
5313}
5314
5315
5316void iemFpuStackUnderflowThenPopPop(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5317{
5318 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5319 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5320 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, UINT8_MAX);
5321 iemFpuMaybePopOne(pFpuCtx);
5322 iemFpuMaybePopOne(pFpuCtx);
5323}
5324
5325
5326void iemFpuStackPushUnderflow(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5327{
5328 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5329 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5330
5331 if (pFpuCtx->FCW & X86_FCW_IM)
5332 {
5333         /* Masked underflow - Push QNaN. */
5334 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5335 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5336 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5337 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5338 pFpuCtx->FTW |= RT_BIT(iNewTop);
5339 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5340 iemFpuRotateStackPush(pFpuCtx);
5341 }
5342 else
5343 {
5344 /* Exception pending - don't change TOP or the register stack. */
5345 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5346 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5347 Log11(("iemFpuStackPushUnderflow: %04x:%08RX64: underflow (FSW=%#x)\n",
5348 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5349 }
5350}
5351
5352
5353void iemFpuStackPushUnderflowTwo(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5354{
5355 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5356 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5357
5358 if (pFpuCtx->FCW & X86_FCW_IM)
5359 {
5360         /* Masked underflow - Push QNaN. */
5361 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5362 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5363 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5364 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5365 pFpuCtx->FTW |= RT_BIT(iNewTop);
5366 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
5367 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5368 iemFpuRotateStackPush(pFpuCtx);
5369 }
5370 else
5371 {
5372 /* Exception pending - don't change TOP or the register stack. */
5373 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5374 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5375 Log11(("iemFpuStackPushUnderflowTwo: %04x:%08RX64: underflow (FSW=%#x)\n",
5376 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5377 }
5378}
5379
5380
5381/**
5382 * Worker routine for raising an FPU stack overflow exception on a push.
5383 *
5384 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5385 * @param pFpuCtx The FPU context.
5386 */
5387static void iemFpuStackPushOverflowOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx) RT_NOEXCEPT
5388{
5389 if (pFpuCtx->FCW & X86_FCW_IM)
5390 {
5391 /* Masked overflow. */
5392 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5393 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5394 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
5395 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5396 pFpuCtx->FTW |= RT_BIT(iNewTop);
5397 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5398 iemFpuRotateStackPush(pFpuCtx);
5399 }
5400 else
5401 {
5402 /* Exception pending - don't change TOP or the register stack. */
5403 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5404 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5405 Log11(("iemFpuStackPushOverflowOnly: %04x:%08RX64: overflow (FSW=%#x)\n",
5406 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5407 }
5408 RT_NOREF(pVCpu);
5409}
5410
5411
5412/**
5413 * Raises a FPU stack overflow exception on a push.
5414 *
5415 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5416 * @param uFpuOpcode The FPU opcode value.
5417 */
5418void iemFpuStackPushOverflow(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5419{
5420 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5421 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5422 iemFpuStackPushOverflowOnly(pVCpu, pFpuCtx);
5423}
5424
5425
5426/**
5427 * Raises a FPU stack overflow exception on a push with a memory operand.
5428 *
5429 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5430 * @param iEffSeg The effective memory operand selector register.
5431 * @param GCPtrEff The effective memory operand offset.
5432 * @param uFpuOpcode The FPU opcode value.
5433 */
5434void iemFpuStackPushOverflowWithMemOp(PVMCPUCC pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5435{
5436 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5437 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5438 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5439 iemFpuStackPushOverflowOnly(pVCpu, pFpuCtx);
5440}
5441
5442/** @} */
5443
5444
5445/** @name SSE+AVX SIMD access and helpers.
5446 *
5447 * @{
5448 */
5449/**
5450 * Stores a result in a SIMD XMM register, updates the MXCSR.
5451 *
5452 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5453 * @param pResult The result to store.
5454 * @param iXmmReg Which SIMD XMM register to store the result in.
5455 */
5456void iemSseStoreResult(PVMCPUCC pVCpu, PCIEMSSERESULT pResult, uint8_t iXmmReg) RT_NOEXCEPT
5457{
5458 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5459 pFpuCtx->MXCSR |= pResult->MXCSR & X86_MXCSR_XCPT_FLAGS;
5460
5461 /* The result is only updated if there is no unmasked exception pending. */
5462 if (( ~((pFpuCtx->MXCSR & X86_MXCSR_XCPT_MASK) >> X86_MXCSR_XCPT_MASK_SHIFT)
5463 & (pFpuCtx->MXCSR & X86_MXCSR_XCPT_FLAGS)) == 0)
5464 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXmmReg] = pResult->uResult;
5465}
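/* Example of the commit check above: with the power-on MXCSR of 0x1f80 all six
   exception mask bits (bits 7..12) are set, so shifting them down by
   X86_MXCSR_XCPT_MASK_SHIFT and inverting yields zero when ANDed with the
   exception flag bits (0..5), and the result is always written.  If, say, ZM
   is clear and the operation reports ZE, the expression becomes non-zero and
   the XMM register is left unchanged. */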
5466
5467
5468/**
5469 * Updates the MXCSR.
5470 *
5471 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5472  * @param   fMxcsr              The MXCSR output of the operation; only its exception flags are merged.
5473 */
5474void iemSseUpdateMxcsr(PVMCPUCC pVCpu, uint32_t fMxcsr) RT_NOEXCEPT
5475{
5476 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5477 pFpuCtx->MXCSR |= fMxcsr & X86_MXCSR_XCPT_FLAGS;
5478}
5479/** @} */
5480
5481
5482/** @name Memory access.
5483 *
5484 * @{
5485 */
5486
5487#undef LOG_GROUP
5488#define LOG_GROUP LOG_GROUP_IEM_MEM
5489
5490/**
5491 * Updates the IEMCPU::cbWritten counter if applicable.
5492 *
5493 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5494 * @param fAccess The access being accounted for.
5495 * @param cbMem The access size.
5496 */
5497DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPUCC pVCpu, uint32_t fAccess, size_t cbMem)
5498{
5499 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
5500 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
5501 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
5502}
5503
5504
5505/**
5506 * Applies the segment limit, base and attributes.
5507 *
5508 * This may raise a \#GP or \#SS.
5509 *
5510 * @returns VBox strict status code.
5511 *
5512 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5513 * @param fAccess The kind of access which is being performed.
5514 * @param iSegReg The index of the segment register to apply.
5515 * This is UINT8_MAX if none (for IDT, GDT, LDT,
5516 * TSS, ++).
5517 * @param cbMem The access size.
5518 * @param pGCPtrMem Pointer to the guest memory address to apply
5519 * segmentation to. Input and output parameter.
5520 */
5521VBOXSTRICTRC iemMemApplySegment(PVMCPUCC pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem) RT_NOEXCEPT
5522{
5523 if (iSegReg == UINT8_MAX)
5524 return VINF_SUCCESS;
5525
5526 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
5527 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
5528 switch (IEM_GET_CPU_MODE(pVCpu))
5529 {
5530 case IEMMODE_16BIT:
5531 case IEMMODE_32BIT:
5532 {
5533 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
5534 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
5535
5536 if ( pSel->Attr.n.u1Present
5537 && !pSel->Attr.n.u1Unusable)
5538 {
5539 Assert(pSel->Attr.n.u1DescType);
5540 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
5541 {
5542 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5543 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
5544 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
5545
5546 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
5547 {
5548 /** @todo CPL check. */
5549 }
5550
5551 /*
5552 * There are two kinds of data selectors, normal and expand down.
5553 */
5554 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
5555 {
5556 if ( GCPtrFirst32 > pSel->u32Limit
5557 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
5558 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5559 }
5560 else
5561 {
5562 /*
5563 * The upper boundary is defined by the B bit, not the G bit!
5564 */
5565 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
5566 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
5567 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5568 }
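                    /* Expand-down example: a data segment with u32Limit=0x0fff
                       and the D/B bit set accepts offsets 0x1000 through
                       0xffffffff, so an access starting at 0x0800 fails the
                       first check above and is rejected via
                       iemRaiseSelectorBounds. */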
5569 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
5570 }
5571 else
5572 {
5573 /*
5574                  * A code selector can usually be used to read through it; writing
5575                  * is only permitted in real and V8086 mode.
5576 */
5577 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5578 || ( (fAccess & IEM_ACCESS_TYPE_READ)
5579 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
5580 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
5581 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
5582
5583 if ( GCPtrFirst32 > pSel->u32Limit
5584 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
5585 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5586
5587 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
5588 {
5589 /** @todo CPL check. */
5590 }
5591
5592 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
5593 }
5594 }
5595 else
5596 return iemRaiseGeneralProtectionFault0(pVCpu);
5597 return VINF_SUCCESS;
5598 }
5599
5600 case IEMMODE_64BIT:
5601 {
5602 RTGCPTR GCPtrMem = *pGCPtrMem;
5603 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
5604 *pGCPtrMem = GCPtrMem + pSel->u64Base;
5605
5606 Assert(cbMem >= 1);
5607 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
5608 return VINF_SUCCESS;
5609 /** @todo We should probably raise \#SS(0) here if segment is SS; see AMD spec.
5610 * 4.12.2 "Data Limit Checks in 64-bit Mode". */
5611 return iemRaiseGeneralProtectionFault0(pVCpu);
5612 }
5613
5614 default:
5615 AssertFailedReturn(VERR_IEM_IPE_7);
5616 }
5617}
5618
5619
5620/**
5621  * Translates a virtual address to a physical address and checks whether we
5622 * can access the page as specified.
5623 *
5624 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5625 * @param GCPtrMem The virtual address.
5626 * @param cbAccess The access size, for raising \#PF correctly for
5627 * FXSAVE and such.
5628 * @param fAccess The intended access.
5629 * @param pGCPhysMem Where to return the physical address.
5630 */
5631VBOXSTRICTRC iemMemPageTranslateAndCheckAccess(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint32_t cbAccess,
5632 uint32_t fAccess, PRTGCPHYS pGCPhysMem) RT_NOEXCEPT
5633{
5634 /** @todo Need a different PGM interface here. We're currently using
5635     * generic / REM interfaces.  This won't cut it for R0. */
5636 /** @todo If/when PGM handles paged real-mode, we can remove the hack in
5637 * iemSvmWorldSwitch/iemVmxWorldSwitch to work around raising a page-fault
5638 * here. */
5639 PGMPTWALK Walk;
5640 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
5641 if (RT_FAILURE(rc))
5642 {
5643 LogEx(LOG_GROUP_IEM,("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
5644 /** @todo Check unassigned memory in unpaged mode. */
5645 /** @todo Reserved bits in page tables. Requires new PGM interface. */
5646#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5647 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5648 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
5649#endif
5650 *pGCPhysMem = NIL_RTGCPHYS;
5651 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess, rc);
5652 }
5653
5654 /* If the page is writable and does not have the no-exec bit set, all
5655 access is allowed. Otherwise we'll have to check more carefully... */
5656 if ((Walk.fEffective & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
5657 {
5658 /* Write to read only memory? */
5659 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5660 && !(Walk.fEffective & X86_PTE_RW)
5661 && ( ( IEM_GET_CPL(pVCpu) == 3
5662 && !(fAccess & IEM_ACCESS_WHAT_SYS))
5663 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
5664 {
5665 LogEx(LOG_GROUP_IEM,("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
5666 *pGCPhysMem = NIL_RTGCPHYS;
5667#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5668 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5669 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5670#endif
5671 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
5672 }
5673
5674 /* Kernel memory accessed by userland? */
5675 if ( !(Walk.fEffective & X86_PTE_US)
5676 && IEM_GET_CPL(pVCpu) == 3
5677 && !(fAccess & IEM_ACCESS_WHAT_SYS))
5678 {
5679 LogEx(LOG_GROUP_IEM,("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
5680 *pGCPhysMem = NIL_RTGCPHYS;
5681#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5682 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5683 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5684#endif
5685 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess, VERR_ACCESS_DENIED);
5686 }
5687
5688 /* Executing non-executable memory? */
5689 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
5690 && (Walk.fEffective & X86_PTE_PAE_NX)
5691 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) )
5692 {
5693 LogEx(LOG_GROUP_IEM,("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
5694 *pGCPhysMem = NIL_RTGCPHYS;
5695#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5696 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5697 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5698#endif
5699 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
5700 VERR_ACCESS_DENIED);
5701 }
5702 }
5703
5704 /*
5705 * Set the dirty / access flags.
5706      * ASSUMES this is set when the address is translated rather than on commit...
5707 */
5708 /** @todo testcase: check when A and D bits are actually set by the CPU. */
5709 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
5710 if ((Walk.fEffective & fAccessedDirty) != fAccessedDirty)
5711 {
5712 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
5713 AssertRC(rc2);
5714 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
5715 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
5716 }
5717
5718 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
5719 *pGCPhysMem = GCPhys;
5720 return VINF_SUCCESS;
5721}
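/* Summary of the permission checks above: a write faults when the effective
   PTE is read-only and either CPL is 3 (for non-system accesses) or CR0.WP is
   set; any CPL 3 non-system access to a supervisor (U/S=0) page faults; and an
   instruction fetch faults when the NX bit is set and EFER.NXE is enabled.
   Everything else falls through to the accessed/dirty bit update. */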
5722
5723
5724/**
5725 * Looks up a memory mapping entry.
5726 *
5727 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
5728 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5729 * @param pvMem The memory address.
5730  * @param   fAccess             The access type and purpose to match.
5731 */
5732DECLINLINE(int) iemMapLookup(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess)
5733{
5734 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
5735 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
5736 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
5737 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5738 return 0;
5739 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
5740 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5741 return 1;
5742 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
5743 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5744 return 2;
5745 return VERR_NOT_FOUND;
5746}
5747
5748
5749/**
5750 * Finds a free memmap entry when using iNextMapping doesn't work.
5751 *
5752 * @returns Memory mapping index, 1024 on failure.
5753 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5754 */
5755static unsigned iemMemMapFindFree(PVMCPUCC pVCpu)
5756{
5757 /*
5758 * The easy case.
5759 */
5760 if (pVCpu->iem.s.cActiveMappings == 0)
5761 {
5762 pVCpu->iem.s.iNextMapping = 1;
5763 return 0;
5764 }
5765
5766 /* There should be enough mappings for all instructions. */
5767 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
5768
5769 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
5770 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
5771 return i;
5772
5773 AssertFailedReturn(1024);
5774}
5775
5776
5777/**
5778 * Commits a bounce buffer that needs writing back and unmaps it.
5779 *
5780 * @returns Strict VBox status code.
5781 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5782 * @param iMemMap The index of the buffer to commit.
5783 * @param fPostponeFail Whether we can postpone writer failures to ring-3.
5784 * Always false in ring-3, obviously.
5785 */
5786static VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPUCC pVCpu, unsigned iMemMap, bool fPostponeFail)
5787{
5788 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
5789 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
5790#ifdef IN_RING3
5791 Assert(!fPostponeFail);
5792 RT_NOREF_PV(fPostponeFail);
5793#endif
5794
5795 /*
5796 * Do the writing.
5797 */
5798 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
5799 if (!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned)
5800 {
5801 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
5802 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
5803 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
5804 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
5805 {
5806 /*
5807 * Carefully and efficiently dealing with access handler return
5808 * codes make this a little bloated.
5809             * codes makes this a little bloated.
5810 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
5811 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
5812 pbBuf,
5813 cbFirst,
5814 PGMACCESSORIGIN_IEM);
5815 if (rcStrict == VINF_SUCCESS)
5816 {
5817 if (cbSecond)
5818 {
5819 rcStrict = PGMPhysWrite(pVM,
5820 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5821 pbBuf + cbFirst,
5822 cbSecond,
5823 PGMACCESSORIGIN_IEM);
5824 if (rcStrict == VINF_SUCCESS)
5825 { /* nothing */ }
5826 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5827 {
5828 LogEx(LOG_GROUP_IEM,
5829 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
5830 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5831 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5832 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5833 }
5834#ifndef IN_RING3
5835 else if (fPostponeFail)
5836 {
5837 LogEx(LOG_GROUP_IEM,
5838 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5839 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5840 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5841 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
5842 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5843 return iemSetPassUpStatus(pVCpu, rcStrict);
5844 }
5845#endif
5846 else
5847 {
5848 LogEx(LOG_GROUP_IEM,
5849 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5850 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5851 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5852 return rcStrict;
5853 }
5854 }
5855 }
5856 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5857 {
5858 if (!cbSecond)
5859 {
5860 LogEx(LOG_GROUP_IEM,
5861 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
5862 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
5863 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5864 }
5865 else
5866 {
5867 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
5868 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5869 pbBuf + cbFirst,
5870 cbSecond,
5871 PGMACCESSORIGIN_IEM);
5872 if (rcStrict2 == VINF_SUCCESS)
5873 {
5874 LogEx(LOG_GROUP_IEM,
5875 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
5876 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5877 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5878 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5879 }
5880 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
5881 {
5882 LogEx(LOG_GROUP_IEM,
5883 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
5884 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5885 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
5886 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
5887 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5888 }
5889#ifndef IN_RING3
5890 else if (fPostponeFail)
5891 {
5892 LogEx(LOG_GROUP_IEM,
5893 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5894 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5895 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5896 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
5897 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5898 return iemSetPassUpStatus(pVCpu, rcStrict);
5899 }
5900#endif
5901 else
5902 {
5903 LogEx(LOG_GROUP_IEM,
5904 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5905 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5906 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
5907 return rcStrict2;
5908 }
5909 }
5910 }
5911#ifndef IN_RING3
5912 else if (fPostponeFail)
5913 {
5914 LogEx(LOG_GROUP_IEM,
5915 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5916 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5917 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5918 if (!cbSecond)
5919 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
5920 else
5921 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
5922 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5923 return iemSetPassUpStatus(pVCpu, rcStrict);
5924 }
5925#endif
5926 else
5927 {
5928 LogEx(LOG_GROUP_IEM,
5929 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
5930 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5931 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5932 return rcStrict;
5933 }
5934 }
5935 else
5936 {
5937 /*
5938 * No access handlers, much simpler.
5939 */
5940 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
5941 if (RT_SUCCESS(rc))
5942 {
5943 if (cbSecond)
5944 {
5945 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
5946 if (RT_SUCCESS(rc))
5947 { /* likely */ }
5948 else
5949 {
5950 LogEx(LOG_GROUP_IEM,
5951 ("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5952 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5953 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
5954 return rc;
5955 }
5956 }
5957 }
5958 else
5959 {
5960 LogEx(LOG_GROUP_IEM,
5961 ("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
5962 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
5963 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5964 return rc;
5965 }
5966 }
5967 }
5968
5969#if defined(IEM_LOG_MEMORY_WRITES)
5970 Log5(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
5971 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
5972 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
5973 Log5(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5974 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
5975 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
5976
5977 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
5978 g_cbIemWrote = cbWrote;
5979 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
5980#endif
5981
5982 /*
5983 * Free the mapping entry.
5984 */
5985 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
5986 Assert(pVCpu->iem.s.cActiveMappings != 0);
5987 pVCpu->iem.s.cActiveMappings--;
5988 return VINF_SUCCESS;
5989}
5990
5991
5992/**
5993 * iemMemMap worker that deals with a request crossing pages.
5994 */
5995static VBOXSTRICTRC
5996iemMemBounceBufferMapCrossPage(PVMCPUCC pVCpu, int iMemMap, void **ppvMem, size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
5997{
5998 Assert(cbMem <= GUEST_PAGE_SIZE);
5999
6000 /*
6001 * Do the address translations.
6002 */
6003 uint32_t const cbFirstPage = GUEST_PAGE_SIZE - (uint32_t)(GCPtrFirst & GUEST_PAGE_OFFSET_MASK);
6004 RTGCPHYS GCPhysFirst;
6005 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, cbFirstPage, fAccess, &GCPhysFirst);
6006 if (rcStrict != VINF_SUCCESS)
6007 return rcStrict;
6008 Assert((GCPhysFirst & GUEST_PAGE_OFFSET_MASK) == (GCPtrFirst & GUEST_PAGE_OFFSET_MASK));
6009
6010 uint32_t const cbSecondPage = (uint32_t)cbMem - cbFirstPage;
6011 RTGCPHYS GCPhysSecond;
6012 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK,
6013 cbSecondPage, fAccess, &GCPhysSecond);
6014 if (rcStrict != VINF_SUCCESS)
6015 return rcStrict;
6016 Assert((GCPhysSecond & GUEST_PAGE_OFFSET_MASK) == 0);
6017 GCPhysSecond &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK; /** @todo why? */
6018
6019 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
6020
6021 /*
6022 * Read in the current memory content if it's a read, execute or partial
6023 * write access.
6024 */
6025 uint8_t * const pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
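    /* Layout note: the first cbFirstPage bytes of the bounce buffer shadow the
       tail of the first guest page (GCPhysFirst) and the following cbSecondPage
       bytes shadow the start of the second page (GCPhysSecond); the commit path
       writes them back in that same order. */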
6026
6027 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
6028 {
6029 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
6030 {
6031 /*
6032              * Must carefully deal with access handler status codes here;
6033              * this makes the code a bit bloated.
6034 */
6035 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
6036 if (rcStrict == VINF_SUCCESS)
6037 {
6038 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
6039 if (rcStrict == VINF_SUCCESS)
6040                 { /* likely */ }
6041 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6042 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6043 else
6044 {
6045 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (!!)\n",
6046 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6047 return rcStrict;
6048 }
6049 }
6050 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6051 {
6052 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
6053 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
6054 {
6055 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
6056 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6057 }
6058 else
6059 {
6060 LogEx(LOG_GROUP_IEM,
6061 ("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
6062                                           GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
6063 return rcStrict2;
6064 }
6065 }
6066 else
6067 {
6068 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6069 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6070 return rcStrict;
6071 }
6072 }
6073 else
6074 {
6075 /*
6076              * No informational status codes here, much more straightforward.
6077 */
6078 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
6079 if (RT_SUCCESS(rc))
6080 {
6081 Assert(rc == VINF_SUCCESS);
6082 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
6083 if (RT_SUCCESS(rc))
6084 Assert(rc == VINF_SUCCESS);
6085 else
6086 {
6087 LogEx(LOG_GROUP_IEM,
6088 ("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
6089 return rc;
6090 }
6091 }
6092 else
6093 {
6094 LogEx(LOG_GROUP_IEM,
6095 ("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
6096 return rc;
6097 }
6098 }
6099 }
6100#ifdef VBOX_STRICT
6101 else
6102 memset(pbBuf, 0xcc, cbMem);
6103 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
6104 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
6105#endif
6106 AssertCompileMemberAlignment(VMCPU, iem.s.aBounceBuffers, 64);
6107
6108 /*
6109 * Commit the bounce buffer entry.
6110 */
6111 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
6112 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
6113 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
6114 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
6115 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
6116 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
6117 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
6118 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6119 pVCpu->iem.s.cActiveMappings++;
6120
6121 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6122 *ppvMem = pbBuf;
6123 return VINF_SUCCESS;
6124}
6125
6126
6127/**
6128 * iemMemMap worker that deals with iemMemPageMap failures.
6129 */
6130static VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPUCC pVCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
6131 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
6132{
6133 /*
6134 * Filter out conditions we can handle and the ones which shouldn't happen.
6135 */
6136 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
6137 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
6138 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
6139 {
6140 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
6141 return rcMap;
6142 }
6143 pVCpu->iem.s.cPotentialExits++;
6144
6145 /*
6146 * Read in the current memory content if it's a read, execute or partial
6147 * write access.
6148 */
6149 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
6150 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
6151 {
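        /* An unassigned physical range has nothing to read, so the buffer is
           filled with 0xff; otherwise the current content is read in so that a
           partial write leaves the untouched bytes unchanged on commit. */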
6152 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
6153 memset(pbBuf, 0xff, cbMem);
6154 else
6155 {
6156 int rc;
6157 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
6158 {
6159 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
6160 if (rcStrict == VINF_SUCCESS)
6161 { /* nothing */ }
6162 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6163 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6164 else
6165 {
6166 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6167 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6168 return rcStrict;
6169 }
6170 }
6171 else
6172 {
6173 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
6174 if (RT_SUCCESS(rc))
6175 { /* likely */ }
6176 else
6177 {
6178 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6179 GCPhysFirst, rc));
6180 return rc;
6181 }
6182 }
6183 }
6184 }
6185#ifdef VBOX_STRICT
6186 else
6187 memset(pbBuf, 0xcc, cbMem);
6188#endif
6189#ifdef VBOX_STRICT
6190 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
6191 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
6192#endif
6193
6194 /*
6195 * Commit the bounce buffer entry.
6196 */
6197 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
6198 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
6199 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
6200 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
6201 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
6202 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
6203 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
6204 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6205 pVCpu->iem.s.cActiveMappings++;
6206
6207 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6208 *ppvMem = pbBuf;
6209 return VINF_SUCCESS;
6210}
6211
6212
6213
6214/**
6215 * Maps the specified guest memory for the given kind of access.
6216 *
6217 * This may be using bounce buffering of the memory if it's crossing a page
6218 * boundary or if there is an access handler installed for any of it. Because
6219 * of lock prefix guarantees, we're in for some extra clutter when this
6220 * happens.
6221 *
6222 * This may raise a \#GP, \#SS, \#PF or \#AC.
6223 *
6224 * @returns VBox strict status code.
6225 *
6226 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6227 * @param ppvMem Where to return the pointer to the mapped memory.
6228 * @param cbMem The number of bytes to map. This is usually 1, 2, 4, 6,
6229 * 8, 12, 16, 32 or 512. When used by string operations
6230 * it can be up to a page.
6231 * @param iSegReg The index of the segment register to use for this
6232 * access. The base and limits are checked. Use UINT8_MAX
6233 * to indicate that no segmentation is required (for IDT,
6234 * GDT and LDT accesses).
6235 * @param GCPtrMem The address of the guest memory.
6236 * @param fAccess How the memory is being accessed. The
6237 * IEM_ACCESS_TYPE_XXX bit is used to figure out how to map
6238 * the memory, while the IEM_ACCESS_WHAT_XXX bit is used
6239 * when raising exceptions.
6240 * @param uAlignCtl Alignment control:
6241 * - Bits 15:0 is the alignment mask.
6242 * - Bits 31:16 for flags like IEM_MEMMAP_F_ALIGN_GP,
6243 * IEM_MEMMAP_F_ALIGN_SSE, and
6244 * IEM_MEMMAP_F_ALIGN_GP_OR_AC.
6245 * Pass zero to skip alignment.
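 *                      E.g. the aligned SSE fetchers further down combine a
 *                      15 byte alignment mask with IEM_MEMMAP_F_ALIGN_GP and
 *                      IEM_MEMMAP_F_ALIGN_SSE to demand 16 byte alignment.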
6246 */
6247VBOXSTRICTRC iemMemMap(PVMCPUCC pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem,
6248 uint32_t fAccess, uint32_t uAlignCtl) RT_NOEXCEPT
6249{
6250 /*
6251 * Check the input and figure out which mapping entry to use.
6252 */
6253 Assert(cbMem <= sizeof(pVCpu->iem.s.aBounceBuffers[0]));
6254 Assert( cbMem <= 64 || cbMem == 512 || cbMem == 256 || cbMem == 108 || cbMem == 104 || cbMem == 102 || cbMem == 94
6255 || (iSegReg == UINT8_MAX && uAlignCtl == 0 && fAccess == IEM_ACCESS_DATA_R /* for the CPUID logging interface */) );
6256 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
6257 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
6258
6259 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
6260 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6261 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
6262 {
6263 iMemMap = iemMemMapFindFree(pVCpu);
6264 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
6265 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
6266 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
6267 pVCpu->iem.s.aMemMappings[2].fAccess),
6268 VERR_IEM_IPE_9);
6269 }
6270
6271 /*
6272 * Map the memory, checking that we can actually access it. If something
6273 * slightly complicated happens, fall back on bounce buffering.
6274 */
6275 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
6276 if (rcStrict == VINF_SUCCESS)
6277 { /* likely */ }
6278 else
6279 return rcStrict;
6280
6281 if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE) /* Crossing a page boundary? */
6282 { /* likely */ }
6283 else
6284 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
6285
6286 /*
6287 * Alignment check.
6288 */
6289 if ( (GCPtrMem & (uAlignCtl & UINT16_MAX)) == 0 )
6290 { /* likelyish */ }
6291 else
6292 {
6293 /* Misaligned access. */
6294 if ((fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6295 {
6296 if ( !(uAlignCtl & IEM_MEMMAP_F_ALIGN_GP)
6297 || ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_SSE)
6298 && (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) )
6299 {
6300 AssertCompile(X86_CR0_AM == X86_EFL_AC);
6301
6302 if (iemMemAreAlignmentChecksEnabled(pVCpu))
6303 return iemRaiseAlignmentCheckException(pVCpu);
6304 }
6305 else if ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_GP_OR_AC)
6306 && (GCPtrMem & 3) /* The value 4 matches 10980xe's FXSAVE and helps make bs3-cpu-basic2 work. */
6307 /** @todo may only apply to 2, 4 or 8 byte misalignments depending on the CPU
6308 * implementation. See FXSAVE/FRSTOR/XSAVE/XRSTOR/++. Using 4 for now as
6309 * that's what FXSAVE does on a 10980xe. */
6310 && iemMemAreAlignmentChecksEnabled(pVCpu))
6311 return iemRaiseAlignmentCheckException(pVCpu);
6312 else
6313 return iemRaiseGeneralProtectionFault0(pVCpu);
6314 }
6315 }
6316
6317#ifdef IEM_WITH_DATA_TLB
6318 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
6319
6320 /*
6321 * Get the TLB entry for this page.
6322 */
6323 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
6324 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
6325 if (pTlbe->uTag == uTag)
6326 {
6327# ifdef VBOX_WITH_STATISTICS
6328 pVCpu->iem.s.DataTlb.cTlbHits++;
6329# endif
6330 }
6331 else
6332 {
6333 pVCpu->iem.s.DataTlb.cTlbMisses++;
6334 PGMPTWALK Walk;
6335 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
6336 if (RT_FAILURE(rc))
6337 {
6338 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
6339# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6340 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6341 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
6342# endif
6343 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, rc);
6344 }
6345
6346 Assert(Walk.fSucceeded);
6347 pTlbe->uTag = uTag;
6348 pTlbe->fFlagsAndPhysRev = ~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A); /* skipping NX */
6349 pTlbe->GCPhys = Walk.GCPhys;
6350 pTlbe->pbMappingR3 = NULL;
6351 }
6352
6353 /*
6354 * Check TLB page table level access flags.
6355 */
6356 /* If the page is either supervisor only or non-writable, we need to do
6357 more careful access checks. */
6358 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_WRITE))
6359 {
6360 /* Write to read only memory? */
6361 if ( (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_WRITE)
6362 && (fAccess & IEM_ACCESS_TYPE_WRITE)
6363 && ( ( IEM_GET_CPL(pVCpu) == 3
6364 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6365 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
6366 {
6367 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
6368# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6369 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6370 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6371# endif
6372 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
6373 }
6374
6375 /* Kernel memory accessed by userland? */
6376 if ( (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER)
6377 && IEM_GET_CPL(pVCpu) == 3
6378 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6379 {
6380 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
6381# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6382 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6383 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6384# endif
6385 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, VERR_ACCESS_DENIED);
6386 }
6387 }
6388
6389 /*
6390 * Set the dirty / access flags.
6391 * ASSUMES this is set when the address is translated rather than on commit...
6392 */
6393 /** @todo testcase: check when A and D bits are actually set by the CPU. */
6394 uint64_t const fTlbAccessedDirty = (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PT_NO_DIRTY : 0) | IEMTLBE_F_PT_NO_ACCESSED;
6395 if (pTlbe->fFlagsAndPhysRev & fTlbAccessedDirty)
6396 {
6397 uint32_t const fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
6398 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
6399 AssertRC(rc2);
6400 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
6401 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
6402 pTlbe->fFlagsAndPhysRev &= ~fTlbAccessedDirty;
6403 }
6404
6405 /*
6406 * Look up the physical page info if necessary.
6407 */
6408 uint8_t *pbMem = NULL;
6409 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6410# ifdef IN_RING3
6411 pbMem = pTlbe->pbMappingR3;
6412# else
6413 pbMem = NULL;
6414# endif
6415 else
6416 {
6417 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
6418 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
6419 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
6420 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
6421 AssertCompile(PGMIEMGCPHYS2PTR_F_CODE_PAGE == IEMTLBE_F_PG_CODE_PAGE);
6422 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
6423 { /* likely */ }
6424 else
6425 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
6426 pTlbe->pbMappingR3 = NULL;
6427 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
6428 | IEMTLBE_F_NO_MAPPINGR3
6429 | IEMTLBE_F_PG_NO_READ
6430 | IEMTLBE_F_PG_NO_WRITE
6431 | IEMTLBE_F_PG_UNASSIGNED
6432 | IEMTLBE_F_PG_CODE_PAGE);
6433 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
6434 &pbMem, &pTlbe->fFlagsAndPhysRev);
6435 AssertRCReturn(rc, rc);
6436# ifdef IN_RING3
6437 pTlbe->pbMappingR3 = pbMem;
6438# endif
6439 }
6440
6441 /*
6442 * Check the physical page level access and mapping.
6443 */
6444 if ( !(pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ))
6445 || !(pTlbe->fFlagsAndPhysRev & ( (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PG_NO_WRITE : 0)
6446 | (fAccess & IEM_ACCESS_TYPE_READ ? IEMTLBE_F_PG_NO_READ : 0))) )
6447 { /* probably likely */ }
6448 else
6449 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem,
6450 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,
6451 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED
6452 : pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? VERR_PGM_PHYS_TLB_CATCH_ALL
6453 : VERR_PGM_PHYS_TLB_CATCH_WRITE);
6454 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3)); /* ASSUMPTIONS about PGMPhysIemGCPhys2PtrNoLock behaviour. */
6455
6456 if (pbMem)
6457 {
6458 Assert(!((uintptr_t)pbMem & GUEST_PAGE_OFFSET_MASK));
6459 pbMem = pbMem + (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6460 fAccess |= IEM_ACCESS_NOT_LOCKED;
6461 }
6462 else
6463 {
6464 Assert(!(fAccess & IEM_ACCESS_NOT_LOCKED));
6465 RTGCPHYS const GCPhysFirst = pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6466 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6467 if (rcStrict != VINF_SUCCESS)
6468 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
6469 }
6470
6471 void * const pvMem = pbMem;
6472
6473 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6474 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6475 if (fAccess & IEM_ACCESS_TYPE_READ)
6476 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6477
6478#else /* !IEM_WITH_DATA_TLB */
6479
6480 RTGCPHYS GCPhysFirst;
6481 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, &GCPhysFirst);
6482 if (rcStrict != VINF_SUCCESS)
6483 return rcStrict;
6484
6485 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6486 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6487 if (fAccess & IEM_ACCESS_TYPE_READ)
6488 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6489
6490 void *pvMem;
6491 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6492 if (rcStrict != VINF_SUCCESS)
6493 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
6494
6495#endif /* !IEM_WITH_DATA_TLB */
6496
6497 /*
6498 * Fill in the mapping table entry.
6499 */
6500 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
6501 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
6502 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6503 pVCpu->iem.s.cActiveMappings += 1;
6504
6505 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6506 *ppvMem = pvMem;
6507
6508 return VINF_SUCCESS;
6509}
6510
6511
6512/**
6513 * Commits the guest memory if bounce buffered and unmaps it.
6514 *
6515 * @returns Strict VBox status code.
6516 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6517 * @param pvMem The mapping.
6518 * @param fAccess The kind of access.
6519 */
6520VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess) RT_NOEXCEPT
6521{
6522 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
6523 AssertReturn(iMemMap >= 0, iMemMap);
6524
6525 /* If it's bounce buffered, we may need to write back the buffer. */
6526 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6527 {
6528 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6529 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
6530 }
6531 /* Otherwise unlock it. */
6532 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6533 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6534
6535 /* Free the entry. */
6536 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6537 Assert(pVCpu->iem.s.cActiveMappings != 0);
6538 pVCpu->iem.s.cActiveMappings--;
6539 return VINF_SUCCESS;
6540}
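
/*
 * Illustrative note: iemMemMap and iemMemCommitAndUnmap are used as a pair.
 * A minimal sketch of the typical calling pattern, mirroring the data
 * fetchers further down in this file (illustration only, not compiled):
 *
 *      uint32_t const *pu32Src;
 *      VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem,
 *                                        IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
 *      if (rcStrict == VINF_SUCCESS)
 *      {
 *          uint32_t const uValue = *pu32Src;   // use the mapped data before unmapping
 *          rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
 *      }
 *      // On failure nothing was mapped; any still-active mappings are
 *      // released by iemMemRollback.
 */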
6541
6542#ifdef IEM_WITH_SETJMP
6543
6544/**
6545 * Maps the specified guest memory for the given kind of access, longjmp on
6546 * error.
6547 *
6548 * This may be using bounce buffering of the memory if it's crossing a page
6549 * boundary or if there is an access handler installed for any of it. Because
6550 * of lock prefix guarantees, we're in for some extra clutter when this
6551 * happens.
6552 *
6553 * This may raise a \#GP, \#SS, \#PF or \#AC.
6554 *
6555 * @returns Pointer to the mapped memory.
6556 *
6557 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6558 * @param cbMem The number of bytes to map. This is usually 1,
6559 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
6560 * string operations it can be up to a page.
6561 * @param iSegReg The index of the segment register to use for
6562 * this access. The base and limits are checked.
6563 * Use UINT8_MAX to indicate that no segmentation
6564 * is required (for IDT, GDT and LDT accesses).
6565 * @param GCPtrMem The address of the guest memory.
6566 * @param fAccess How the memory is being accessed. The
6567 * IEM_ACCESS_TYPE_XXX bit is used to figure out
6568 * how to map the memory, while the
6569 * IEM_ACCESS_WHAT_XXX bit is used when raising
6570 * exceptions.
6571 * @param uAlignCtl Alignment control:
6572 * - Bits 15:0 is the alignment mask.
6573 * - Bits 31:16 for flags like IEM_MEMMAP_F_ALIGN_GP,
6574 * IEM_MEMMAP_F_ALIGN_SSE, and
6575 * IEM_MEMMAP_F_ALIGN_GP_OR_AC.
6576 * Pass zero to skip alignment.
6577 */
6578void *iemMemMapJmp(PVMCPUCC pVCpu, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess,
6579 uint32_t uAlignCtl) IEM_NOEXCEPT_MAY_LONGJMP
6580{
6581 /*
6582 * Check the input, check segment access and adjust address
6583 * with segment base.
6584 */
6585 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
6586 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
6587 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
6588
6589 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
6590 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
6591 else IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6592
6593 /*
6594 * Alignment check.
6595 */
6596 if ( (GCPtrMem & (uAlignCtl & UINT16_MAX)) == 0 )
6597 { /* likelyish */ }
6598 else
6599 {
6600 /* Misaligned access. */
6601 if ((fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6602 {
6603 if ( !(uAlignCtl & IEM_MEMMAP_F_ALIGN_GP)
6604 || ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_SSE)
6605 && (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) )
6606 {
6607 AssertCompile(X86_CR0_AM == X86_EFL_AC);
6608
6609 if (iemMemAreAlignmentChecksEnabled(pVCpu))
6610 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
6611 }
6612 else if ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_GP_OR_AC)
6613 && (GCPtrMem & 3) /* The value 4 matches 10980xe's FXSAVE and helps make bs3-cpu-basic2 work. */
6614 /** @todo may only apply to 2, 4 or 8 byte misalignments depending on the CPU
6615 * implementation. See FXSAVE/FRSTOR/XSAVE/XRSTOR/++. Using 4 for now as
6616 * that's what FXSAVE does on a 10980xe. */
6617 && iemMemAreAlignmentChecksEnabled(pVCpu))
6618 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
6619 else
6620 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
6621 }
6622 }
6623
6624 /*
6625 * Figure out which mapping entry to use.
6626 */
6627 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
6628 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6629 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
6630 {
6631 iMemMap = iemMemMapFindFree(pVCpu);
6632 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
6633 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
6634 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
6635 pVCpu->iem.s.aMemMappings[2].fAccess),
6636 IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_9));
6637 }
6638
6639 /*
6640 * Crossing a page boundary?
6641 */
6642 if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE)
6643 { /* No (likely). */ }
6644 else
6645 {
6646 void *pvMem;
6647 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, cbMem, GCPtrMem, fAccess);
6648 if (rcStrict == VINF_SUCCESS)
6649 return pvMem;
6650 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6651 }
6652
6653#ifdef IEM_WITH_DATA_TLB
6654 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
6655
6656 /*
6657 * Get the TLB entry for this page.
6658 */
6659 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
6660 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
6661 if (pTlbe->uTag == uTag)
6662 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
6663 else
6664 {
6665 pVCpu->iem.s.DataTlb.cTlbMisses++;
6666 PGMPTWALK Walk;
6667 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
6668 if (RT_FAILURE(rc))
6669 {
6670 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
6671# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6672 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6673 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
6674# endif
6675 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, rc);
6676 }
6677
6678 Assert(Walk.fSucceeded);
6679 pTlbe->uTag = uTag;
6680 pTlbe->fFlagsAndPhysRev = ~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A); /* skipping NX */
6681 pTlbe->GCPhys = Walk.GCPhys;
6682 pTlbe->pbMappingR3 = NULL;
6683 }
6684
6685 /*
6686 * Check the flags and physical revision.
6687 */
6688 /** @todo make the caller pass these in with fAccess. */
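    /* Note: the fNoXxx masks computed below get folded into the single compare
       against uTlbPhysRev that follows, so one test covers the physical
       revision, the relevant page-table/physical access restrictions and the
       accessed/dirty tracking; any mismatch drops us into the slow path that
       rechecks and refreshes the TLB entry. */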
6689 uint64_t const fNoUser = (fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS && IEM_GET_CPL(pVCpu) == 3
6690 ? IEMTLBE_F_PT_NO_USER : 0;
6691 uint64_t const fNoWriteNoDirty = fAccess & IEM_ACCESS_TYPE_WRITE
6692 ? IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PT_NO_DIRTY
6693 | ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)
6694 || (IEM_GET_CPL(pVCpu) == 3 && (fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6695 ? IEMTLBE_F_PT_NO_WRITE : 0)
6696 : 0;
6697 uint64_t const fNoRead = fAccess & IEM_ACCESS_TYPE_READ ? IEMTLBE_F_PG_NO_READ : 0;
6698 uint8_t *pbMem = NULL;
6699 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PT_NO_ACCESSED | fNoRead | fNoWriteNoDirty | fNoUser))
6700 == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6701# ifdef IN_RING3
6702 pbMem = pTlbe->pbMappingR3;
6703# else
6704 pbMem = NULL;
6705# endif
6706 else
6707 {
6708 /*
6709 * Okay, something isn't quite right or needs refreshing.
6710 */
6711 /* Write to read only memory? */
6712 if (pTlbe->fFlagsAndPhysRev & fNoWriteNoDirty & IEMTLBE_F_PT_NO_WRITE)
6713 {
6714 LogEx(LOG_GROUP_IEM, ("iemMemMapJmp: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
6715# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6716 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6717 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6718# endif
6719 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
6720 }
6721
6722 /* Kernel memory accessed by userland? */
6723 if (pTlbe->fFlagsAndPhysRev & fNoUser & IEMTLBE_F_PT_NO_USER)
6724 {
6725 LogEx(LOG_GROUP_IEM, ("iemMemMapJmp: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
6726# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6727 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6728 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6729# endif
6730 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, VERR_ACCESS_DENIED);
6731 }
6732
6733 /* Set the dirty / access flags.
6734 ASSUMES this is set when the address is translated rather than on commit... */
6735 /** @todo testcase: check when A and D bits are actually set by the CPU. */
6736 if (pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY) | IEMTLBE_F_PT_NO_ACCESSED))
6737 {
6738 uint32_t const fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
6739 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
6740 AssertRC(rc2);
6741 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
6742 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
6743 pTlbe->fFlagsAndPhysRev &= ~((fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY) | IEMTLBE_F_PT_NO_ACCESSED);
6744 }
6745
6746 /*
6747 * Check if the physical page info needs updating.
6748 */
6749 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6750# ifdef IN_RING3
6751 pbMem = pTlbe->pbMappingR3;
6752# else
6753 pbMem = NULL;
6754# endif
6755 else
6756 {
6757 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
6758 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
6759 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
6760 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
6761 AssertCompile(PGMIEMGCPHYS2PTR_F_CODE_PAGE == IEMTLBE_F_PG_CODE_PAGE);
6762 pTlbe->pbMappingR3 = NULL;
6763 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
6764 | IEMTLBE_F_NO_MAPPINGR3
6765 | IEMTLBE_F_PG_NO_READ
6766 | IEMTLBE_F_PG_NO_WRITE
6767 | IEMTLBE_F_PG_UNASSIGNED
6768 | IEMTLBE_F_PG_CODE_PAGE);
6769 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
6770 &pbMem, &pTlbe->fFlagsAndPhysRev);
6771 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
6772# ifdef IN_RING3
6773 pTlbe->pbMappingR3 = pbMem;
6774# endif
6775 }
6776
6777 /*
6778 * Check the physical page level access and mapping.
6779 */
6780 if (!(pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty | fNoRead) & (IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ))))
6781 { /* probably likely */ }
6782 else
6783 {
6784 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, (void **)&pbMem, cbMem,
6785 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,
6786 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED
6787 : pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? VERR_PGM_PHYS_TLB_CATCH_ALL
6788 : VERR_PGM_PHYS_TLB_CATCH_WRITE);
6789 if (rcStrict == VINF_SUCCESS)
6790 return pbMem;
6791 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6792 }
6793 }
6794 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3)); /* ASSUMPTIONS about PGMPhysIemGCPhys2PtrNoLock behaviour. */
6795
6796 if (pbMem)
6797 {
6798 Assert(!((uintptr_t)pbMem & GUEST_PAGE_OFFSET_MASK));
6799 pbMem = pbMem + (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6800 fAccess |= IEM_ACCESS_NOT_LOCKED;
6801 }
6802 else
6803 {
6804 Assert(!(fAccess & IEM_ACCESS_NOT_LOCKED));
6805 RTGCPHYS const GCPhysFirst = pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6806 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6807 if (rcStrict == VINF_SUCCESS)
6808 return pbMem;
6809 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6810 }
6811
6812 void * const pvMem = pbMem;
6813
6814 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6815 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6816 if (fAccess & IEM_ACCESS_TYPE_READ)
6817 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6818
6819#else /* !IEM_WITH_DATA_TLB */
6820
6821
6822 RTGCPHYS GCPhysFirst;
6823 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, &GCPhysFirst);
6824 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
6825 else IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6826
6827 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6828 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6829 if (fAccess & IEM_ACCESS_TYPE_READ)
6830 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6831
6832 void *pvMem;
6833 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6834 if (rcStrict == VINF_SUCCESS)
6835 { /* likely */ }
6836 else
6837 {
6838 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
6839 if (rcStrict == VINF_SUCCESS)
6840 return pvMem;
6841 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6842 }
6843
6844#endif /* !IEM_WITH_DATA_TLB */
6845
6846 /*
6847 * Fill in the mapping table entry.
6848 */
6849 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
6850 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
6851 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6852 pVCpu->iem.s.cActiveMappings++;
6853
6854 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6855 return pvMem;
6856}
6857
6858
6859/**
6860 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
6861 *
6862 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6863 * @param pvMem The mapping.
6864 * @param fAccess The kind of access.
6865 */
6866void iemMemCommitAndUnmapJmp(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP
6867{
6868 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
6869 AssertStmt(iMemMap >= 0, IEM_DO_LONGJMP(pVCpu, iMemMap));
6870
6871 /* If it's bounce buffered, we may need to write back the buffer. */
6872 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6873 {
6874 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6875 {
6876 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
6877 if (rcStrict == VINF_SUCCESS)
6878 return;
6879 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6880 }
6881 }
6882 /* Otherwise unlock it. */
6883 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6884 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6885
6886 /* Free the entry. */
6887 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6888 Assert(pVCpu->iem.s.cActiveMappings != 0);
6889 pVCpu->iem.s.cActiveMappings--;
6890}
6891
6892
6893/** Fallback for iemMemCommitAndUnmapRwJmp. */
6894void iemMemCommitAndUnmapRwSafeJmp(PVMCPUCC pVCpu, void *pvMem, uint8_t bMapInfo) IEM_NOEXCEPT_MAY_LONGJMP
6895{
6896 Assert(bMapInfo == (1 | ((IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE) << 4)) ); RT_NOREF_PV(bMapInfo);
6897 iemMemCommitAndUnmapJmp(pVCpu, pvMem, IEM_ACCESS_DATA_RW);
6898}
6899
6900
6901/** Fallback for iemMemCommitAndUnmapWoJmp. */
6902void iemMemCommitAndUnmapWoSafeJmp(PVMCPUCC pVCpu, void *pvMem, uint8_t bMapInfo) IEM_NOEXCEPT_MAY_LONGJMP
6903{
6904 Assert(bMapInfo == (1 | (IEM_ACCESS_TYPE_WRITE << 4)) ); RT_NOREF_PV(bMapInfo);
6905 iemMemCommitAndUnmapJmp(pVCpu, pvMem, IEM_ACCESS_DATA_W);
6906}
6907
6908
6909/** Fallback for iemMemCommitAndUnmapRoJmp. */
6910void iemMemCommitAndUnmapRoSafeJmp(PVMCPUCC pVCpu, const void *pvMem, uint8_t bMapInfo) IEM_NOEXCEPT_MAY_LONGJMP
6911{
6912 Assert(bMapInfo == (1 | (IEM_ACCESS_TYPE_READ << 4)) ); RT_NOREF_PV(bMapInfo);
6913 iemMemCommitAndUnmapJmp(pVCpu, (void *)pvMem, IEM_ACCESS_DATA_R);
6914}
6915
6916#endif /* IEM_WITH_SETJMP */
6917
6918#ifndef IN_RING3
6919/**
6920 * Commits the guest memory if bounce buffered and unmaps it; if any bounce
6921 * buffer part runs into trouble, the write is postponed to ring-3 (VMCPU_FF_IEM and the pending-write access flags are set).
6922 *
6923 * Allows the instruction to be completed and retired, while the IEM user will
6924 * return to ring-3 immediately afterwards and do the postponed writes there.
6925 *
6926 * @returns VBox status code (no strict statuses). Caller must check
6927 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
6928 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6929 * @param pvMem The mapping.
6930 * @param fAccess The kind of access.
6931 */
6932VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess) RT_NOEXCEPT
6933{
6934 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
6935 AssertReturn(iMemMap >= 0, iMemMap);
6936
6937 /* If it's bounce buffered, we may need to write back the buffer. */
6938 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6939 {
6940 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6941 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
6942 }
6943 /* Otherwise unlock it. */
6944 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6945 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6946
6947 /* Free the entry. */
6948 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6949 Assert(pVCpu->iem.s.cActiveMappings != 0);
6950 pVCpu->iem.s.cActiveMappings--;
6951 return VINF_SUCCESS;
6952}
6953#endif
6954
6955
6956/**
6957 * Rollbacks mappings, releasing page locks and such.
6958 *
6959 * The caller shall only call this after checking cActiveMappings.
6960 *
6961 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6962 */
6963void iemMemRollback(PVMCPUCC pVCpu) RT_NOEXCEPT
6964{
6965 Assert(pVCpu->iem.s.cActiveMappings > 0);
6966
6967 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
6968 while (iMemMap-- > 0)
6969 {
6970 uint32_t const fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
6971 if (fAccess != IEM_ACCESS_INVALID)
6972 {
6973 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
6974 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6975 if (!(fAccess & (IEM_ACCESS_BOUNCE_BUFFERED | IEM_ACCESS_NOT_LOCKED)))
6976 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6977 AssertMsg(pVCpu->iem.s.cActiveMappings > 0,
6978 ("iMemMap=%u fAccess=%#x pv=%p GCPhysFirst=%RGp GCPhysSecond=%RGp\n",
6979 iMemMap, fAccess, pVCpu->iem.s.aMemMappings[iMemMap].pv,
6980 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond));
6981 pVCpu->iem.s.cActiveMappings--;
6982 }
6983 }
6984}
6985
6986
6987/*
6988 * Instantiate R/W templates.
6989 */
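/* Each inclusion of IEMAllMemRWTmpl.cpp.h below expands into fetch/store
   accessors for the given TMPL_MEM_TYPE, named with the TMPL_MEM_FN_SUFF
   suffix (e.g. the iemMemFetchDataU16/U32/U64 helpers used by
   iemMemFetchDataXdtr further down); TMPL_MEM_WITH_STACK presumably adds the
   stack variants on top. */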
6990#define TMPL_MEM_WITH_STACK
6991
6992#define TMPL_MEM_TYPE uint8_t
6993#define TMPL_MEM_FN_SUFF U8
6994#define TMPL_MEM_FMT_TYPE "%#04x"
6995#define TMPL_MEM_FMT_DESC "byte"
6996#include "IEMAllMemRWTmpl.cpp.h"
6997
6998#define TMPL_MEM_TYPE uint16_t
6999#define TMPL_MEM_FN_SUFF U16
7000#define TMPL_MEM_FMT_TYPE "%#06x"
7001#define TMPL_MEM_FMT_DESC "word"
7002#include "IEMAllMemRWTmpl.cpp.h"
7003
7004#define TMPL_WITH_PUSH_SREG
7005#define TMPL_MEM_TYPE uint32_t
7006#define TMPL_MEM_FN_SUFF U32
7007#define TMPL_MEM_FMT_TYPE "%#010x"
7008#define TMPL_MEM_FMT_DESC "dword"
7009#include "IEMAllMemRWTmpl.cpp.h"
7010#undef TMPL_WITH_PUSH_SREG
7011
7012#define TMPL_MEM_TYPE uint64_t
7013#define TMPL_MEM_FN_SUFF U64
7014#define TMPL_MEM_FMT_TYPE "%#018RX64"
7015#define TMPL_MEM_FMT_DESC "qword"
7016#include "IEMAllMemRWTmpl.cpp.h"
7017
7018#undef TMPL_MEM_WITH_STACK
7019
7020#define TMPL_MEM_TYPE uint64_t
7021#define TMPL_MEM_TYPE_ALIGN (sizeof(uint64_t) * 2 - 1)
7022#define TMPL_MEM_FN_SUFF U64AlignedU128
7023#define TMPL_MEM_FMT_TYPE "%#018RX64"
7024#define TMPL_MEM_FMT_DESC "qword"
7025#include "IEMAllMemRWTmpl.cpp.h"
7026
7027
7028/**
7029 * Fetches a data dword and zero extends it to a qword.
7030 *
7031 * @returns Strict VBox status code.
7032 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7033 * @param pu64Dst Where to return the qword.
7034 * @param iSegReg The index of the segment register to use for
7035 * this access. The base and limits are checked.
7036 * @param GCPtrMem The address of the guest memory.
7037 */
7038VBOXSTRICTRC iemMemFetchDataU32_ZX_U64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7039{
7040 /* The lazy approach for now... */
7041 uint32_t const *pu32Src;
7042 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem,
7043 IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
7044 if (rc == VINF_SUCCESS)
7045 {
7046 *pu64Dst = *pu32Src;
7047 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
7048 Log(("IEM RD dword %d|%RGv: %#010RX64\n", iSegReg, GCPtrMem, *pu64Dst));
7049 }
7050 return rc;
7051}
7052
7053
7054#ifdef SOME_UNUSED_FUNCTION
7055/**
7056 * Fetches a data dword and sign extends it to a qword.
7057 *
7058 * @returns Strict VBox status code.
7059 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7060 * @param pu64Dst Where to return the sign extended value.
7061 * @param iSegReg The index of the segment register to use for
7062 * this access. The base and limits are checked.
7063 * @param GCPtrMem The address of the guest memory.
7064 */
7065VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7066{
7067 /* The lazy approach for now... */
7068 int32_t const *pi32Src;
7069 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem,
7070 IEM_ACCESS_DATA_R, sizeof(*pi32Src) - 1);
7071 if (rc == VINF_SUCCESS)
7072 {
7073 *pu64Dst = *pi32Src;
7074 rc = iemMemCommitAndUnmap(pVCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
7075 Log(("IEM RD dword %d|%RGv: %#010x\n", iSegReg, GCPtrMem, (uint32_t)*pu64Dst));
7076 }
7077#ifdef __GNUC__ /* warning: GCC may be a royal pain */
7078 else
7079 *pu64Dst = 0;
7080#endif
7081 return rc;
7082}
7083#endif
7084
7085
7086/**
7087 * Fetches a data tword.
7088 *
7089 * @returns Strict VBox status code.
7090 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7091 * @param pr80Dst Where to return the tword.
7092 * @param iSegReg The index of the segment register to use for
7093 * this access. The base and limits are checked.
7094 * @param GCPtrMem The address of the guest memory.
7095 */
7096VBOXSTRICTRC iemMemFetchDataR80(PVMCPUCC pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7097{
7098 /* The lazy approach for now... */
7099 PCRTFLOAT80U pr80Src;
7100 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R, 7);
7101 if (rc == VINF_SUCCESS)
7102 {
7103 *pr80Dst = *pr80Src;
7104 rc = iemMemCommitAndUnmap(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
7105 Log(("IEM RD tword %d|%RGv: %.10Rhxs\n", iSegReg, GCPtrMem, pr80Dst));
7106 }
7107 return rc;
7108}
7109
7110
7111#ifdef IEM_WITH_SETJMP
7112/**
7113 * Fetches a data tword, longjmp on error.
7114 *
7115 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7116 * @param pr80Dst Where to return the tword.
7117 * @param iSegReg The index of the segment register to use for
7118 * this access. The base and limits are checked.
7119 * @param GCPtrMem The address of the guest memory.
7120 */
7121void iemMemFetchDataR80Jmp(PVMCPUCC pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7122{
7123 /* The lazy approach for now... */
7124 PCRTFLOAT80U pr80Src = (PCRTFLOAT80U)iemMemMapJmp(pVCpu, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R, 7);
7125 *pr80Dst = *pr80Src;
7126 iemMemCommitAndUnmapJmp(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
7127 Log(("IEM RD tword %d|%RGv: %.10Rhxs\n", iSegReg, GCPtrMem, pr80Dst));
7128}
7129#endif
7130
7131
7132/**
7133 * Fetches a data decimal tword.
7134 *
7135 * @returns Strict VBox status code.
7136 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7137 * @param pd80Dst Where to return the tword.
7138 * @param iSegReg The index of the segment register to use for
7139 * this access. The base and limits are checked.
7140 * @param GCPtrMem The address of the guest memory.
7141 */
7142VBOXSTRICTRC iemMemFetchDataD80(PVMCPUCC pVCpu, PRTPBCD80U pd80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7143{
7144 /* The lazy approach for now... */
7145 PCRTPBCD80U pd80Src;
7146 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pd80Src, sizeof(*pd80Src), iSegReg, GCPtrMem,
7147 IEM_ACCESS_DATA_R, 7 /** @todo FBLD alignment check */);
7148 if (rc == VINF_SUCCESS)
7149 {
7150 *pd80Dst = *pd80Src;
7151 rc = iemMemCommitAndUnmap(pVCpu, (void *)pd80Src, IEM_ACCESS_DATA_R);
7152 Log(("IEM RD tword %d|%RGv: %.10Rhxs\n", iSegReg, GCPtrMem, pd80Dst));
7153 }
7154 return rc;
7155}
7156
7157
7158#ifdef IEM_WITH_SETJMP
7159/**
7160 * Fetches a data decimal tword, longjmp on error.
7161 *
7162 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7163 * @param pd80Dst Where to return the tword.
7164 * @param iSegReg The index of the segment register to use for
7165 * this access. The base and limits are checked.
7166 * @param GCPtrMem The address of the guest memory.
7167 */
7168void iemMemFetchDataD80Jmp(PVMCPUCC pVCpu, PRTPBCD80U pd80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7169{
7170 /* The lazy approach for now... */
7171 PCRTPBCD80U pd80Src = (PCRTPBCD80U)iemMemMapJmp(pVCpu, sizeof(*pd80Src), iSegReg, GCPtrMem,
7172 IEM_ACCESS_DATA_R, 7 /** @todo FBSTP alignment check */);
7173 *pd80Dst = *pd80Src;
7174 iemMemCommitAndUnmapJmp(pVCpu, (void *)pd80Src, IEM_ACCESS_DATA_R);
7175 Log(("IEM RD tword %d|%RGv: %.10Rhxs\n", iSegReg, GCPtrMem, pd80Dst));
7176}
7177#endif
7178
7179
7180/**
7181 * Fetches a data dqword (double qword), generally SSE related.
7182 *
7183 * @returns Strict VBox status code.
7184 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7185 * @param   pu128Dst            Where to return the dqword.
7186 * @param iSegReg The index of the segment register to use for
7187 * this access. The base and limits are checked.
7188 * @param GCPtrMem The address of the guest memory.
7189 */
7190VBOXSTRICTRC iemMemFetchDataU128(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7191{
7192 /* The lazy approach for now... */
7193 PCRTUINT128U pu128Src;
7194 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem,
7195 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7196 if (rc == VINF_SUCCESS)
7197 {
7198 pu128Dst->au64[0] = pu128Src->au64[0];
7199 pu128Dst->au64[1] = pu128Src->au64[1];
7200 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7201 Log(("IEM RD dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
7202 }
7203 return rc;
7204}
7205
7206
7207#ifdef IEM_WITH_SETJMP
7208/**
7209 * Fetches a data dqword (double qword), generally SSE related.
7210 *
7211 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7212 * @param   pu128Dst            Where to return the dqword.
7213 * @param iSegReg The index of the segment register to use for
7214 * this access. The base and limits are checked.
7215 * @param GCPtrMem The address of the guest memory.
7216 */
7217void iemMemFetchDataU128Jmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7218{
7219 /* The lazy approach for now... */
7220 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem,
7221 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7222 pu128Dst->au64[0] = pu128Src->au64[0];
7223 pu128Dst->au64[1] = pu128Src->au64[1];
7224 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7225 Log(("IEM RD dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
7226}
7227#endif
7228
7229
7230/**
7231 * Fetches a data dqword (double qword) at an aligned address, generally SSE
7232 * related.
7233 *
7234 * Raises \#GP(0) if not aligned.
7235 *
7236 * @returns Strict VBox status code.
7237 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7238 * @param   pu128Dst            Where to return the dqword.
7239 * @param iSegReg The index of the segment register to use for
7240 * this access. The base and limits are checked.
7241 * @param GCPtrMem The address of the guest memory.
7242 */
7243VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7244{
7245 /* The lazy approach for now... */
7246 PCRTUINT128U pu128Src;
7247 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem,
7248 IEM_ACCESS_DATA_R, (sizeof(*pu128Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7249 if (rc == VINF_SUCCESS)
7250 {
7251 pu128Dst->au64[0] = pu128Src->au64[0];
7252 pu128Dst->au64[1] = pu128Src->au64[1];
7253 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7254 Log(("IEM RD dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
7255 }
7256 return rc;
7257}
7258
7259
7260#ifdef IEM_WITH_SETJMP
7261/**
7262 * Fetches a data dqword (double qword) at an aligned address, generally SSE
7263 * related, longjmp on error.
7264 *
7265 * Raises \#GP(0) if not aligned.
7266 *
7267 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7268 * @param   pu128Dst            Where to return the dqword.
7269 * @param iSegReg The index of the segment register to use for
7270 * this access. The base and limits are checked.
7271 * @param GCPtrMem The address of the guest memory.
7272 */
7273void iemMemFetchDataU128AlignedSseJmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg,
7274 RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7275{
7276 /* The lazy approach for now... */
7277 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
7278 (sizeof(*pu128Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7279 pu128Dst->au64[0] = pu128Src->au64[0];
7280 pu128Dst->au64[1] = pu128Src->au64[1];
7281 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7282 Log(("IEM RD dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
7283}
7284#endif
7285
7286
7287/**
7288 * Fetches a data oword (octo word), generally AVX related.
7289 *
7290 * @returns Strict VBox status code.
7291 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7292 * @param   pu256Dst            Where to return the oword.
7293 * @param iSegReg The index of the segment register to use for
7294 * this access. The base and limits are checked.
7295 * @param GCPtrMem The address of the guest memory.
7296 */
7297VBOXSTRICTRC iemMemFetchDataU256(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7298{
7299 /* The lazy approach for now... */
7300 PCRTUINT256U pu256Src;
7301 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem,
7302 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7303 if (rc == VINF_SUCCESS)
7304 {
7305 pu256Dst->au64[0] = pu256Src->au64[0];
7306 pu256Dst->au64[1] = pu256Src->au64[1];
7307 pu256Dst->au64[2] = pu256Src->au64[2];
7308 pu256Dst->au64[3] = pu256Src->au64[3];
7309 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
7310 Log(("IEM RD qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7311 }
7312 return rc;
7313}
7314
7315
7316#ifdef IEM_WITH_SETJMP
7317/**
7318 * Fetches a data oword (octo word), generally AVX related.
7319 *
7320 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7321 * @param   pu256Dst            Where to return the oword.
7322 * @param iSegReg The index of the segment register to use for
7323 * this access. The base and limits are checked.
7324 * @param GCPtrMem The address of the guest memory.
7325 */
7326void iemMemFetchDataU256Jmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7327{
7328 /* The lazy approach for now... */
7329 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem,
7330 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7331 pu256Dst->au64[0] = pu256Src->au64[0];
7332 pu256Dst->au64[1] = pu256Src->au64[1];
7333 pu256Dst->au64[2] = pu256Src->au64[2];
7334 pu256Dst->au64[3] = pu256Src->au64[3];
7335 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
7336 Log(("IEM RD qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7337}
7338#endif
7339
7340
7341/**
7342 * Fetches a data oword (octo word) at an aligned address, generally AVX
7343 * related.
7344 *
7345 * Raises \#GP(0) if not aligned.
7346 *
7347 * @returns Strict VBox status code.
7348 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7349 * @param   pu256Dst            Where to return the oword.
7350 * @param iSegReg The index of the segment register to use for
7351 * this access. The base and limits are checked.
7352 * @param GCPtrMem The address of the guest memory.
7353 */
7354VBOXSTRICTRC iemMemFetchDataU256AlignedSse(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7355{
7356 /* The lazy approach for now... */
7357 PCRTUINT256U pu256Src;
7358 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem,
7359 IEM_ACCESS_DATA_R, (sizeof(*pu256Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7360 if (rc == VINF_SUCCESS)
7361 {
7362 pu256Dst->au64[0] = pu256Src->au64[0];
7363 pu256Dst->au64[1] = pu256Src->au64[1];
7364 pu256Dst->au64[2] = pu256Src->au64[2];
7365 pu256Dst->au64[3] = pu256Src->au64[3];
7366 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
7367 Log(("IEM RD qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7368 }
7369 return rc;
7370}
7371
7372
7373#ifdef IEM_WITH_SETJMP
7374/**
7375 * Fetches a data oword (octo word) at an aligned address, generally AVX
7376 * related, longjmp on error.
7377 *
7378 * Raises \#GP(0) if not aligned.
7379 *
7380 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7381 * @param   pu256Dst            Where to return the oword.
7382 * @param iSegReg The index of the segment register to use for
7383 * this access. The base and limits are checked.
7384 * @param GCPtrMem The address of the guest memory.
7385 */
7386void iemMemFetchDataU256AlignedSseJmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg,
7387 RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7388{
7389 /* The lazy approach for now... */
7390 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
7391 (sizeof(*pu256Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7392 pu256Dst->au64[0] = pu256Src->au64[0];
7393 pu256Dst->au64[1] = pu256Src->au64[1];
7394 pu256Dst->au64[2] = pu256Src->au64[2];
7395 pu256Dst->au64[3] = pu256Src->au64[3];
7396 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
7397 Log(("IEM RD qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7398}
7399#endif
7400
7401
7402
7403/**
7404 * Fetches a descriptor register (lgdt, lidt).
7405 *
7406 * @returns Strict VBox status code.
7407 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7408 * @param pcbLimit Where to return the limit.
7409 * @param pGCPtrBase Where to return the base.
7410 * @param iSegReg The index of the segment register to use for
7411 * this access. The base and limits are checked.
7412 * @param GCPtrMem The address of the guest memory.
7413 * @param enmOpSize The effective operand size.
7414 */
7415VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPUCC pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
7416 RTGCPTR GCPtrMem, IEMMODE enmOpSize) RT_NOEXCEPT
7417{
7418 /*
7419 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
7420 * little special:
7421 * - The two reads are done separately.
7422 * - Operand size override works in 16-bit and 32-bit code, but not in 64-bit code.
7423 * - We suspect the 386 to actually commit the limit before the base in
7424 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
7425 * don't try to emulate this eccentric behavior, because it's not well
7426 * enough understood and rather hard to trigger.
7427 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
7428 */
7429 VBOXSTRICTRC rcStrict;
7430 if (IEM_IS_64BIT_CODE(pVCpu))
7431 {
7432 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7433 if (rcStrict == VINF_SUCCESS)
7434 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
7435 }
7436 else
7437 {
7438 uint32_t uTmp = 0; /* (Visual C++ might otherwise think it is used uninitialized.) */
7439 if (enmOpSize == IEMMODE_32BIT)
7440 {
7441 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
7442 {
7443 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7444 if (rcStrict == VINF_SUCCESS)
7445 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7446 }
7447 else
7448 {
7449 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
7450 if (rcStrict == VINF_SUCCESS)
7451 {
7452 *pcbLimit = (uint16_t)uTmp;
7453 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7454 }
7455 }
7456 if (rcStrict == VINF_SUCCESS)
7457 *pGCPtrBase = uTmp;
7458 }
7459 else
7460 {
7461 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7462 if (rcStrict == VINF_SUCCESS)
7463 {
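                /* 16-bit operand size: only a 24-bit base is loaded; the top byte of the dword read below is ignored. */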
7464 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7465 if (rcStrict == VINF_SUCCESS)
7466 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
7467 }
7468 }
7469 }
7470 return rcStrict;
7471}
7472
7473
7474/**
7475 * Stores a data dqword.
7476 *
7477 * @returns Strict VBox status code.
7478 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7479 * @param iSegReg The index of the segment register to use for
7480 * this access. The base and limits are checked.
7481 * @param GCPtrMem The address of the guest memory.
7482 * @param u128Value The value to store.
7483 */
7484VBOXSTRICTRC iemMemStoreDataU128(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT
7485{
7486 /* The lazy approach for now... */
7487 PRTUINT128U pu128Dst;
7488 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem,
7489 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7490 if (rc == VINF_SUCCESS)
7491 {
7492 pu128Dst->au64[0] = u128Value.au64[0];
7493 pu128Dst->au64[1] = u128Value.au64[1];
7494 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
7495 Log5(("IEM WR dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
7496 }
7497 return rc;
7498}
7499
7500
7501#ifdef IEM_WITH_SETJMP
7502/**
7503 * Stores a data dqword, longjmp on error.
7504 *
7505 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7506 * @param iSegReg The index of the segment register to use for
7507 * this access. The base and limits are checked.
7508 * @param GCPtrMem The address of the guest memory.
7509 * @param u128Value The value to store.
7510 */
7511void iemMemStoreDataU128Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) IEM_NOEXCEPT_MAY_LONGJMP
7512{
7513 /* The lazy approach for now... */
7514 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem,
7515 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7516 pu128Dst->au64[0] = u128Value.au64[0];
7517 pu128Dst->au64[1] = u128Value.au64[1];
7518 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
7519 Log5(("IEM WR dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
7520}
7521#endif
7522
7523
7524/**
7525 * Stores a data dqword, SSE aligned.
7526 *
7527 * @returns Strict VBox status code.
7528 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7529 * @param iSegReg The index of the segment register to use for
7530 * this access. The base and limits are checked.
7531 * @param GCPtrMem The address of the guest memory.
7532 * @param u128Value The value to store.
7533 */
7534VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT
7535{
7536 /* The lazy approach for now... */
7537 PRTUINT128U pu128Dst;
7538 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W,
7539 (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7540 if (rc == VINF_SUCCESS)
7541 {
7542 pu128Dst->au64[0] = u128Value.au64[0];
7543 pu128Dst->au64[1] = u128Value.au64[1];
7544 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
7545 Log5(("IEM WR dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
7546 }
7547 return rc;
7548}
7549
7550
7551#ifdef IEM_WITH_SETJMP
7552/**
7553 * Stores a data dqword, SSE aligned, longjmp on error.
7554 *
7556 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7557 * @param iSegReg The index of the segment register to use for
7558 * this access. The base and limits are checked.
7559 * @param GCPtrMem The address of the guest memory.
7560 * @param u128Value The value to store.
7561 */
7562void iemMemStoreDataU128AlignedSseJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem,
7563 RTUINT128U u128Value) IEM_NOEXCEPT_MAY_LONGJMP
7564{
7565 /* The lazy approach for now... */
7566 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W,
7567 (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7568 pu128Dst->au64[0] = u128Value.au64[0];
7569 pu128Dst->au64[1] = u128Value.au64[1];
7570 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
7571 Log5(("IEM WR dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
7572}
7573#endif
7574
7575
7576/**
7577 * Stores a data qqword.
7578 *
7579 * @returns Strict VBox status code.
7580 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7581 * @param iSegReg The index of the segment register to use for
7582 * this access. The base and limits are checked.
7583 * @param GCPtrMem The address of the guest memory.
7584 * @param pu256Value Pointer to the value to store.
7585 */
7586VBOXSTRICTRC iemMemStoreDataU256(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
7587{
7588 /* The lazy approach for now... */
7589 PRTUINT256U pu256Dst;
7590 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7591 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7592 if (rc == VINF_SUCCESS)
7593 {
7594 pu256Dst->au64[0] = pu256Value->au64[0];
7595 pu256Dst->au64[1] = pu256Value->au64[1];
7596 pu256Dst->au64[2] = pu256Value->au64[2];
7597 pu256Dst->au64[3] = pu256Value->au64[3];
7598 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
7599 Log5(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7600 }
7601 return rc;
7602}
7603
7604
7605#ifdef IEM_WITH_SETJMP
7606/**
7607 * Stores a data qqword, longjmp on error.
7608 *
7609 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7610 * @param iSegReg The index of the segment register to use for
7611 * this access. The base and limits are checked.
7612 * @param GCPtrMem The address of the guest memory.
7613 * @param pu256Value Pointer to the value to store.
7614 */
7615void iemMemStoreDataU256Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP
7616{
7617 /* The lazy approach for now... */
7618 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7619 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7620 pu256Dst->au64[0] = pu256Value->au64[0];
7621 pu256Dst->au64[1] = pu256Value->au64[1];
7622 pu256Dst->au64[2] = pu256Value->au64[2];
7623 pu256Dst->au64[3] = pu256Value->au64[3];
7624 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
7625 Log5(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7626}
7627#endif
7628
7629
7630/**
7631 * Stores a data qqword, AVX aligned (raises \#GP(0) on misalignment).
7632 *
7633 * @returns Strict VBox status code.
7634 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7635 * @param iSegReg The index of the segment register to use for
7636 * this access. The base and limits are checked.
7637 * @param GCPtrMem The address of the guest memory.
7638 * @param pu256Value Pointer to the value to store.
7639 */
7640VBOXSTRICTRC iemMemStoreDataU256AlignedAvx(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
7641{
7642 /* The lazy approach for now... */
7643 PRTUINT256U pu256Dst;
7644 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7645 IEM_ACCESS_DATA_W, (sizeof(*pu256Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP);
7646 if (rc == VINF_SUCCESS)
7647 {
7648 pu256Dst->au64[0] = pu256Value->au64[0];
7649 pu256Dst->au64[1] = pu256Value->au64[1];
7650 pu256Dst->au64[2] = pu256Value->au64[2];
7651 pu256Dst->au64[3] = pu256Value->au64[3];
7652 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
7653 Log5(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7654 }
7655 return rc;
7656}
7657
7658
7659#ifdef IEM_WITH_SETJMP
7660/**
7661 * Stores a data qqword, AVX aligned, longjmp on error.
7662 *
7664 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7665 * @param iSegReg The index of the segment register to use for
7666 * this access. The base and limits are checked.
7667 * @param GCPtrMem The address of the guest memory.
7668 * @param pu256Value Pointer to the value to store.
7669 */
7670void iemMemStoreDataU256AlignedAvxJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem,
7671 PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP
7672{
7673 /* The lazy approach for now... */
7674 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7675 IEM_ACCESS_DATA_W, (sizeof(*pu256Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP);
7676 pu256Dst->au64[0] = pu256Value->au64[0];
7677 pu256Dst->au64[1] = pu256Value->au64[1];
7678 pu256Dst->au64[2] = pu256Value->au64[2];
7679 pu256Dst->au64[3] = pu256Value->au64[3];
7680 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
7681 Log5(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7682}
7683#endif
7684
7685
7686/**
7687 * Stores a descriptor register (sgdt, sidt).
7688 *
7689 * @returns Strict VBox status code.
7690 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7691 * @param cbLimit The limit.
7692 * @param GCPtrBase The base address.
7693 * @param iSegReg The index of the segment register to use for
7694 * this access. The base and limits are checked.
7695 * @param GCPtrMem The address of the guest memory.
7696 */
7697VBOXSTRICTRC iemMemStoreDataXdtr(PVMCPUCC pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7698{
7699 /*
7700 * The SIDT and SGDT instructions actually store the data using two
7701 * independent writes (see bs3CpuBasic2_sidt_sgdt_One). The instructions
7702 * do not respond to operand-size prefixes.
7703 */
7704 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
7705 if (rcStrict == VINF_SUCCESS)
7706 {
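        /* 16-bit code stores a full dword at +2 as well; for the 286 target the top base byte is forced to 0xFF, matching the real 286 which filled that byte with ones. */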
7707 if (IEM_IS_16BIT_CODE(pVCpu))
7708 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
7709 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
7710 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
7711 else if (IEM_IS_32BIT_CODE(pVCpu))
7712 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
7713 else
7714 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
7715 }
7716 return rcStrict;
7717}
7718
7719
7720/**
7721 * Begin a special stack push (used by interrupts, exceptions and such).
7722 *
7723 * This will raise \#SS or \#PF if appropriate.
7724 *
7725 * @returns Strict VBox status code.
7726 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7727 * @param cbMem The number of bytes to push onto the stack.
7728 * @param cbAlign The alignment mask (7, 3, 1).
7729 * @param ppvMem Where to return the pointer to the stack memory.
7730 * As with the other memory functions this could be
7731 * direct access or bounce buffered access, so
7732 * don't commit the register until the commit call
7733 * succeeds.
7734 * @param puNewRsp Where to return the new RSP value. This must be
7735 * passed unchanged to
7736 * iemMemStackPushCommitSpecial().
7737 */
7738VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
7739 void **ppvMem, uint64_t *puNewRsp) RT_NOEXCEPT
7740{
7741 Assert(cbMem < UINT8_MAX);
7742 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, (uint8_t)cbMem, puNewRsp);
7743 return iemMemMap(pVCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop,
7744 IEM_ACCESS_STACK_W, cbAlign);
7745}
7746
7747
7748/**
7749 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
7750 *
7751 * This will update the rSP.
7752 *
7753 * @returns Strict VBox status code.
7754 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7755 * @param pvMem The pointer returned by
7756 * iemMemStackPushBeginSpecial().
7757 * @param uNewRsp The new RSP value returned by
7758 * iemMemStackPushBeginSpecial().
7759 */
7760VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPUCC pVCpu, void *pvMem, uint64_t uNewRsp) RT_NOEXCEPT
7761{
7762 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem, IEM_ACCESS_STACK_W);
7763 if (rcStrict == VINF_SUCCESS)
7764 pVCpu->cpum.GstCtx.rsp = uNewRsp;
7765 return rcStrict;
7766}
7767
7768
7769/**
7770 * Begin a special stack pop (used by iret, retf and such).
7771 *
7772 * This will raise \#SS or \#PF if appropriate.
7773 *
7774 * @returns Strict VBox status code.
7775 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7776 * @param cbMem The number of bytes to pop from the stack.
7777 * @param cbAlign The alignment mask (7, 3, 1).
7778 * @param ppvMem Where to return the pointer to the stack memory.
7779 * @param puNewRsp Where to return the new RSP value. This must be
7780 * assigned to CPUMCTX::rsp manually some time
7781 * after iemMemStackPopDoneSpecial() has been
7782 * called.
7783 */
7784VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
7785 void const **ppvMem, uint64_t *puNewRsp) RT_NOEXCEPT
7786{
7787 Assert(cbMem < UINT8_MAX);
7788 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, (uint8_t)cbMem, puNewRsp);
7789 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R, cbAlign);
7790}
7791
7792
7793/**
7794 * Continue a special stack pop (used by iret and retf), for the purpose of
7795 * retrieving a new stack pointer.
7796 *
7797 * This will raise \#SS or \#PF if appropriate.
7798 *
7799 * @returns Strict VBox status code.
7800 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7801 * @param off Offset from the top of the stack. This is zero
7802 * except in the retf case.
7803 * @param cbMem The number of bytes to pop from the stack.
7804 * @param ppvMem Where to return the pointer to the stack memory.
7805 * @param uCurNewRsp The current uncommitted RSP value. (No need to
7806 * return this because all use of this function is
7807 * to retrieve a new value and anything we return
7808 * here would be discarded.)
7809 */
7810VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPUCC pVCpu, size_t off, size_t cbMem,
7811 void const **ppvMem, uint64_t uCurNewRsp) RT_NOEXCEPT
7812{
7813 Assert(cbMem < UINT8_MAX);
7814
7815 /* The essence of iemRegGetRspForPopEx and friends: */ /** @todo put this into an inlined function? */
7816 RTGCPTR GCPtrTop;
7817 if (IEM_IS_64BIT_CODE(pVCpu))
7818 GCPtrTop = uCurNewRsp;
7819 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
7820 GCPtrTop = (uint32_t)uCurNewRsp;
7821 else
7822 GCPtrTop = (uint16_t)uCurNewRsp;
7823
7824 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop + off, IEM_ACCESS_STACK_R,
7825 0 /* checked in iemMemStackPopBeginSpecial */);
7826}
7827
7828
7829/**
7830 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
7831 * iemMemStackPopContinueSpecial).
7832 *
7833 * The caller will manually commit the rSP.
7834 *
7835 * @returns Strict VBox status code.
7836 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7837 * @param pvMem The pointer returned by
7838 * iemMemStackPopBeginSpecial() or
7839 * iemMemStackPopContinueSpecial().
7840 */
7841VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPUCC pVCpu, void const *pvMem) RT_NOEXCEPT
7842{
7843 return iemMemCommitAndUnmap(pVCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
7844}
7845
7846
7847/**
7848 * Fetches a system table byte.
7849 *
7850 * @returns Strict VBox status code.
7851 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7852 * @param pbDst Where to return the byte.
7853 * @param iSegReg The index of the segment register to use for
7854 * this access. The base and limits are checked.
7855 * @param GCPtrMem The address of the guest memory.
7856 */
7857VBOXSTRICTRC iemMemFetchSysU8(PVMCPUCC pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7858{
7859 /* The lazy approach for now... */
7860 uint8_t const *pbSrc;
7861 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
7862 if (rc == VINF_SUCCESS)
7863 {
7864 *pbDst = *pbSrc;
7865 rc = iemMemCommitAndUnmap(pVCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
7866 }
7867 return rc;
7868}
7869
7870
7871/**
7872 * Fetches a system table word.
7873 *
7874 * @returns Strict VBox status code.
7875 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7876 * @param pu16Dst Where to return the word.
7877 * @param iSegReg The index of the segment register to use for
7878 * this access. The base and limits are checked.
7879 * @param GCPtrMem The address of the guest memory.
7880 */
7881VBOXSTRICTRC iemMemFetchSysU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7882{
7883 /* The lazy approach for now... */
7884 uint16_t const *pu16Src;
7885 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
7886 if (rc == VINF_SUCCESS)
7887 {
7888 *pu16Dst = *pu16Src;
7889 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
7890 }
7891 return rc;
7892}
7893
7894
7895/**
7896 * Fetches a system table dword.
7897 *
7898 * @returns Strict VBox status code.
7899 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7900 * @param pu32Dst Where to return the dword.
7901 * @param iSegReg The index of the segment register to use for
7902 * this access. The base and limits are checked.
7903 * @param GCPtrMem The address of the guest memory.
7904 */
7905VBOXSTRICTRC iemMemFetchSysU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7906{
7907 /* The lazy approach for now... */
7908 uint32_t const *pu32Src;
7909 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
7910 if (rc == VINF_SUCCESS)
7911 {
7912 *pu32Dst = *pu32Src;
7913 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
7914 }
7915 return rc;
7916}
7917
7918
7919/**
7920 * Fetches a system table qword.
7921 *
7922 * @returns Strict VBox status code.
7923 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7924 * @param pu64Dst Where to return the qword.
7925 * @param iSegReg The index of the segment register to use for
7926 * this access. The base and limits are checked.
7927 * @param GCPtrMem The address of the guest memory.
7928 */
7929VBOXSTRICTRC iemMemFetchSysU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7930{
7931 /* The lazy approach for now... */
7932 uint64_t const *pu64Src;
7933 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
7934 if (rc == VINF_SUCCESS)
7935 {
7936 *pu64Dst = *pu64Src;
7937 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
7938 }
7939 return rc;
7940}
7941
7942
7943/**
7944 * Fetches a descriptor table entry with caller specified error code.
7945 *
7946 * @returns Strict VBox status code.
7947 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7948 * @param pDesc Where to return the descriptor table entry.
7949 * @param uSel The selector which table entry to fetch.
7950 * @param uXcpt The exception to raise on table lookup error.
7951 * @param uErrorCode The error code associated with the exception.
7952 */
7953static VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,
7954 uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT
7955{
7956 AssertPtr(pDesc);
7957 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
7958
7959 /** @todo did the 286 require all 8 bytes to be accessible? */
7960 /*
7961 * Get the selector table base and check bounds.
7962 */
7963 RTGCPTR GCPtrBase;
7964 if (uSel & X86_SEL_LDT)
7965 {
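        /* OR-ing in the TI and RPL bits yields the offset of the descriptor's last byte, which is what the limit check compares against. */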
7966 if ( !pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present
7967 || (uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.ldtr.u32Limit )
7968 {
7969 LogEx(LOG_GROUP_IEM, ("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
7970 uSel, pVCpu->cpum.GstCtx.ldtr.u32Limit, pVCpu->cpum.GstCtx.ldtr.Sel));
7971 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
7972 uErrorCode, 0);
7973 }
7974
7975 Assert(pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present);
7976 GCPtrBase = pVCpu->cpum.GstCtx.ldtr.u64Base;
7977 }
7978 else
7979 {
7980 if ((uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.gdtr.cbGdt)
7981 {
7982 LogEx(LOG_GROUP_IEM, ("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pVCpu->cpum.GstCtx.gdtr.cbGdt));
7983 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
7984 uErrorCode, 0);
7985 }
7986 GCPtrBase = pVCpu->cpum.GstCtx.gdtr.pGdt;
7987 }
7988
7989 /*
7990 * Read the legacy descriptor and maybe the long mode extensions if
7991 * required.
7992 */
7993 VBOXSTRICTRC rcStrict;
7994 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)
7995 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
7996 else
7997 {
7998 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);
7999 if (rcStrict == VINF_SUCCESS)
8000 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);
8001 if (rcStrict == VINF_SUCCESS)
8002 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);
8003 if (rcStrict == VINF_SUCCESS)
8004 pDesc->Legacy.au16[3] = 0;
8005 else
8006 return rcStrict;
8007 }
8008
8009 if (rcStrict == VINF_SUCCESS)
8010 {
8011 if ( !IEM_IS_LONG_MODE(pVCpu)
8012 || pDesc->Legacy.Gen.u1DescType)
8013 pDesc->Long.au64[1] = 0;
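    /* In long mode, system descriptors are 16 bytes wide; the upper half lives in the next 8-byte slot. Note that (uSel | X86_SEL_RPL_LDT) + 1 equals (uSel & X86_SEL_MASK) + 8. */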
8014 else if ( (uint32_t)(uSel | X86_SEL_RPL_LDT) + 8
8015 <= (uSel & X86_SEL_LDT ? pVCpu->cpum.GstCtx.ldtr.u32Limit : pVCpu->cpum.GstCtx.gdtr.cbGdt))
8016 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
8017 else
8018 {
8019 LogEx(LOG_GROUP_IEM,("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
8020 /** @todo is this the right exception? */
8021 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
8022 }
8023 }
8024 return rcStrict;
8025}
8026
8027
8028/**
8029 * Fetches a descriptor table entry.
8030 *
8031 * @returns Strict VBox status code.
8032 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8033 * @param pDesc Where to return the descriptor table entry.
8034 * @param uSel The selector which table entry to fetch.
8035 * @param uXcpt The exception to raise on table lookup error.
8036 */
8037VBOXSTRICTRC iemMemFetchSelDesc(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt) RT_NOEXCEPT
8038{
8039 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
8040}
8041
8042
8043/**
8044 * Marks the selector descriptor as accessed (only non-system descriptors).
8045 *
8046 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
8047 * will therefore skip the limit checks.
8048 *
8049 * @returns Strict VBox status code.
8050 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8051 * @param uSel The selector.
8052 */
8053VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
8054{
8055 /*
8056 * Get the selector table base and calculate the entry address.
8057 */
8058 RTGCPTR GCPtr = uSel & X86_SEL_LDT
8059 ? pVCpu->cpum.GstCtx.ldtr.u64Base
8060 : pVCpu->cpum.GstCtx.gdtr.pGdt;
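    /* X86_SEL_MASK strips the RPL and TI bits, leaving the byte offset of the 8-byte descriptor entry within the table. */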
8061 GCPtr += uSel & X86_SEL_MASK;
8062
8063 /*
8064 * ASMAtomicBitSet will assert if the address is misaligned, so do some
8065 * ugly stuff to avoid this. This will make sure it's an atomic access
8066 * as well as more or less remove any question about 8-bit or 32-bit accesses.
8067 */
8068 VBOXSTRICTRC rcStrict;
8069 uint32_t volatile *pu32;
8070 if ((GCPtr & 3) == 0)
8071 {
8072 /* The normal case: map the 32 bits containing the accessed bit (bit 40). */
8073 GCPtr += 2 + 2;
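        /* Skipping the limit (2 bytes) and base 15:0 (2 bytes) means the mapped dword covers descriptor bytes 4..7, putting the accessed bit at bit 8. */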
8074 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0);
8075 if (rcStrict != VINF_SUCCESS)
8076 return rcStrict;
8077 ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
8078 }
8079 else
8080 {
8081 /* The misaligned GDT/LDT case, map the whole thing. */
8082 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0);
8083 if (rcStrict != VINF_SUCCESS)
8084 return rcStrict;
8085 switch ((uintptr_t)pu32 & 3)
8086 {
8087 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
8088 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
8089 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
8090 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
8091 }
8092 }
8093
8094 return iemMemCommitAndUnmap(pVCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
8095}
8096
8097
8098#undef LOG_GROUP
8099#define LOG_GROUP LOG_GROUP_IEM
8100
8101/** @} */
8102
8103/** @name Opcode Helpers.
8104 * @{
8105 */
8106
8107/**
8108 * Calculates the effective address of a ModR/M memory operand.
8109 *
8110 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8111 *
8112 * @return Strict VBox status code.
8113 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8114 * @param bRm The ModRM byte.
8115 * @param cbImmAndRspOffset - First byte: The size of any immediate
8116 * following the effective address opcode bytes
8117 * (only for RIP relative addressing).
8118 * - Second byte: RSP displacement (for POP [ESP]).
8119 * @param pGCPtrEff Where to return the effective address.
8120 */
8121VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset, PRTGCPTR pGCPtrEff) RT_NOEXCEPT
8122{
8123 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
8124# define SET_SS_DEF() \
8125 do \
8126 { \
8127 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
8128 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
8129 } while (0)
8130
8131 if (!IEM_IS_64BIT_CODE(pVCpu))
8132 {
8133/** @todo Check the effective address size crap! */
8134 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
8135 {
8136 uint16_t u16EffAddr;
8137
8138 /* Handle the disp16 form with no registers first. */
8139 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
8140 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
8141 else
8142 {
8143 /* Get the displacement. */
8144 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8145 {
8146 case 0: u16EffAddr = 0; break;
8147 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
8148 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
8149 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
8150 }
8151
8152 /* Add the base and index registers to the disp. */
8153 switch (bRm & X86_MODRM_RM_MASK)
8154 {
8155 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
8156 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
8157 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
8158 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
8159 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
8160 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
8161 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
8162 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
8163 }
8164 }
8165
8166 *pGCPtrEff = u16EffAddr;
8167 }
8168 else
8169 {
8170 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8171 uint32_t u32EffAddr;
8172
8173 /* Handle the disp32 form with no registers first. */
8174 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8175 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
8176 else
8177 {
8178 /* Get the register (or SIB) value. */
8179 switch ((bRm & X86_MODRM_RM_MASK))
8180 {
8181 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8182 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8183 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8184 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8185 case 4: /* SIB */
8186 {
8187 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8188
8189 /* Get the index and scale it. */
8190 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
8191 {
8192 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8193 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8194 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8195 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8196 case 4: u32EffAddr = 0; /*none */ break;
8197 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
8198 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8199 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8200 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8201 }
8202 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8203
8204 /* add base */
8205 switch (bSib & X86_SIB_BASE_MASK)
8206 {
8207 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
8208 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
8209 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
8210 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
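                            /* ESP as base: the high byte of cbImmAndRspOffset compensates for the stack pointer change of a POP [ESP]-style access. */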
8211 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8212 case 5:
8213 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8214 {
8215 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
8216 SET_SS_DEF();
8217 }
8218 else
8219 {
8220 uint32_t u32Disp;
8221 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8222 u32EffAddr += u32Disp;
8223 }
8224 break;
8225 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
8226 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
8227 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8228 }
8229 break;
8230 }
8231 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
8232 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8233 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8234 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8235 }
8236
8237 /* Get and add the displacement. */
8238 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8239 {
8240 case 0:
8241 break;
8242 case 1:
8243 {
8244 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8245 u32EffAddr += i8Disp;
8246 break;
8247 }
8248 case 2:
8249 {
8250 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8251 u32EffAddr += u32Disp;
8252 break;
8253 }
8254 default:
8255 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
8256 }
8257
8258 }
8259 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
8260 *pGCPtrEff = u32EffAddr;
8261 else
8262 {
8263 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
8264 *pGCPtrEff = u32EffAddr & UINT16_MAX;
8265 }
8266 }
8267 }
8268 else
8269 {
8270 uint64_t u64EffAddr;
8271
8272 /* Handle the rip+disp32 form with no registers first. */
8273 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8274 {
8275 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
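            /* RIP-relative addressing is relative to the next instruction, so add the opcode bytes decoded so far plus the size of any immediate that follows the displacement. */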
8276 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
8277 }
8278 else
8279 {
8280 /* Get the register (or SIB) value. */
8281 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
8282 {
8283 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8284 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8285 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8286 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8287 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
8288 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8289 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8290 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8291 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8292 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8293 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8294 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8295 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8296 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8297 /* SIB */
8298 case 4:
8299 case 12:
8300 {
8301 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8302
8303 /* Get the index and scale it. */
8304 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
8305 {
8306 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8307 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8308 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8309 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8310 case 4: u64EffAddr = 0; /*none */ break;
8311 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
8312 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8313 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8314 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8315 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8316 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8317 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8318 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
8319 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8320 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8321 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8322 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8323 }
8324 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8325
8326 /* add base */
8327 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
8328 {
8329 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
8330 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
8331 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
8332 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
8333 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8334 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
8335 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
8336 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
8337 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
8338 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
8339 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
8340 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
8341 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
8342 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
8343 /* complicated encodings */
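                            /* Base 5/13 with mod 0 means a plain disp32 and no base register; with mod != 0 it is rBP/r13 (SS default only for rBP). */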
8344 case 5:
8345 case 13:
8346 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8347 {
8348 if (!pVCpu->iem.s.uRexB)
8349 {
8350 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
8351 SET_SS_DEF();
8352 }
8353 else
8354 u64EffAddr += pVCpu->cpum.GstCtx.r13;
8355 }
8356 else
8357 {
8358 uint32_t u32Disp;
8359 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8360 u64EffAddr += (int32_t)u32Disp;
8361 }
8362 break;
8363 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8364 }
8365 break;
8366 }
8367 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8368 }
8369
8370 /* Get and add the displacement. */
8371 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8372 {
8373 case 0:
8374 break;
8375 case 1:
8376 {
8377 int8_t i8Disp;
8378 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8379 u64EffAddr += i8Disp;
8380 break;
8381 }
8382 case 2:
8383 {
8384 uint32_t u32Disp;
8385 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8386 u64EffAddr += (int32_t)u32Disp;
8387 break;
8388 }
8389 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
8390 }
8391
8392 }
8393
8394 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
8395 *pGCPtrEff = u64EffAddr;
8396 else
8397 {
8398 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8399 *pGCPtrEff = u64EffAddr & UINT32_MAX;
8400 }
8401 }
8402
8403 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
8404 return VINF_SUCCESS;
8405}
8406
8407
8408#ifdef IEM_WITH_SETJMP
8409/**
8410 * Calculates the effective address of a ModR/M memory operand.
8411 *
8412 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8413 *
8414 * May longjmp on internal error.
8415 *
8416 * @return The effective address.
8417 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8418 * @param bRm The ModRM byte.
8419 * @param cbImmAndRspOffset - First byte: The size of any immediate
8420 * following the effective address opcode bytes
8421 * (only for RIP relative addressing).
8422 * - Second byte: RSP displacement (for POP [ESP]).
8423 */
8424RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset) IEM_NOEXCEPT_MAY_LONGJMP
8425{
8426 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
8427# define SET_SS_DEF() \
8428 do \
8429 { \
8430 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
8431 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
8432 } while (0)
8433
8434 if (!IEM_IS_64BIT_CODE(pVCpu))
8435 {
8436/** @todo Check the effective address size crap! */
8437 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
8438 {
8439 uint16_t u16EffAddr;
8440
8441 /* Handle the disp16 form with no registers first. */
8442 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
8443 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
8444 else
8445 {
8446 /* Get the displacement. */
8447 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8448 {
8449 case 0: u16EffAddr = 0; break;
8450 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
8451 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
8452 default: AssertFailedStmt(IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_1)); /* (caller checked for these) */
8453 }
8454
8455 /* Add the base and index registers to the disp. */
8456 switch (bRm & X86_MODRM_RM_MASK)
8457 {
8458 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
8459 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
8460 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
8461 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
8462 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
8463 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
8464 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
8465 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
8466 }
8467 }
8468
8469 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
8470 return u16EffAddr;
8471 }
8472
8473 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8474 uint32_t u32EffAddr;
8475
8476 /* Handle the disp32 form with no registers first. */
8477 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8478 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
8479 else
8480 {
8481 /* Get the register (or SIB) value. */
8482 switch ((bRm & X86_MODRM_RM_MASK))
8483 {
8484 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8485 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8486 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8487 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8488 case 4: /* SIB */
8489 {
8490 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8491
8492 /* Get the index and scale it. */
8493 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
8494 {
8495 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8496 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8497 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8498 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8499 case 4: u32EffAddr = 0; /*none */ break;
8500 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
8501 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8502 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8503 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8504 }
8505 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8506
8507 /* add base */
8508 switch (bSib & X86_SIB_BASE_MASK)
8509 {
8510 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
8511 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
8512 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
8513 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
8514 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8515 case 5:
8516 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8517 {
8518 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
8519 SET_SS_DEF();
8520 }
8521 else
8522 {
8523 uint32_t u32Disp;
8524 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8525 u32EffAddr += u32Disp;
8526 }
8527 break;
8528 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
8529 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
8530 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8531 }
8532 break;
8533 }
8534 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
8535 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8536 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8537 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8538 }
8539
8540 /* Get and add the displacement. */
8541 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8542 {
8543 case 0:
8544 break;
8545 case 1:
8546 {
8547 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8548 u32EffAddr += i8Disp;
8549 break;
8550 }
8551 case 2:
8552 {
8553 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8554 u32EffAddr += u32Disp;
8555 break;
8556 }
8557 default:
8558 AssertFailedStmt(IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_2)); /* (caller checked for these) */
8559 }
8560 }
8561
8562 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
8563 {
8564 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
8565 return u32EffAddr;
8566 }
8567 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
8568 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX32\n", u32EffAddr & UINT16_MAX));
8569 return u32EffAddr & UINT16_MAX;
8570 }
8571
8572 uint64_t u64EffAddr;
8573
8574 /* Handle the rip+disp32 form with no registers first. */
8575 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8576 {
8577 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
8578 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
8579 }
8580 else
8581 {
8582 /* Get the register (or SIB) value. */
8583 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
8584 {
8585 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8586 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8587 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8588 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8589 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
8590 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8591 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8592 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8593 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8594 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8595 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8596 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8597 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8598 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8599 /* SIB */
8600 case 4:
8601 case 12:
8602 {
8603 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8604
8605 /* Get the index and scale it. */
8606 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
8607 {
8608 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8609 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8610 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8611 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8612 case 4: u64EffAddr = 0; /*none */ break;
8613 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
8614 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8615 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8616 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8617 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8618 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8619 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8620 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
8621 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8622 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8623 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8624 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8625 }
8626 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8627
8628 /* add base */
8629 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
8630 {
8631 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
8632 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
8633 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
8634 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
8635 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8636 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
8637 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
8638 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
8639 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
8640 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
8641 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
8642 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
8643 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
8644 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
8645 /* complicated encodings */
8646 case 5:
8647 case 13:
8648 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8649 {
8650 if (!pVCpu->iem.s.uRexB)
8651 {
8652 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
8653 SET_SS_DEF();
8654 }
8655 else
8656 u64EffAddr += pVCpu->cpum.GstCtx.r13;
8657 }
8658 else
8659 {
8660 uint32_t u32Disp;
8661 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8662 u64EffAddr += (int32_t)u32Disp;
8663 }
8664 break;
8665 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8666 }
8667 break;
8668 }
8669 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8670 }
8671
8672 /* Get and add the displacement. */
8673 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8674 {
8675 case 0:
8676 break;
8677 case 1:
8678 {
8679 int8_t i8Disp;
8680 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8681 u64EffAddr += i8Disp;
8682 break;
8683 }
8684 case 2:
8685 {
8686 uint32_t u32Disp;
8687 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8688 u64EffAddr += (int32_t)u32Disp;
8689 break;
8690 }
8691 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
8692 }
8693
8694 }
8695
8696 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
8697 {
8698 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
8699 return u64EffAddr;
8700 }
8701 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8702 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
8703 return u64EffAddr & UINT32_MAX;
8704}
8705#endif /* IEM_WITH_SETJMP */
8706
8707
8708/**
8709 * Calculates the effective address of a ModR/M memory operand, extended version
8710 * for use in the recompilers.
8711 *
8712 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8713 *
8714 * @return Strict VBox status code.
8715 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8716 * @param bRm The ModRM byte.
8717 * @param cbImmAndRspOffset - First byte: The size of any immediate
8718 * following the effective address opcode bytes
8719 * (only for RIP relative addressing).
8720 * - Second byte: RSP displacement (for POP [ESP]).
8721 * @param pGCPtrEff Where to return the effective address.
8722 * @param puInfo Extra info: 32-bit displacement (bits 31:0) and
8723 * SIB byte (bits 39:32).
8724 */
8725VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset, PRTGCPTR pGCPtrEff, uint64_t *puInfo) RT_NOEXCEPT
8726{
8727 Log5(("iemOpHlpCalcRmEffAddrEx: bRm=%#x\n", bRm));
8728# define SET_SS_DEF() \
8729 do \
8730 { \
8731 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
8732 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
8733 } while (0)
8734
8735 uint64_t uInfo;
8736 if (!IEM_IS_64BIT_CODE(pVCpu))
8737 {
8738/** @todo Check the effective address size crap! */
8739 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
8740 {
8741 uint16_t u16EffAddr;
8742
8743 /* Handle the disp16 form with no registers first. */
8744 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
8745 {
8746 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
8747 uInfo = u16EffAddr;
8748 }
8749 else
8750 {
8751 /* Get the displacement. */
8752 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8753 {
8754 case 0: u16EffAddr = 0; break;
8755 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
8756 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
8757 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
8758 }
8759 uInfo = u16EffAddr;
8760
8761 /* Add the base and index registers to the disp. */
8762 switch (bRm & X86_MODRM_RM_MASK)
8763 {
8764 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
8765 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
8766 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
8767 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
8768 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
8769 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
8770 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
8771 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
8772 }
8773 }
8774
8775 *pGCPtrEff = u16EffAddr;
8776 }
8777 else
8778 {
8779 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8780 uint32_t u32EffAddr;
8781
8782 /* Handle the disp32 form with no registers first. */
8783 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8784 {
8785 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
8786 uInfo = u32EffAddr;
8787 }
8788 else
8789 {
8790 /* Get the register (or SIB) value. */
8791 uInfo = 0;
8792 switch ((bRm & X86_MODRM_RM_MASK))
8793 {
8794 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8795 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8796 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8797 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8798 case 4: /* SIB */
8799 {
8800 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8801 uInfo = (uint64_t)bSib << 32;
8802
8803 /* Get the index and scale it. */
8804 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
8805 {
8806 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8807 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8808 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8809 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8810 case 4: u32EffAddr = 0; /*none */ break;
8811 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
8812 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8813 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8814 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8815 }
8816 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8817
8818 /* add base */
8819 switch (bSib & X86_SIB_BASE_MASK)
8820 {
8821 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
8822 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
8823 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
8824 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
8825 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8826 case 5:
8827 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8828 {
8829 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
8830 SET_SS_DEF();
8831 }
8832 else
8833 {
8834 uint32_t u32Disp;
8835 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8836 u32EffAddr += u32Disp;
8837 uInfo |= u32Disp;
8838 }
8839 break;
8840 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
8841 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
8842 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8843 }
8844 break;
8845 }
8846 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
8847 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8848 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8849 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8850 }
8851
8852 /* Get and add the displacement. */
8853 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8854 {
8855 case 0:
8856 break;
8857 case 1:
8858 {
8859 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8860 u32EffAddr += i8Disp;
8861 uInfo |= (uint32_t)(int32_t)i8Disp;
8862 break;
8863 }
8864 case 2:
8865 {
8866 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8867 u32EffAddr += u32Disp;
8868 uInfo |= (uint32_t)u32Disp;
8869 break;
8870 }
8871 default:
8872 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
8873 }
8874
8875 }
8876 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
8877 *pGCPtrEff = u32EffAddr;
8878 else
8879 {
8880 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
8881 *pGCPtrEff = u32EffAddr & UINT16_MAX;
8882 }
8883 }
8884 }
8885 else
8886 {
8887 uint64_t u64EffAddr;
8888
8889 /* Handle the rip+disp32 form with no registers first. */
8890 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8891 {
8892 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
8893 uInfo = (uint32_t)u64EffAddr;
8894 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
8895 }
8896 else
8897 {
8898 /* Get the register (or SIB) value. */
8899 uInfo = 0;
8900 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
8901 {
8902 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8903 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8904 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8905 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8906 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
8907 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8908 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8909 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8910 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8911 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8912 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8913 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8914 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8915 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8916 /* SIB */
8917 case 4:
8918 case 12:
8919 {
8920 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8921 uInfo = (uint64_t)bSib << 32;
8922
8923 /* Get the index and scale it. */
8924 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
8925 {
8926 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8927 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8928 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8929 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8930 case 4: u64EffAddr = 0; /* none */ break;
8931 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
8932 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8933 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8934 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8935 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8936 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8937 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8938 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
8939 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8940 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8941 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8942 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8943 }
8944 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8945
8946 /* add base */
8947 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
8948 {
8949 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
8950 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
8951 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
8952 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
8953 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8954 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
8955 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
8956 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
8957 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
8958 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
8959 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
8960 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
8961 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
8962 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
8963 /* complicated encodings */
8964 case 5:
8965 case 13:
8966 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8967 {
8968 if (!pVCpu->iem.s.uRexB)
8969 {
8970 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
8971 SET_SS_DEF();
8972 }
8973 else
8974 u64EffAddr += pVCpu->cpum.GstCtx.r13;
8975 }
8976 else
8977 {
8978 uint32_t u32Disp;
8979 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8980 u64EffAddr += (int32_t)u32Disp;
8981 uInfo |= u32Disp;
8982 }
8983 break;
8984 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8985 }
8986 break;
8987 }
8988 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8989 }
8990
8991 /* Get and add the displacement. */
8992 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8993 {
8994 case 0:
8995 break;
8996 case 1:
8997 {
8998 int8_t i8Disp;
8999 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9000 u64EffAddr += i8Disp;
9001 uInfo |= (uint32_t)(int32_t)i8Disp;
9002 break;
9003 }
9004 case 2:
9005 {
9006 uint32_t u32Disp;
9007 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9008 u64EffAddr += (int32_t)u32Disp;
9009 uInfo |= u32Disp;
9010 break;
9011 }
9012 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
9013 }
9014
9015 }
9016
9017 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
9018 *pGCPtrEff = u64EffAddr;
9019 else
9020 {
9021 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9022 *pGCPtrEff = u64EffAddr & UINT32_MAX;
9023 }
9024 }
9025 *puInfo = uInfo;
9026
9027 Log5(("iemOpHlpCalcRmEffAddrEx: EffAddr=%#010RGv uInfo=%RX64\n", *pGCPtrEff, uInfo));
9028 return VINF_SUCCESS;
9029}
9030
9031/** @} */
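
/*
 * Illustrative sketch: the SIB handling above splits the byte into scale
 * (bits 7:6), index (bits 5:3) and base (bits 2:0) via the X86_SIB_* macros.
 * The standalone helper below shows the same decomposition with hard-coded
 * shifts and masks; the helper name and the plain uint8_t outputs are
 * illustrative assumptions, not part of the IEM interface.
 */
DECLINLINE(void) iemExampleSplitSib(uint8_t bSib, uint8_t *puScale, uint8_t *puIndex, uint8_t *puBase)
{
    *puScale = (bSib >> 6) & 0x3;   /* multiplier is 1 << scale: 1, 2, 4 or 8 */
    *puIndex = (bSib >> 3) & 0x7;   /* index register; 4 (without REX.X) means "no index" */
    *puBase  =  bSib       & 0x7;   /* base register; 5 with mod=0 means disp32 instead */
}
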
9032
9033
9034#ifdef LOG_ENABLED
9035/**
9036 * Logs the current instruction.
9037 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9038 * @param fSameCtx Set if we have the same context information as the VMM,
9039 * clear if we may have already executed an instruction in
9040 * our debug context. When clear, we assume IEMCPU holds
9041 * valid CPU mode info.
9042 *
9043 * The @a fSameCtx parameter is now misleading and obsolete.
9044 * @param pszFunction The IEM function doing the execution.
9045 */
9046static void iemLogCurInstr(PVMCPUCC pVCpu, bool fSameCtx, const char *pszFunction) RT_NOEXCEPT
9047{
9048# ifdef IN_RING3
9049 if (LogIs2Enabled())
9050 {
9051 char szInstr[256];
9052 uint32_t cbInstr = 0;
9053 if (fSameCtx)
9054 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
9055 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
9056 szInstr, sizeof(szInstr), &cbInstr);
9057 else
9058 {
9059 uint32_t fFlags = 0;
9060 switch (IEM_GET_CPU_MODE(pVCpu))
9061 {
9062 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
9063 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
9064 case IEMMODE_16BIT:
9065 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) || pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
9066 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
9067 else
9068 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
9069 break;
9070 }
9071 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fFlags,
9072 szInstr, sizeof(szInstr), &cbInstr);
9073 }
9074
9075 PCX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
9076 Log2(("**** %s fExec=%x\n"
9077 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
9078 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
9079 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
9080 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
9081 " %s\n"
9082 , pszFunction, pVCpu->iem.s.fExec,
9083 pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ebx, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.esi, pVCpu->cpum.GstCtx.edi,
9084 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL, pVCpu->cpum.GstCtx.tr.Sel,
9085 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ds.Sel, pVCpu->cpum.GstCtx.es.Sel,
9086 pVCpu->cpum.GstCtx.fs.Sel, pVCpu->cpum.GstCtx.gs.Sel, pVCpu->cpum.GstCtx.eflags.u,
9087 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
9088 szInstr));
9089
9090 if (LogIs3Enabled())
9091 DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
9092 }
9093 else
9094# endif
9095 LogFlow(("%s: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n", pszFunction, pVCpu->cpum.GstCtx.cs.Sel,
9096 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u));
9097 RT_NOREF_PV(pVCpu); RT_NOREF_PV(fSameCtx);
9098}
9099#endif /* LOG_ENABLED */
9100
9101
9102#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9103/**
9104 * Deals with VMCPU_FF_VMX_APIC_WRITE, VMCPU_FF_VMX_MTF, VMCPU_FF_VMX_NMI_WINDOW,
9105 * VMCPU_FF_VMX_PREEMPT_TIMER and VMCPU_FF_VMX_INT_WINDOW.
9106 *
9107 * @returns Modified rcStrict.
9108 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9109 * @param rcStrict The instruction execution status.
9110 */
9111static VBOXSTRICTRC iemHandleNestedInstructionBoundaryFFs(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict) RT_NOEXCEPT
9112{
9113 Assert(CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)));
9114 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF))
9115 {
9116 /* VMX preemption timer takes priority over NMI-window exits. */
9117 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
9118 {
9119 rcStrict = iemVmxVmexitPreemptTimer(pVCpu);
9120 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER));
9121 }
9122 /*
9123 * Check remaining intercepts.
9124 *
9125 * NMI-window and Interrupt-window VM-exits.
9126 * Interrupt shadow (block-by-STI and Mov SS) inhibits interrupts and may also block NMIs.
9127 * Event injection during VM-entry takes priority over NMI-window and interrupt-window VM-exits.
9128 *
9129 * See Intel spec. 26.7.6 "NMI-Window Exiting".
9130 * See Intel spec. 26.7.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
9131 */
9132 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW | VMCPU_FF_VMX_INT_WINDOW)
9133 && !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
9134 && !TRPMHasTrap(pVCpu))
9135 {
9136 Assert(CPUMIsGuestVmxInterceptEvents(&pVCpu->cpum.GstCtx));
9137 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW)
9138 && CPUMIsGuestVmxVirtNmiBlocking(&pVCpu->cpum.GstCtx))
9139 {
9140 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_NMI_WINDOW, 0 /* u64ExitQual */);
9141 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW));
9142 }
9143 else if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW)
9144 && CPUMIsGuestVmxVirtIntrEnabled(&pVCpu->cpum.GstCtx))
9145 {
9146 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_INT_WINDOW, 0 /* u64ExitQual */);
9147 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW));
9148 }
9149 }
9150 }
9151 /* TPR-below threshold/APIC write has the highest priority. */
9152 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
9153 {
9154 rcStrict = iemVmxApicWriteEmulation(pVCpu);
9155 Assert(!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx));
9156 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE));
9157 }
9158 /* MTF takes priority over VMX-preemption timer. */
9159 else
9160 {
9161 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* u64ExitQual */);
9162 Assert(!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx));
9163 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF));
9164 }
9165 return rcStrict;
9166}
9167#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
9168
9169
9170/**
9171 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
9172 * IEMExecOneWithPrefetchedByPC.
9173 *
9174 * Similar code is found in IEMExecLots.
9175 *
9176 * @return Strict VBox status code.
9177 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9178 * @param fExecuteInhibit If set, execute the instruction following CLI,
9179 * POP SS and MOV SS,GR.
9180 * @param pszFunction The calling function name.
9181 */
9182DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPUCC pVCpu, bool fExecuteInhibit, const char *pszFunction)
9183{
9184 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9185 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9186 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9187 RT_NOREF_PV(pszFunction);
9188
9189#ifdef IEM_WITH_SETJMP
9190 VBOXSTRICTRC rcStrict;
9191 IEM_TRY_SETJMP(pVCpu, rcStrict)
9192 {
9193 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9194 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9195 }
9196 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
9197 {
9198 pVCpu->iem.s.cLongJumps++;
9199 }
9200 IEM_CATCH_LONGJMP_END(pVCpu);
9201#else
9202 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9203 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9204#endif
9205 if (rcStrict == VINF_SUCCESS)
9206 pVCpu->iem.s.cInstructions++;
9207 if (pVCpu->iem.s.cActiveMappings > 0)
9208 {
9209 Assert(rcStrict != VINF_SUCCESS);
9210 iemMemRollback(pVCpu);
9211 }
9212 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9213 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9214 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9215
9216//#ifdef DEBUG
9217// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
9218//#endif
9219
9220#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9221 /*
9222 * Perform any VMX nested-guest instruction boundary actions.
9223 *
9224 * If any of these causes a VM-exit, we must skip executing the next
9225 * instruction (would run into stale page tables). A VM-exit makes sure
9226 * there is no interrupt-inhibition, so that should ensure we don't go
9227 * on to execute the next instruction. Clearing fExecuteInhibit is
9228 * problematic because of the setjmp/longjmp clobbering above.
9229 */
9230 if ( !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9231 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)
9232 || rcStrict != VINF_SUCCESS)
9233 { /* likely */ }
9234 else
9235 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9236#endif
9237
9238 /* Execute the next instruction as well if a cli, pop ss or
9239 mov ss, Gr has just completed successfully. */
9240 if ( fExecuteInhibit
9241 && rcStrict == VINF_SUCCESS
9242 && CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx))
9243 {
9244 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fExec & (IEM_F_BYPASS_HANDLERS | IEM_F_X86_DISREGARD_LOCK));
9245 if (rcStrict == VINF_SUCCESS)
9246 {
9247#ifdef LOG_ENABLED
9248 iemLogCurInstr(pVCpu, false, pszFunction);
9249#endif
9250#ifdef IEM_WITH_SETJMP
9251 IEM_TRY_SETJMP_AGAIN(pVCpu, rcStrict)
9252 {
9253 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9254 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9255 }
9256 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
9257 {
9258 pVCpu->iem.s.cLongJumps++;
9259 }
9260 IEM_CATCH_LONGJMP_END(pVCpu);
9261#else
9262 IEM_OPCODE_GET_FIRST_U8(&b);
9263 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9264#endif
9265 if (rcStrict == VINF_SUCCESS)
9266 {
9267 pVCpu->iem.s.cInstructions++;
9268#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9269 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9270 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW))
9271 { /* likely */ }
9272 else
9273 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9274#endif
9275 }
9276 if (pVCpu->iem.s.cActiveMappings > 0)
9277 {
9278 Assert(rcStrict != VINF_SUCCESS);
9279 iemMemRollback(pVCpu);
9280 }
9281 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9282 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9283 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9284 }
9285 else if (pVCpu->iem.s.cActiveMappings > 0)
9286 iemMemRollback(pVCpu);
9287 /** @todo drop this after we bake this change into RIP advancing. */
9288 CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx); /* hope this is correct for all exceptional cases... */
9289 }
9290
9291 /*
9292 * Return value fiddling, statistics and sanity assertions.
9293 */
9294 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9295
9296 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
9297 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
9298 return rcStrict;
9299}
9300
9301
9302/**
9303 * Execute one instruction.
9304 *
9305 * @return Strict VBox status code.
9306 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9307 */
9308VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPUCC pVCpu)
9309{
9310 AssertCompile(sizeof(pVCpu->iem.s) <= sizeof(pVCpu->iem.padding)); /* (tstVMStruct can't do its job w/o instruction stats) */
9311#ifdef LOG_ENABLED
9312 iemLogCurInstr(pVCpu, true, "IEMExecOne");
9313#endif
9314
9315 /*
9316 * Do the decoding and emulation.
9317 */
9318 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9319 if (rcStrict == VINF_SUCCESS)
9320 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOne");
9321 else if (pVCpu->iem.s.cActiveMappings > 0)
9322 iemMemRollback(pVCpu);
9323
9324 if (rcStrict != VINF_SUCCESS)
9325 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9326 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9327 return rcStrict;
9328}
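
/*
 * Illustrative sketch: a minimal way a caller on the EMT could drive
 * IEMExecOne() for a few instructions and stop on the first informational or
 * error status. The wrapper name and loop policy are illustrative assumptions;
 * real callers live in EM/HM and are considerably more involved.
 */
DECLINLINE(VBOXSTRICTRC) iemExampleStepInstructions(PVMCPUCC pVCpu, uint32_t cSteps)
{
    VBOXSTRICTRC rcStrict = VINF_SUCCESS;
    while (cSteps-- > 0)
    {
        rcStrict = IEMExecOne(pVCpu);   /* decode + execute exactly one instruction */
        if (rcStrict != VINF_SUCCESS)   /* anything else is passed straight up to the caller */
            break;
    }
    return rcStrict;
}
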
9329
9330
9331VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPUCC pVCpu, uint32_t *pcbWritten)
9332{
9333 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
9334 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9335 if (rcStrict == VINF_SUCCESS)
9336 {
9337 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneEx");
9338 if (pcbWritten)
9339 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
9340 }
9341 else if (pVCpu->iem.s.cActiveMappings > 0)
9342 iemMemRollback(pVCpu);
9343
9344 return rcStrict;
9345}
9346
9347
9348VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPUCC pVCpu, uint64_t OpcodeBytesPC,
9349 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
9350{
9351 VBOXSTRICTRC rcStrict;
9352 if ( cbOpcodeBytes
9353 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
9354 {
9355 iemInitDecoder(pVCpu, 0 /*fExecOpts*/);
9356#ifdef IEM_WITH_CODE_TLB
9357 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
9358 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
9359 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
9360 pVCpu->iem.s.offCurInstrStart = 0;
9361 pVCpu->iem.s.offInstrNextByte = 0;
9362 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
9363#else
9364 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
9365 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
9366#endif
9367 rcStrict = VINF_SUCCESS;
9368 }
9369 else
9370 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9371 if (rcStrict == VINF_SUCCESS)
9372 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneWithPrefetchedByPC");
9373 else if (pVCpu->iem.s.cActiveMappings > 0)
9374 iemMemRollback(pVCpu);
9375
9376 return rcStrict;
9377}
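
/*
 * Illustrative sketch: feeding already fetched opcode bytes to
 * IEMExecOneWithPrefetchedByPC(). As the function above shows, the bytes are
 * only used while the current RIP still matches OpcodeBytesPC; otherwise the
 * normal prefetch path is taken. The buffer contents (a single-byte NOP,
 * 0x90) are an illustrative assumption.
 */
DECLINLINE(VBOXSTRICTRC) iemExampleExecPrefetched(PVMCPUCC pVCpu)
{
    uint8_t const abOpcode[] = { 0x90 };    /* assumed: NOP sitting at the current guest RIP */
    return IEMExecOneWithPrefetchedByPC(pVCpu, pVCpu->cpum.GstCtx.rip, abOpcode, sizeof(abOpcode));
}
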
9378
9379
9380VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPUCC pVCpu, uint32_t *pcbWritten)
9381{
9382 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
9383 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_BYPASS_HANDLERS);
9384 if (rcStrict == VINF_SUCCESS)
9385 {
9386 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassEx");
9387 if (pcbWritten)
9388 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
9389 }
9390 else if (pVCpu->iem.s.cActiveMappings > 0)
9391 iemMemRollback(pVCpu);
9392
9393 return rcStrict;
9394}
9395
9396
9397VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPUCC pVCpu, uint64_t OpcodeBytesPC,
9398 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
9399{
9400 VBOXSTRICTRC rcStrict;
9401 if ( cbOpcodeBytes
9402 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
9403 {
9404 iemInitDecoder(pVCpu, IEM_F_BYPASS_HANDLERS);
9405#ifdef IEM_WITH_CODE_TLB
9406 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
9407 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
9408 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
9409 pVCpu->iem.s.offCurInstrStart = 0;
9410 pVCpu->iem.s.offInstrNextByte = 0;
9411 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
9412#else
9413 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
9414 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
9415#endif
9416 rcStrict = VINF_SUCCESS;
9417 }
9418 else
9419 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_BYPASS_HANDLERS);
9420 if (rcStrict == VINF_SUCCESS)
9421 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPC");
9422 else if (pVCpu->iem.s.cActiveMappings > 0)
9423 iemMemRollback(pVCpu);
9424
9425 return rcStrict;
9426}
9427
9428
9429/**
9430 * For handling split cacheline lock operations when the host has split-lock
9431 * detection enabled.
9432 *
9433 * This will cause the interpreter to disregard the lock prefix and implicit
9434 * locking (xchg).
9435 *
9436 * @returns Strict VBox status code.
9437 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9438 */
9439VMMDECL(VBOXSTRICTRC) IEMExecOneIgnoreLock(PVMCPUCC pVCpu)
9440{
9441 /*
9442 * Do the decoding and emulation.
9443 */
9444 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_X86_DISREGARD_LOCK);
9445 if (rcStrict == VINF_SUCCESS)
9446 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneIgnoreLock");
9447 else if (pVCpu->iem.s.cActiveMappings > 0)
9448 iemMemRollback(pVCpu);
9449
9450 if (rcStrict != VINF_SUCCESS)
9451 LogFlow(("IEMExecOneIgnoreLock: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9452 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9453 return rcStrict;
9454}
9455
9456
9457/**
9458 * Code common to IEMExecLots and IEMExecRecompilerThreaded that attempts to
9459 * inject a pending TRPM trap.
9460 */
9461VBOXSTRICTRC iemExecInjectPendingTrap(PVMCPUCC pVCpu)
9462{
9463 Assert(TRPMHasTrap(pVCpu));
9464
9465 if ( !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
9466 && !CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx))
9467 {
9468 /** @todo Can we centralize this under CPUMCanInjectInterrupt()? */
9469#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9470 bool fIntrEnabled = CPUMGetGuestGif(&pVCpu->cpum.GstCtx);
9471 if (fIntrEnabled)
9472 {
9473 if (!CPUMIsGuestInNestedHwvirtMode(IEM_GET_CTX(pVCpu)))
9474 fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
9475 else if (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
9476 fIntrEnabled = CPUMIsGuestVmxPhysIntrEnabled(IEM_GET_CTX(pVCpu));
9477 else
9478 {
9479 Assert(CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)));
9480 fIntrEnabled = CPUMIsGuestSvmPhysIntrEnabled(pVCpu, IEM_GET_CTX(pVCpu));
9481 }
9482 }
9483#else
9484 bool fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
9485#endif
9486 if (fIntrEnabled)
9487 {
9488 uint8_t u8TrapNo;
9489 TRPMEVENT enmType;
9490 uint32_t uErrCode;
9491 RTGCPTR uCr2;
9492 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /*pu8InstLen*/, NULL /*fIcebp*/);
9493 AssertRC(rc2);
9494 Assert(enmType == TRPM_HARDWARE_INT);
9495 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /*cbInstr*/);
9496
9497 TRPMResetTrap(pVCpu);
9498
9499#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9500 /* Injecting an event may cause a VM-exit. */
9501 if ( rcStrict != VINF_SUCCESS
9502 && rcStrict != VINF_IEM_RAISED_XCPT)
9503 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
9504#else
9505 NOREF(rcStrict);
9506#endif
9507 }
9508 }
9509
9510 return VINF_SUCCESS;
9511}
9512
9513
9514VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPUCC pVCpu, uint32_t cMaxInstructions, uint32_t cPollRate, uint32_t *pcInstructions)
9515{
9516 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
9517 AssertMsg(RT_IS_POWER_OF_TWO(cPollRate + 1), ("%#x\n", cPollRate));
9518 Assert(cMaxInstructions > 0);
9519
9520 /*
9521 * See if there is an interrupt pending in TRPM, inject it if we can.
9522 */
9523 /** @todo What if we are injecting an exception and not an interrupt? Is that
9524 * possible here? For now we assert it is indeed only an interrupt. */
9525 if (!TRPMHasTrap(pVCpu))
9526 { /* likely */ }
9527 else
9528 {
9529 VBOXSTRICTRC rcStrict = iemExecInjectPendingTrap(pVCpu);
9530 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9531 { /*likely */ }
9532 else
9533 return rcStrict;
9534 }
9535
9536 /*
9537 * Initial decoder init w/ prefetch, then setup setjmp.
9538 */
9539 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9540 if (rcStrict == VINF_SUCCESS)
9541 {
9542#ifdef IEM_WITH_SETJMP
9543 pVCpu->iem.s.cActiveMappings = 0; /** @todo wtf? */
9544 IEM_TRY_SETJMP(pVCpu, rcStrict)
9545#endif
9546 {
9547 /*
9548 * The run loop. We limit ourselves to the caller-specified maximum instruction count.
9549 */
9550 uint32_t cMaxInstructionsGccStupidity = cMaxInstructions;
9551 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
9552 for (;;)
9553 {
9554 /*
9555 * Log the state.
9556 */
9557#ifdef LOG_ENABLED
9558 iemLogCurInstr(pVCpu, true, "IEMExecLots");
9559#endif
9560
9561 /*
9562 * Do the decoding and emulation.
9563 */
9564 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9565 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9566#ifdef VBOX_STRICT
9567 CPUMAssertGuestRFlagsCookie(pVM, pVCpu);
9568#endif
9569 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9570 {
9571 Assert(pVCpu->iem.s.cActiveMappings == 0);
9572 pVCpu->iem.s.cInstructions++;
9573
9574#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9575 /* Perform any VMX nested-guest instruction boundary actions. */
9576 uint64_t fCpu = pVCpu->fLocalForcedActions;
9577 if (!(fCpu & ( VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9578 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)))
9579 { /* likely */ }
9580 else
9581 {
9582 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9583 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9584 fCpu = pVCpu->fLocalForcedActions;
9585 else
9586 {
9587 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9588 break;
9589 }
9590 }
9591#endif
9592 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
9593 {
9594#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
9595 uint64_t fCpu = pVCpu->fLocalForcedActions;
9596#endif
9597 fCpu &= VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
9598 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
9599 | VMCPU_FF_TLB_FLUSH
9600 | VMCPU_FF_UNHALT );
9601
9602 if (RT_LIKELY( ( !fCpu
9603 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
9604 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF) )
9605 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) ))
9606 {
9607 if (--cMaxInstructionsGccStupidity > 0)
9608 {
9609 /* Poll timers every now and then according to the caller's specs. */
9610 if ( (cMaxInstructionsGccStupidity & cPollRate) != 0
9611 || !TMTimerPollBool(pVM, pVCpu))
9612 {
9613 Assert(pVCpu->iem.s.cActiveMappings == 0);
9614 iemReInitDecoder(pVCpu);
9615 continue;
9616 }
9617 }
9618 }
9619 }
9620 Assert(pVCpu->iem.s.cActiveMappings == 0);
9621 }
9622 else if (pVCpu->iem.s.cActiveMappings > 0)
9623 iemMemRollback(pVCpu);
9624 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9625 break;
9626 }
9627 }
9628#ifdef IEM_WITH_SETJMP
9629 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
9630 {
9631 if (pVCpu->iem.s.cActiveMappings > 0)
9632 iemMemRollback(pVCpu);
9633# if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9634 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9635# endif
9636 pVCpu->iem.s.cLongJumps++;
9637 }
9638 IEM_CATCH_LONGJMP_END(pVCpu);
9639#endif
9640
9641 /*
9642 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
9643 */
9644 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
9645 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
9646 }
9647 else
9648 {
9649 if (pVCpu->iem.s.cActiveMappings > 0)
9650 iemMemRollback(pVCpu);
9651
9652#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9653 /*
9654 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
9655 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
9656 */
9657 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9658#endif
9659 }
9660
9661 /*
9662 * Maybe re-enter raw-mode and log.
9663 */
9664 if (rcStrict != VINF_SUCCESS)
9665 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9666 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9667 if (pcInstructions)
9668 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
9669 return rcStrict;
9670}
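
/*
 * Illustrative sketch: invoking IEMExecLots() the way an outer execution loop
 * might. Note the assertion in the function above: cPollRate + 1 must be a
 * power of two, so 511 below asks for a timer poll roughly every 512
 * instructions. The wrapper name and the concrete numbers are illustrative
 * assumptions only.
 */
DECLINLINE(VBOXSTRICTRC) iemExampleExecBatch(PVMCPUCC pVCpu)
{
    uint32_t     cInstructions = 0;
    VBOXSTRICTRC rcStrict = IEMExecLots(pVCpu, 4096 /*cMaxInstructions*/, 511 /*cPollRate*/, &cInstructions);
    LogFlow(("iemExampleExecBatch: executed %u instruction(s), rcStrict=%Rrc\n",
             cInstructions, VBOXSTRICTRC_VAL(rcStrict)));
    return rcStrict;
}
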
9671
9672
9673/**
9674 * Interface used by EMExecuteExec, does exit statistics and limits.
9675 *
9676 * @returns Strict VBox status code.
9677 * @param pVCpu The cross context virtual CPU structure.
9678 * @param fWillExit To be defined.
9679 * @param cMinInstructions Minimum number of instructions to execute before checking for FFs.
9680 * @param cMaxInstructions Maximum number of instructions to execute.
9681 * @param cMaxInstructionsWithoutExits
9682 * The max number of instructions without exits.
9683 * @param pStats Where to return statistics.
9684 */
9685VMMDECL(VBOXSTRICTRC) IEMExecForExits(PVMCPUCC pVCpu, uint32_t fWillExit, uint32_t cMinInstructions, uint32_t cMaxInstructions,
9686 uint32_t cMaxInstructionsWithoutExits, PIEMEXECFOREXITSTATS pStats)
9687{
9688 NOREF(fWillExit); /** @todo define flexible exit crits */
9689
9690 /*
9691 * Initialize return stats.
9692 */
9693 pStats->cInstructions = 0;
9694 pStats->cExits = 0;
9695 pStats->cMaxExitDistance = 0;
9696 pStats->cReserved = 0;
9697
9698 /*
9699 * Initial decoder init w/ prefetch, then setup setjmp.
9700 */
9701 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9702 if (rcStrict == VINF_SUCCESS)
9703 {
9704#ifdef IEM_WITH_SETJMP
9705 pVCpu->iem.s.cActiveMappings = 0; /** @todo wtf?!? */
9706 IEM_TRY_SETJMP(pVCpu, rcStrict)
9707#endif
9708 {
9709#ifdef IN_RING0
9710 bool const fCheckPreemptionPending = !RTThreadPreemptIsPossible() || !RTThreadPreemptIsEnabled(NIL_RTTHREAD);
9711#endif
9712 uint32_t cInstructionSinceLastExit = 0;
9713
9714 /*
9715 * The run loop. We limit ourselves to the caller-specified maximum instruction count.
9716 */
9717 PVM pVM = pVCpu->CTX_SUFF(pVM);
9718 for (;;)
9719 {
9720 /*
9721 * Log the state.
9722 */
9723#ifdef LOG_ENABLED
9724 iemLogCurInstr(pVCpu, true, "IEMExecForExits");
9725#endif
9726
9727 /*
9728 * Do the decoding and emulation.
9729 */
9730 uint32_t const cPotentialExits = pVCpu->iem.s.cPotentialExits;
9731
9732 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9733 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9734
9735 if ( cPotentialExits != pVCpu->iem.s.cPotentialExits
9736 && cInstructionSinceLastExit > 0 /* don't count the first */ )
9737 {
9738 pStats->cExits += 1;
9739 if (cInstructionSinceLastExit > pStats->cMaxExitDistance)
9740 pStats->cMaxExitDistance = cInstructionSinceLastExit;
9741 cInstructionSinceLastExit = 0;
9742 }
9743
9744 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9745 {
9746 Assert(pVCpu->iem.s.cActiveMappings == 0);
9747 pVCpu->iem.s.cInstructions++;
9748 pStats->cInstructions++;
9749 cInstructionSinceLastExit++;
9750
9751#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9752 /* Perform any VMX nested-guest instruction boundary actions. */
9753 uint64_t fCpu = pVCpu->fLocalForcedActions;
9754 if (!(fCpu & ( VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9755 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)))
9756 { /* likely */ }
9757 else
9758 {
9759 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9760 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9761 fCpu = pVCpu->fLocalForcedActions;
9762 else
9763 {
9764 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9765 break;
9766 }
9767 }
9768#endif
9769 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
9770 {
9771#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
9772 uint64_t fCpu = pVCpu->fLocalForcedActions;
9773#endif
9774 fCpu &= VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
9775 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
9776 | VMCPU_FF_TLB_FLUSH
9777 | VMCPU_FF_UNHALT );
9778 if (RT_LIKELY( ( ( !fCpu
9779 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
9780 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF))
9781 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) )
9782 || pStats->cInstructions < cMinInstructions))
9783 {
9784 if (pStats->cInstructions < cMaxInstructions)
9785 {
9786 if (cInstructionSinceLastExit <= cMaxInstructionsWithoutExits)
9787 {
9788#ifdef IN_RING0
9789 if ( !fCheckPreemptionPending
9790 || !RTThreadPreemptIsPending(NIL_RTTHREAD))
9791#endif
9792 {
9793 Assert(pVCpu->iem.s.cActiveMappings == 0);
9794 iemReInitDecoder(pVCpu);
9795 continue;
9796 }
9797#ifdef IN_RING0
9798 rcStrict = VINF_EM_RAW_INTERRUPT;
9799 break;
9800#endif
9801 }
9802 }
9803 }
9804 Assert(!(fCpu & VMCPU_FF_IEM));
9805 }
9806 Assert(pVCpu->iem.s.cActiveMappings == 0);
9807 }
9808 else if (pVCpu->iem.s.cActiveMappings > 0)
9809 iemMemRollback(pVCpu);
9810 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9811 break;
9812 }
9813 }
9814#ifdef IEM_WITH_SETJMP
9815 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
9816 {
9817 if (pVCpu->iem.s.cActiveMappings > 0)
9818 iemMemRollback(pVCpu);
9819 pVCpu->iem.s.cLongJumps++;
9820 }
9821 IEM_CATCH_LONGJMP_END(pVCpu);
9822#endif
9823
9824 /*
9825 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
9826 */
9827 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
9828 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
9829 }
9830 else
9831 {
9832 if (pVCpu->iem.s.cActiveMappings > 0)
9833 iemMemRollback(pVCpu);
9834
9835#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9836 /*
9837 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
9838 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
9839 */
9840 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9841#endif
9842 }
9843
9844 /*
9845 * Maybe re-enter raw-mode and log.
9846 */
9847 if (rcStrict != VINF_SUCCESS)
9848 LogFlow(("IEMExecForExits: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc; ins=%u exits=%u maxdist=%u\n",
9849 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp,
9850 pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict), pStats->cInstructions, pStats->cExits, pStats->cMaxExitDistance));
9851 return rcStrict;
9852}
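
/*
 * Illustrative sketch: calling IEMExecForExits() and reading back the exit
 * statistics it fills in. The limits passed here, the wrapper name and the
 * IEMEXECFOREXITSTATS stack variable (assumed to be the struct behind the
 * PIEMEXECFOREXITSTATS parameter) are illustrative assumptions; the real
 * caller (EMExecuteExec) picks its own limits.
 */
DECLINLINE(VBOXSTRICTRC) iemExampleExecForExits(PVMCPUCC pVCpu)
{
    IEMEXECFOREXITSTATS Stats;
    VBOXSTRICTRC rcStrict = IEMExecForExits(pVCpu, 0 /*fWillExit*/, 64 /*cMinInstructions*/,
                                            4096 /*cMaxInstructions*/, 512 /*cMaxInstructionsWithoutExits*/, &Stats);
    LogFlow(("iemExampleExecForExits: ins=%u exits=%u maxdist=%u rcStrict=%Rrc\n",
             Stats.cInstructions, Stats.cExits, Stats.cMaxExitDistance, VBOXSTRICTRC_VAL(rcStrict)));
    return rcStrict;
}
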
9853
9854
9855/**
9856 * Injects a trap, fault, abort, software interrupt or external interrupt.
9857 *
9858 * The parameter list matches TRPMQueryTrapAll pretty closely.
9859 *
9860 * @returns Strict VBox status code.
9861 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9862 * @param u8TrapNo The trap number.
9863 * @param enmType What type is it (trap/fault/abort), software
9864 * interrupt or hardware interrupt.
9865 * @param uErrCode The error code if applicable.
9866 * @param uCr2 The CR2 value if applicable.
9867 * @param cbInstr The instruction length (only relevant for
9868 * software interrupts).
9869 */
9870VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPUCC pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
9871 uint8_t cbInstr)
9872{
9873 iemInitDecoder(pVCpu, 0 /*fExecOpts*/); /** @todo wrong init function! */
9874#ifdef DBGFTRACE_ENABLED
9875 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
9876 u8TrapNo, enmType, uErrCode, uCr2);
9877#endif
9878
9879 uint32_t fFlags;
9880 switch (enmType)
9881 {
9882 case TRPM_HARDWARE_INT:
9883 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
9884 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
9885 uErrCode = uCr2 = 0;
9886 break;
9887
9888 case TRPM_SOFTWARE_INT:
9889 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
9890 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
9891 uErrCode = uCr2 = 0;
9892 break;
9893
9894 case TRPM_TRAP:
9895 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
9896 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
9897 if (u8TrapNo == X86_XCPT_PF)
9898 fFlags |= IEM_XCPT_FLAGS_CR2;
9899 switch (u8TrapNo)
9900 {
9901 case X86_XCPT_DF:
9902 case X86_XCPT_TS:
9903 case X86_XCPT_NP:
9904 case X86_XCPT_SS:
9905 case X86_XCPT_PF:
9906 case X86_XCPT_AC:
9907 case X86_XCPT_GP:
9908 fFlags |= IEM_XCPT_FLAGS_ERR;
9909 break;
9910 }
9911 break;
9912
9913 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9914 }
9915
9916 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
9917
9918 if (pVCpu->iem.s.cActiveMappings > 0)
9919 iemMemRollback(pVCpu);
9920
9921 return rcStrict;
9922}
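
/*
 * Illustrative sketch: injecting an external hardware interrupt via
 * IEMInjectTrap(). For TRPM_HARDWARE_INT the function zeroes the error code
 * and CR2 itself, and cbInstr is only relevant for software interrupts.
 * Vector 0x20 and the wrapper name are illustrative assumptions only.
 */
DECLINLINE(VBOXSTRICTRC) iemExampleInjectExtInt(PVMCPUCC pVCpu)
{
    return IEMInjectTrap(pVCpu, 0x20 /*u8TrapNo*/, TRPM_HARDWARE_INT, 0 /*uErrCode*/, 0 /*uCr2*/, 0 /*cbInstr*/);
}
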
9923
9924
9925/**
9926 * Injects the active TRPM event.
9927 *
9928 * @returns Strict VBox status code.
9929 * @param pVCpu The cross context virtual CPU structure.
9930 */
9931VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPUCC pVCpu)
9932{
9933#ifndef IEM_IMPLEMENTS_TASKSWITCH
9934 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
9935#else
9936 uint8_t u8TrapNo;
9937 TRPMEVENT enmType;
9938 uint32_t uErrCode;
9939 RTGCUINTPTR uCr2;
9940 uint8_t cbInstr;
9941 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr, NULL /* fIcebp */);
9942 if (RT_FAILURE(rc))
9943 return rc;
9944
9945 /** @todo r=ramshankar: Pass ICEBP info. to IEMInjectTrap() below and handle
9946 * ICEBP \#DB injection as a special case. */
9947 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
9948#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
9949 if (rcStrict == VINF_SVM_VMEXIT)
9950 rcStrict = VINF_SUCCESS;
9951#endif
9952#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9953 if (rcStrict == VINF_VMX_VMEXIT)
9954 rcStrict = VINF_SUCCESS;
9955#endif
9956 /** @todo Are there any other codes that imply the event was successfully
9957 * delivered to the guest? See @bugref{6607}. */
9958 if ( rcStrict == VINF_SUCCESS
9959 || rcStrict == VINF_IEM_RAISED_XCPT)
9960 TRPMResetTrap(pVCpu);
9961
9962 return rcStrict;
9963#endif
9964}
9965
9966
9967VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
9968{
9969 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
9970 return VERR_NOT_IMPLEMENTED;
9971}
9972
9973
9974VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
9975{
9976 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
9977 return VERR_NOT_IMPLEMENTED;
9978}
9979
9980
9981/**
9982 * Interface for HM and EM for executing string I/O OUT (write) instructions.
9983 *
9984 * This API ASSUMES that the caller has already verified that the guest code is
9985 * allowed to access the I/O port. (The I/O port is in the DX register in the
9986 * guest state.)
9987 *
9988 * @returns Strict VBox status code.
9989 * @param pVCpu The cross context virtual CPU structure.
9990 * @param cbValue The size of the I/O port access (1, 2, or 4).
9991 * @param enmAddrMode The addressing mode.
9992 * @param fRepPrefix Indicates whether a repeat prefix is used
9993 * (doesn't matter which for this instruction).
9994 * @param cbInstr The instruction length in bytes.
9995 * @param iEffSeg The effective segment register.
9996 * @param fIoChecked Whether the access to the I/O port has been
9997 * checked or not. It's typically checked in the
9998 * HM scenario.
9999 */
10000VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
10001 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
10002{
10003 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
10004 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10005
10006 /*
10007 * State init.
10008 */
10009 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10010
10011 /*
10012 * Switch orgy for getting to the right handler.
10013 */
10014 VBOXSTRICTRC rcStrict;
10015 if (fRepPrefix)
10016 {
10017 switch (enmAddrMode)
10018 {
10019 case IEMMODE_16BIT:
10020 switch (cbValue)
10021 {
10022 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10023 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10024 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10025 default:
10026 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10027 }
10028 break;
10029
10030 case IEMMODE_32BIT:
10031 switch (cbValue)
10032 {
10033 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10034 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10035 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10036 default:
10037 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10038 }
10039 break;
10040
10041 case IEMMODE_64BIT:
10042 switch (cbValue)
10043 {
10044 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10045 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10046 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10047 default:
10048 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10049 }
10050 break;
10051
10052 default:
10053 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10054 }
10055 }
10056 else
10057 {
10058 switch (enmAddrMode)
10059 {
10060 case IEMMODE_16BIT:
10061 switch (cbValue)
10062 {
10063 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10064 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10065 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10066 default:
10067 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10068 }
10069 break;
10070
10071 case IEMMODE_32BIT:
10072 switch (cbValue)
10073 {
10074 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10075 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10076 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10077 default:
10078 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10079 }
10080 break;
10081
10082 case IEMMODE_64BIT:
10083 switch (cbValue)
10084 {
10085 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10086 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10087 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10088 default:
10089 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10090 }
10091 break;
10092
10093 default:
10094 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10095 }
10096 }
10097
10098 if (pVCpu->iem.s.cActiveMappings)
10099 iemMemRollback(pVCpu);
10100
10101 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10102}
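
/*
 * Illustrative sketch: how a VM-exit handler might forward a "rep outsb" with
 * 32-bit addressing to the string I/O write API above. The 2-byte instruction
 * length (REP prefix + opcode) and the DS segment choice (X86_SREG_DS) are
 * illustrative assumptions; the port number itself is taken from guest DX by
 * the implementation rather than passed here.
 */
DECLINLINE(VBOXSTRICTRC) iemExampleRepOutsb(PVMCPUCC pVCpu)
{
    return IEMExecStringIoWrite(pVCpu, 1 /*cbValue*/, IEMMODE_32BIT, true /*fRepPrefix*/,
                                2 /*cbInstr*/, X86_SREG_DS /*iEffSeg*/, false /*fIoChecked*/);
}
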
10103
10104
10105/**
10106 * Interface for HM and EM for executing string I/O IN (read) instructions.
10107 *
10108 * This API ASSUMES that the caller has already verified that the guest code is
10109 * allowed to access the I/O port. (The I/O port is in the DX register in the
10110 * guest state.)
10111 *
10112 * @returns Strict VBox status code.
10113 * @param pVCpu The cross context virtual CPU structure.
10114 * @param cbValue The size of the I/O port access (1, 2, or 4).
10115 * @param enmAddrMode The addressing mode.
10116 * @param fRepPrefix Indicates whether a repeat prefix is used
10117 * (doesn't matter which for this instruction).
10118 * @param cbInstr The instruction length in bytes.
10119 * @param fIoChecked Whether the access to the I/O port has been
10120 * checked or not. It's typically checked in the
10121 * HM scenario.
10122 */
10123VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
10124 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
10125{
10126 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10127
10128 /*
10129 * State init.
10130 */
10131 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10132
10133 /*
10134 * Switch orgy for getting to the right handler.
10135 */
10136 VBOXSTRICTRC rcStrict;
10137 if (fRepPrefix)
10138 {
10139 switch (enmAddrMode)
10140 {
10141 case IEMMODE_16BIT:
10142 switch (cbValue)
10143 {
10144 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
10145 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
10146 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
10147 default:
10148 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10149 }
10150 break;
10151
10152 case IEMMODE_32BIT:
10153 switch (cbValue)
10154 {
10155 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
10156 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
10157 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
10158 default:
10159 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10160 }
10161 break;
10162
10163 case IEMMODE_64BIT:
10164 switch (cbValue)
10165 {
10166 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
10167 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
10168 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
10169 default:
10170 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10171 }
10172 break;
10173
10174 default:
10175 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10176 }
10177 }
10178 else
10179 {
10180 switch (enmAddrMode)
10181 {
10182 case IEMMODE_16BIT:
10183 switch (cbValue)
10184 {
10185 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
10186 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
10187 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
10188 default:
10189 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10190 }
10191 break;
10192
10193 case IEMMODE_32BIT:
10194 switch (cbValue)
10195 {
10196 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
10197 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
10198 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
10199 default:
10200 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10201 }
10202 break;
10203
10204 case IEMMODE_64BIT:
10205 switch (cbValue)
10206 {
10207 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
10208 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
10209 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
10210 default:
10211 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10212 }
10213 break;
10214
10215 default:
10216 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10217 }
10218 }
10219
10220 if ( pVCpu->iem.s.cActiveMappings == 0
10221 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
10222 { /* likely */ }
10223 else
10224 {
10225 AssertMsg(!IOM_SUCCESS(rcStrict), ("%#x\n", VBOXSTRICTRC_VAL(rcStrict)));
10226 iemMemRollback(pVCpu);
10227 }
10228 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10229}
10230
10231
10232/**
10233 * Interface for rawmode to execute an OUT instruction.
10234 *
10235 * @returns Strict VBox status code.
10236 * @param pVCpu The cross context virtual CPU structure.
10237 * @param cbInstr The instruction length in bytes.
10238 * @param u16Port The port to write to.
10239 * @param fImm Whether the port is specified using an immediate operand or
10240 * using the implicit DX register.
10241 * @param cbReg The register size.
10242 *
10243 * @remarks In ring-0 not all of the state needs to be synced in.
10244 */
10245VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
10246{
10247 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10248 Assert(cbReg <= 4 && cbReg != 3);
10249
10250 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10251 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_out, u16Port, cbReg,
10252 ((uint8_t)fImm << 7) | 0xf /** @todo never worked with intercepts */);
10253 Assert(!pVCpu->iem.s.cActiveMappings);
10254 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10255}
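
/*
 * Illustrative sketch: emulating a one-byte "out dx, al" via the decoded-OUT
 * interface above. cbReg must be 1, 2 or 4, and fImm distinguishes the
 * immediate-port form from the DX form. The wrapper name and the way the port
 * is pulled out of guest RDX here are illustrative assumptions.
 */
DECLINLINE(VBOXSTRICTRC) iemExampleOutDxAl(PVMCPUCC pVCpu)
{
    uint16_t const u16Port = (uint16_t)pVCpu->cpum.GstCtx.rdx;  /* DX form: port comes from guest DX */
    return IEMExecDecodedOut(pVCpu, 1 /*cbInstr*/, u16Port, false /*fImm*/, 1 /*cbReg*/);
}
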
10256
10257
10258/**
10259 * Interface for rawmode to execute an IN instruction.
10260 *
10261 * @returns Strict VBox status code.
10262 * @param pVCpu The cross context virtual CPU structure.
10263 * @param cbInstr The instruction length in bytes.
10264 * @param u16Port The port to read.
10265 * @param fImm Whether the port is specified using an immediate operand or
10266 * using the implicit DX.
10267 * @param cbReg The register size.
10268 */
10269VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
10270{
10271 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10272 Assert(cbReg <= 4 && cbReg != 3);
10273
10274 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10275 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_in, u16Port, cbReg,
10276 ((uint8_t)fImm << 7) | 0xf /** @todo never worked with intercepts */);
10277 Assert(!pVCpu->iem.s.cActiveMappings);
10278 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10279}
10280
10281
10282/**
10283 * Interface for HM and EM to write to a CRx register.
10284 *
10285 * @returns Strict VBox status code.
10286 * @param pVCpu The cross context virtual CPU structure.
10287 * @param cbInstr The instruction length in bytes.
10288 * @param iCrReg The control register number (destination).
10289 * @param iGReg The general purpose register number (source).
10290 *
10291 * @remarks In ring-0 not all of the state needs to be synced in.
10292 */
10293VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
10294{
10295 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10296 Assert(iCrReg < 16);
10297 Assert(iGReg < 16);
10298
10299 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10300 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
10301 Assert(!pVCpu->iem.s.cActiveMappings);
10302 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10303}
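
/*
 * Illustrative sketch: forwarding an intercepted "mov cr3, rax" to the CRx
 * write interface above. The register indices (CR3 = 3, RAX = 0), the 3-byte
 * instruction length of this particular encoding and the wrapper name are
 * illustrative assumptions.
 */
DECLINLINE(VBOXSTRICTRC) iemExampleMovToCr3(PVMCPUCC pVCpu)
{
    return IEMExecDecodedMovCRxWrite(pVCpu, 3 /*cbInstr*/, 3 /*iCrReg=CR3*/, 0 /*iGReg=RAX*/);
}
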
10304
10305
10306/**
10307 * Interface for HM and EM to read from a CRx register.
10308 *
10309 * @returns Strict VBox status code.
10310 * @param pVCpu The cross context virtual CPU structure.
10311 * @param cbInstr The instruction length in bytes.
10312 * @param iGReg The general purpose register number (destination).
10313 * @param iCrReg The control register number (source).
10314 *
10315 * @remarks In ring-0 not all of the state needs to be synced in.
10316 */
10317VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
10318{
10319 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10320 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4
10321 | CPUMCTX_EXTRN_APIC_TPR);
10322 Assert(iCrReg < 16);
10323 Assert(iGReg < 16);
10324
10325 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10326 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
10327 Assert(!pVCpu->iem.s.cActiveMappings);
10328 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10329}
10330
10331
10332/**
10333 * Interface for HM and EM to write to a DRx register.
10334 *
10335 * @returns Strict VBox status code.
10336 * @param pVCpu The cross context virtual CPU structure.
10337 * @param cbInstr The instruction length in bytes.
10338 * @param iDrReg The debug register number (destination).
10339 * @param iGReg The general purpose register number (source).
10340 *
10341 * @remarks In ring-0 not all of the state needs to be synced in.
10342 */
10343VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovDRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iDrReg, uint8_t iGReg)
10344{
10345 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10346 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_DR7);
10347 Assert(iDrReg < 8);
10348 Assert(iGReg < 16);
10349
10350 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10351 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Dd_Rd, iDrReg, iGReg);
10352 Assert(!pVCpu->iem.s.cActiveMappings);
10353 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10354}
10355
10356
10357/**
10358 * Interface for HM and EM to read from a DRx register.
10359 *
10360 * @returns Strict VBox status code.
10361 * @param pVCpu The cross context virtual CPU structure.
10362 * @param cbInstr The instruction length in bytes.
10363 * @param iGReg The general purpose register number (destination).
10364 * @param iDrReg The debug register number (source).
10365 *
10366 * @remarks In ring-0 not all of the state needs to be synced in.
10367 */
10368VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovDRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iDrReg)
10369{
10370 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10371 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_DR7);
10372 Assert(iDrReg < 8);
10373 Assert(iGReg < 16);
10374
10375 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10376 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Dd, iGReg, iDrReg);
10377 Assert(!pVCpu->iem.s.cActiveMappings);
10378 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10379}
10380
10381
10382/**
10383 * Interface for HM and EM to clear the CR0[TS] bit.
10384 *
10385 * @returns Strict VBox status code.
10386 * @param pVCpu The cross context virtual CPU structure.
10387 * @param cbInstr The instruction length in bytes.
10388 *
10389 * @remarks In ring-0 not all of the state needs to be synced in.
10390 */
10391VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPUCC pVCpu, uint8_t cbInstr)
10392{
10393 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10394
10395 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10396 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
10397 Assert(!pVCpu->iem.s.cActiveMappings);
10398 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10399}
10400
10401
10402/**
10403 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
10404 *
10405 * @returns Strict VBox status code.
10406 * @param pVCpu The cross context virtual CPU structure.
10407 * @param cbInstr The instruction length in bytes.
10408 * @param uValue The value to load into CR0.
10409 * @param GCPtrEffDst The guest-linear address if the LMSW instruction has a
10410 * memory operand. Otherwise pass NIL_RTGCPTR.
10411 *
10412 * @remarks In ring-0 not all of the state needs to be synced in.
10413 */
10414VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t uValue, RTGCPTR GCPtrEffDst)
10415{
10416 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10417
10418 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10419 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_lmsw, uValue, GCPtrEffDst);
10420 Assert(!pVCpu->iem.s.cActiveMappings);
10421 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10422}
10423
10424
10425/**
10426 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
10427 *
10428 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
10429 *
10430 * @returns Strict VBox status code.
10431 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10432 * @param cbInstr The instruction length in bytes.
10433 * @remarks In ring-0 not all of the state needs to be synced in.
10434 * @thread EMT(pVCpu)
10435 */
10436VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPUCC pVCpu, uint8_t cbInstr)
10437{
10438 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10439
10440 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10441 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
10442 Assert(!pVCpu->iem.s.cActiveMappings);
10443 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10444}
10445
10446
10447/**
10448 * Interface for HM and EM to emulate the WBINVD instruction.
10449 *
10450 * @returns Strict VBox status code.
10451 * @param pVCpu The cross context virtual CPU structure.
10452 * @param cbInstr The instruction length in bytes.
10453 *
10454 * @remarks In ring-0 not all of the state needs to be synced in.
10455 */
10456VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWbinvd(PVMCPUCC pVCpu, uint8_t cbInstr)
10457{
10458 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10459
10460 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10461 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wbinvd);
10462 Assert(!pVCpu->iem.s.cActiveMappings);
10463 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10464}
10465
10466
10467/**
10468 * Interface for HM and EM to emulate the INVD instruction.
10469 *
10470 * @returns Strict VBox status code.
10471 * @param pVCpu The cross context virtual CPU structure.
10472 * @param cbInstr The instruction length in bytes.
10473 *
10474 * @remarks In ring-0 not all of the state needs to be synced in.
10475 */
10476VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvd(PVMCPUCC pVCpu, uint8_t cbInstr)
10477{
10478 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10479
10480 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10481 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invd);
10482 Assert(!pVCpu->iem.s.cActiveMappings);
10483 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10484}
10485
10486
10487/**
10488 * Interface for HM and EM to emulate the INVLPG instruction.
10489 *
10490 * @returns Strict VBox status code.
10491 * @retval VINF_PGM_SYNC_CR3
10492 *
10493 * @param pVCpu The cross context virtual CPU structure.
10494 * @param cbInstr The instruction length in bytes.
10495 * @param GCPtrPage The effective address of the page to invalidate.
10496 *
10497 * @remarks In ring-0 not all of the state needs to be synced in.
10498 */
10499VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpg(PVMCPUCC pVCpu, uint8_t cbInstr, RTGCPTR GCPtrPage)
10500{
10501 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10502
10503 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10504 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_invlpg, GCPtrPage);
10505 Assert(!pVCpu->iem.s.cActiveMappings);
10506 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10507}
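
/*
 * Illustrative sketch, not part of the original source: a caller of
 * IEMExecDecodedInvlpg should be prepared for VINF_PGM_SYNC_CR3 in addition
 * to VINF_SUCCESS.  The handler name is hypothetical.
 */
#if 0 /* example only */
static VBOXSTRICTRC hmExampleHandleInvlpgExit(PVMCPUCC pVCpu, uint8_t cbExitInstr, RTGCPTR GCPtrPage)
{
    VBOXSTRICTRC rcStrict = IEMExecDecodedInvlpg(pVCpu, cbExitInstr, GCPtrPage);
    /* VINF_PGM_SYNC_CR3 just means the shadow page tables need resyncing;
       the outer execution loop is expected to deal with it, so pass it on. */
    if (rcStrict == VINF_PGM_SYNC_CR3)
        Log(("Example: INVLPG at %RGv requested a CR3 sync\n", GCPtrPage));
    return rcStrict;
}
#endif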
10508
10509
10510/**
10511 * Interface for HM and EM to emulate the INVPCID instruction.
10512 *
10513 * @returns Strict VBox status code.
10514 * @retval VINF_PGM_SYNC_CR3
10515 *
10516 * @param pVCpu The cross context virtual CPU structure.
10517 * @param cbInstr The instruction length in bytes.
10518 * @param iEffSeg The effective segment register.
10519 * @param GCPtrDesc The effective address of the INVPCID descriptor.
10520 * @param uType The invalidation type.
10521 *
10522 * @remarks In ring-0 not all of the state needs to be synced in.
10523 */
10524VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvpcid(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPTR GCPtrDesc,
10525 uint64_t uType)
10526{
10527 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 4);
10528
10529 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10530 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_invpcid, iEffSeg, GCPtrDesc, uType);
10531 Assert(!pVCpu->iem.s.cActiveMappings);
10532 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10533}
10534
10535
10536/**
10537 * Interface for HM and EM to emulate the CPUID instruction.
10538 *
10539 * @returns Strict VBox status code.
10540 *
10541 * @param pVCpu The cross context virtual CPU structure.
10542 * @param cbInstr The instruction length in bytes.
10543 *
10544 * @remarks Not all of the state needs to be synced in; the usual plus RAX and RCX.
10545 */
10546VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedCpuid(PVMCPUCC pVCpu, uint8_t cbInstr)
10547{
10548 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10549 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX);
10550
10551 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10552 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_cpuid);
10553 Assert(!pVCpu->iem.s.cActiveMappings);
10554 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10555}
10556
10557
10558/**
10559 * Interface for HM and EM to emulate the RDPMC instruction.
10560 *
10561 * @returns Strict VBox status code.
10562 *
10563 * @param pVCpu The cross context virtual CPU structure.
10564 * @param cbInstr The instruction length in bytes.
10565 *
10566 * @remarks Not all of the state needs to be synced in.
10567 */
10568VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdpmc(PVMCPUCC pVCpu, uint8_t cbInstr)
10569{
10570 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10571 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
10572
10573 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10574 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdpmc);
10575 Assert(!pVCpu->iem.s.cActiveMappings);
10576 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10577}
10578
10579
10580/**
10581 * Interface for HM and EM to emulate the RDTSC instruction.
10582 *
10583 * @returns Strict VBox status code.
10584 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10585 *
10586 * @param pVCpu The cross context virtual CPU structure.
10587 * @param cbInstr The instruction length in bytes.
10588 *
10589 * @remarks Not all of the state needs to be synced in.
10590 */
10591VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtsc(PVMCPUCC pVCpu, uint8_t cbInstr)
10592{
10593 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10594 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
10595
10596 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10597 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtsc);
10598 Assert(!pVCpu->iem.s.cActiveMappings);
10599 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10600}
10601
10602
10603/**
10604 * Interface for HM and EM to emulate the RDTSCP instruction.
10605 *
10606 * @returns Strict VBox status code.
10607 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10608 *
10609 * @param pVCpu The cross context virtual CPU structure.
10610 * @param cbInstr The instruction length in bytes.
10611 *
10612 * @remarks Not all of the state needs to be synced in. Recommended
10613 * to include CPUMCTX_EXTRN_TSC_AUX to avoid an extra fetch call.
10614 */
10615VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtscp(PVMCPUCC pVCpu, uint8_t cbInstr)
10616{
10617 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10618 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_TSC_AUX);
10619
10620 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10621 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtscp);
10622 Assert(!pVCpu->iem.s.cActiveMappings);
10623 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10624}
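
/*
 * Illustrative sketch, not part of the original source: importing TSC_AUX up
 * front as recommended above.  hmExampleImportGuestState() is a stand-in for
 * whatever state-import helper the caller uses and is not a real API; the
 * handler name is hypothetical as well.
 */
#if 0 /* example only */
static VBOXSTRICTRC hmExampleHandleRdtscpExit(PVMCPUCC pVCpu, uint8_t cbExitInstr)
{
    /* Pulling in CPUMCTX_EXTRN_TSC_AUX here saves IEM an on-demand fetch. */
    int rc = hmExampleImportGuestState(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK
                                            | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_TSC_AUX);
    AssertRCReturn(rc, rc);
    return IEMExecDecodedRdtscp(pVCpu, cbExitInstr);
}
#endif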
10625
10626
10627/**
10628 * Interface for HM and EM to emulate the RDMSR instruction.
10629 *
10630 * @returns Strict VBox status code.
10631 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10632 *
10633 * @param pVCpu The cross context virtual CPU structure.
10634 * @param cbInstr The instruction length in bytes.
10635 *
10636 * @remarks Not all of the state needs to be synced in. Requires RCX and
10637 * (currently) all MSRs.
10638 */
10639VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
10640{
10641 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10642 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_ALL_MSRS);
10643
10644 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10645 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdmsr);
10646 Assert(!pVCpu->iem.s.cActiveMappings);
10647 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10648}
10649
10650
10651/**
10652 * Interface for HM and EM to emulate the WRMSR instruction.
10653 *
10654 * @returns Strict VBox status code.
10655 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10656 *
10657 * @param pVCpu The cross context virtual CPU structure.
10658 * @param cbInstr The instruction length in bytes.
10659 *
10660 * @remarks Not all of the state needs to be synced in. Requires RCX, RAX, RDX,
10661 * and (currently) all MSRs.
10662 */
10663VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWrmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
10664{
10665 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10666 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK
10667 | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_ALL_MSRS);
10668
10669 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10670 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wrmsr);
10671 Assert(!pVCpu->iem.s.cActiveMappings);
10672 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10673}
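
/*
 * Illustrative sketch, not part of the original source: forwarding a decoded
 * WRMSR exit.  The handler name is hypothetical; the state listed in the
 * remarks above (RCX, RAX, RDX and the MSRs) must already be imported.
 */
#if 0 /* example only */
static VBOXSTRICTRC hmExampleHandleWrmsrExit(PVMCPUCC pVCpu, uint8_t cbExitInstr)
{
    /* IEM reads ECX/EAX/EDX straight from the CPU context, so nothing beyond
       the instruction length needs to be passed explicitly. */
    return IEMExecDecodedWrmsr(pVCpu, cbExitInstr);
}
#endif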
10674
10675
10676/**
10677 * Interface for HM and EM to emulate the MONITOR instruction.
10678 *
10679 * @returns Strict VBox status code.
10680 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10681 *
10682 * @param pVCpu The cross context virtual CPU structure.
10683 * @param cbInstr The instruction length in bytes.
10684 *
10685 * @remarks Not all of the state needs to be synced in.
10686 * @remarks ASSUMES the default DS segment and that no segment override
10687 * prefixes are used.
10688 */
10689VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMonitor(PVMCPUCC pVCpu, uint8_t cbInstr)
10690{
10691 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10692 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);
10693
10694 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10695 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_monitor, X86_SREG_DS);
10696 Assert(!pVCpu->iem.s.cActiveMappings);
10697 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10698}
10699
10700
10701/**
10702 * Interface for HM and EM to emulate the MWAIT instruction.
10703 *
10704 * @returns Strict VBox status code.
10705 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10706 *
10707 * @param pVCpu The cross context virtual CPU structure.
10708 * @param cbInstr The instruction length in bytes.
10709 *
10710 * @remarks Not all of the state needs to be synced in.
10711 */
10712VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMwait(PVMCPUCC pVCpu, uint8_t cbInstr)
10713{
10714 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10715 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX);
10716
10717 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10718 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_mwait);
10719 Assert(!pVCpu->iem.s.cActiveMappings);
10720 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10721}
10722
10723
10724/**
10725 * Interface for HM and EM to emulate the HLT instruction.
10726 *
10727 * @returns Strict VBox status code.
10728 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10729 *
10730 * @param pVCpu The cross context virtual CPU structure.
10731 * @param cbInstr The instruction length in bytes.
10732 *
10733 * @remarks Not all of the state needs to be synced in.
10734 */
10735VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedHlt(PVMCPUCC pVCpu, uint8_t cbInstr)
10736{
10737 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10738
10739 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10740 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_hlt);
10741 Assert(!pVCpu->iem.s.cActiveMappings);
10742 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10743}
10744
10745
10746/**
10747 * Checks if IEM is in the process of delivering an event (interrupt or
10748 * exception).
10749 *
10750 * @returns true if we're in the process of raising an interrupt or exception,
10751 * false otherwise.
10752 * @param pVCpu The cross context virtual CPU structure.
10753 * @param puVector Where to store the vector associated with the
10754 * currently delivered event, optional.
10755 * @param pfFlags Where to store the event delivery flags (see
10756 * IEM_XCPT_FLAGS_XXX), optional.
10757 * @param puErr Where to store the error code associated with the
10758 * event, optional.
10759 * @param puCr2 Where to store the CR2 associated with the event,
10760 * optional.
10761 * @remarks The caller should check the flags to determine if the error code and
10762 * CR2 are valid for the event.
10763 */
10764VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPUCC pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr, uint64_t *puCr2)
10765{
10766 bool const fRaisingXcpt = pVCpu->iem.s.cXcptRecursions > 0;
10767 if (fRaisingXcpt)
10768 {
10769 if (puVector)
10770 *puVector = pVCpu->iem.s.uCurXcpt;
10771 if (pfFlags)
10772 *pfFlags = pVCpu->iem.s.fCurXcpt;
10773 if (puErr)
10774 *puErr = pVCpu->iem.s.uCurXcptErr;
10775 if (puCr2)
10776 *puCr2 = pVCpu->iem.s.uCurXcptCr2;
10777 }
10778 return fRaisingXcpt;
10779}
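
/*
 * Illustrative sketch, not part of the original source: querying the event
 * currently being delivered.  The IEM_XCPT_FLAGS_ERR / IEM_XCPT_FLAGS_CR2
 * bits are assumed from the IEM_XCPT_FLAGS_XXX set referenced above; treat
 * the exact flag names as an assumption.
 */
#if 0 /* example only */
static void exampleLogCurrentXcpt(PVMCPUCC pVCpu)
{
    uint8_t  uVector = 0;
    uint32_t fFlags  = 0;
    uint32_t uErr    = 0;
    uint64_t uCr2    = 0;
    if (IEMGetCurrentXcpt(pVCpu, &uVector, &fFlags, &uErr, &uCr2))
    {
        /* Only trust the error code and CR2 when the flags say they are valid. */
        Log(("Delivering vector %#x fFlags=%#x%s%s\n", uVector, fFlags,
             (fFlags & IEM_XCPT_FLAGS_ERR) ? " (has error code)" : "",
             (fFlags & IEM_XCPT_FLAGS_CR2) ? " (has CR2)" : ""));
    }
}
#endif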
10780
10781#ifdef IN_RING3
10782
10783/**
10784 * Handles the unlikely and probably fatal merge cases.
10785 *
10786 * @returns Merged status code.
10787 * @param rcStrict Current EM status code.
10788 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
10789 * with @a rcStrict.
10790 * @param iMemMap The memory mapping index. For error reporting only.
10791 * @param pVCpu The cross context virtual CPU structure of the calling
10792 * thread, for error reporting only.
10793 */
10794DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
10795 unsigned iMemMap, PVMCPUCC pVCpu)
10796{
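    /* A straight failure on either side takes precedence, and identical
       statuses trivially merge to themselves; any other combination is
       unexpected and is flagged as an IPE below. */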
10797 if (RT_FAILURE_NP(rcStrict))
10798 return rcStrict;
10799
10800 if (RT_FAILURE_NP(rcStrictCommit))
10801 return rcStrictCommit;
10802
10803 if (rcStrict == rcStrictCommit)
10804 return rcStrictCommit;
10805
10806 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
10807 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
10808 pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
10809 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
10810 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
10811 return VERR_IOM_FF_STATUS_IPE;
10812}
10813
10814
10815/**
10816 * Helper for IOMR3ProcessForceFlag.
10817 *
10818 * @returns Merged status code.
10819 * @param rcStrict Current EM status code.
10820 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
10821 * with @a rcStrict.
10822 * @param iMemMap The memory mapping index. For error reporting only.
10823 * @param pVCpu The cross context virtual CPU structure of the calling
10824 * thread, for error reporting only.
10825 */
10826DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPUCC pVCpu)
10827{
10828 /* Simple. */
10829 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
10830 return rcStrictCommit;
10831
10832 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
10833 return rcStrict;
10834
10835 /* EM scheduling status codes. */
10836 if (RT_LIKELY( rcStrict >= VINF_EM_FIRST
10837 && rcStrict <= VINF_EM_LAST))
10838 {
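        /* Both are EM scheduling statuses; the numerically smaller one is
           treated as the more important request, so keep that one. */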
10839 if (RT_LIKELY( rcStrictCommit >= VINF_EM_FIRST
10840 && rcStrictCommit <= VINF_EM_LAST))
10841 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
10842 }
10843
10844 /* Unlikely */
10845 return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
10846}
10847
10848
10849/**
10850 * Called by force-flag handling code when VMCPU_FF_IEM is set.
10851 *
10852 * @returns Merge between @a rcStrict and what the commit operation returned.
10853 * @param pVM The cross context VM structure.
10854 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10855 * @param rcStrict The status code returned by ring-0 or raw-mode.
10856 */
10857VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
10858{
10859 /*
10860 * Reset the pending commit.
10861 */
10862 AssertMsg( (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
10863 & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
10864 ("%#x %#x %#x\n",
10865 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
10866 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
10867
10868 /*
10869 * Commit the pending bounce buffers (usually just one).
10870 */
10871 unsigned cBufs = 0;
10872 unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
10873 while (iMemMap-- > 0)
10874 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
10875 {
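            /* Each pending bounce buffer describes a write that may straddle a
               page boundary, hence the separate first and second physical
               ranges committed below. */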
10876 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
10877 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
10878 Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
10879
10880 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
10881 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
10882 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
10883
10884 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
10885 {
10886 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
10887 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
10888 pbBuf,
10889 cbFirst,
10890 PGMACCESSORIGIN_IEM);
10891 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
10892 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
10893 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
10894 VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
10895 }
10896
10897 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
10898 {
10899 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
10900 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
10901 pbBuf + cbFirst,
10902 cbSecond,
10903 PGMACCESSORIGIN_IEM);
10904 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
10905 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
10906 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
10907 VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
10908 }
10909 cBufs++;
10910 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
10911 }
10912
10913 AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
10914 ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
10915 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
10916 pVCpu->iem.s.cActiveMappings = 0;
10917 return rcStrict;
10918}
10919
10920#endif /* IN_RING3 */
10921