VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@102310

Last change on this file was revision 102310, checked in by vboxsync, 14 months ago

VMM/IEM: Hook up the DBGFEVENT_XCPT_XXX events; more exception logging; disabled the Log3 stuff via cpum as it's way too verbose these days. bugref:10371

1/* $Id: IEMAll.cpp 102310 2023-11-27 12:58:44Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/** @page pg_iem IEM - Interpreted Execution Manager
30 *
30 * The interpreted execution manager (IEM) is for executing short guest code
32 * sequences that are causing too many exits / virtualization traps. It will
33 * also be used to interpret single instructions, thus replacing the selective
34 * interpreters in EM and IOM.
35 *
36 * Design goals:
37 * - Relatively small footprint, although we favour speed and correctness
38 * over size.
39 * - Reasonably fast.
40 * - Correctly handle lock prefixed instructions.
41 * - Complete instruction set - eventually.
42 * - Refactorable into a recompiler, maybe.
43 * - Replace EMInterpret*.
44 *
45 * Using the existing disassembler has been considered, however this is thought
46 * to conflict with speed as the disassembler chews things a bit too much while
47 * leaving us with a somewhat complicated state to interpret afterwards.
48 *
49 *
50 * The current code is very much work in progress. You've been warned!
51 *
52 *
53 * @section sec_iem_fpu_instr FPU Instructions
54 *
55 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
56 * same or equivalent instructions on the host FPU. To make life easy, we also
57 * let the FPU prioritize the unmasked exceptions for us. This, however, only
58 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
59 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
60 * can trigger spurious FPU exceptions.
61 *
62 * The guest FPU state is not loaded into the host CPU and kept there till we
63 * leave IEM because the calling conventions have declared an all year open
64 * season on much of the FPU state. For instance an innocent looking call to
65 * memcpy might end up using a whole bunch of XMM or MM registers if the
66 * particular implementation finds it worthwhile.
67 *
68 *
69 * @section sec_iem_logging Logging
70 *
71 * The IEM code uses the "IEM" log group for the main logging. The different
72 * logging levels/flags are generally used for the following purposes:
73 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
74 * - Flow (LogFlow) : Basic enter/exit IEM state info.
75 * - Level 2 (Log2) : ?
76 * - Level 3 (Log3) : More detailed enter/exit IEM state info.
77 * - Level 4 (Log4) : Decoding mnemonics w/ EIP.
78 * - Level 5 (Log5) : Decoding details.
79 * - Level 6 (Log6) : Enables/disables the lockstep comparison with REM.
80 * - Level 7 (Log7) : iret++ execution logging.
81 * - Level 8 (Log8) :
82 * - Level 9 (Log9) :
83 * - Level 10 (Log10): TLBs.
84 * - Level 11 (Log11): Unmasked FPU exceptions.
85 *
86 * The \"IEM_MEM\" log group covers most of memory related details logging,
87 * except for errors and exceptions:
88 * - Level 1 (Log) : Reads.
89 * - Level 2 (Log2) : Read fallbacks.
90 * - Level 3 (Log3) : MemMap read.
91 * - Level 4 (Log4) : MemMap read fallbacks.
92 * - Level 5 (Log5) : Writes.
93 * - Level 6 (Log6) : Write fallbacks.
94 * - Level 7 (Log7) : MemMap writes and read-writes.
95 * - Level 8 (Log8) : MemMap write and read-write fallbacks.
96 * - Level 9 (Log9) : Stack reads.
97 * - Level 10 (Log10): Stack read fallbacks.
98 * - Level 11 (Log11): Stack writes.
99 * - Level 12 (Log12): Stack write fallbacks.
100 * - Flow (LogFlow) :
101 *
102 * The SVM (AMD-V) and VMX (VT-x) code has the following assignments:
103 * - Level 1 (Log) : Errors and other major events.
104 * - Flow (LogFlow) : Misc flow stuff (cleanup?)
105 * - Level 2 (Log2) : VM exits.
106 *
107 * The syscall logging level assignments:
108 * - Level 1: DOS and BIOS.
109 * - Level 2: Windows 3.x
110 * - Level 3: Linux.
111 */
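/* Quick illustration of how the groups above are typically enabled at run time
 * (a hedged example, not taken from this file): debug builds honour the IPRT
 * logger environment variable, so something along the lines of
 *     VBOX_LOG="+iem.e.l.f.l3"
 * should give IEM level 1, flow and level 3 logging.  The exact group/flag
 * syntax is defined by the IPRT logger, so treat the line above as an
 * illustration rather than a reference. */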
112
113/* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
114#ifdef _MSC_VER
115# pragma warning(disable:4505)
116#endif
117
118
119/*********************************************************************************************************************************
120* Header Files *
121*********************************************************************************************************************************/
122#define LOG_GROUP LOG_GROUP_IEM
123#define VMCPU_INCL_CPUM_GST_CTX
124#include <VBox/vmm/iem.h>
125#include <VBox/vmm/cpum.h>
126#include <VBox/vmm/apic.h>
127#include <VBox/vmm/pdm.h>
128#include <VBox/vmm/pgm.h>
129#include <VBox/vmm/iom.h>
130#include <VBox/vmm/em.h>
131#include <VBox/vmm/hm.h>
132#include <VBox/vmm/nem.h>
133#include <VBox/vmm/gim.h>
134#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
135# include <VBox/vmm/em.h>
136# include <VBox/vmm/hm_svm.h>
137#endif
138#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
139# include <VBox/vmm/hmvmxinline.h>
140#endif
141#include <VBox/vmm/tm.h>
142#include <VBox/vmm/dbgf.h>
143#include <VBox/vmm/dbgftrace.h>
144#include "IEMInternal.h"
145#include <VBox/vmm/vmcc.h>
146#include <VBox/log.h>
147#include <VBox/err.h>
148#include <VBox/param.h>
149#include <VBox/dis.h>
150#include <iprt/asm-math.h>
151#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
152# include <iprt/asm-amd64-x86.h>
153#elif defined(RT_ARCH_ARM64) || defined(RT_ARCH_ARM32)
154# include <iprt/asm-arm.h>
155#endif
156#include <iprt/assert.h>
157#include <iprt/string.h>
158#include <iprt/x86.h>
159
160#include "IEMInline.h"
161
162
163/*********************************************************************************************************************************
164* Structures and Typedefs *
165*********************************************************************************************************************************/
166/**
167 * CPU exception classes.
168 */
169typedef enum IEMXCPTCLASS
170{
171 IEMXCPTCLASS_BENIGN,
172 IEMXCPTCLASS_CONTRIBUTORY,
173 IEMXCPTCLASS_PAGE_FAULT,
174 IEMXCPTCLASS_DOUBLE_FAULT
175} IEMXCPTCLASS;
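/* Background (standard x86 double fault rules, not specific to this file): #DE, #TS,
 * #NP, #SS and #GP are the contributory exceptions, #PF gets its own class because a
 * page fault only escalates to #DF when it collides with a contributory exception or
 * another #PF, and the remaining vectors are benign.  The actual vector-to-class
 * mapping is done by the exception raising/merging code further down. */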
176
177
178/*********************************************************************************************************************************
179* Global Variables *
180*********************************************************************************************************************************/
181#if defined(IEM_LOG_MEMORY_WRITES)
182/** What IEM just wrote. */
183uint8_t g_abIemWrote[256];
184/** How much IEM just wrote. */
185size_t g_cbIemWrote;
186#endif
187
188
189/*********************************************************************************************************************************
190* Internal Functions *
191*********************************************************************************************************************************/
192static VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,
193 uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT;
194
195
196/**
197 * Calculates IEM_F_BRK_PENDING_XXX (IEM_F_PENDING_BRK_MASK) flags, slow code
198 * path.
199 *
200 * @returns IEM_F_BRK_PENDING_XXX or zero.
201 * @param pVCpu The cross context virtual CPU structure of the
202 * calling thread.
203 *
204 * @note Don't call directly, use iemCalcExecDbgFlags instead.
205 */
206uint32_t iemCalcExecDbgFlagsSlow(PVMCPUCC pVCpu)
207{
208 uint32_t fExec = 0;
209
210 /*
211 * Process guest breakpoints.
212 */
213#define PROCESS_ONE_BP(a_fDr7, a_iBp) do { \
214 if (a_fDr7 & X86_DR7_L_G(a_iBp)) \
215 { \
216 switch (X86_DR7_GET_RW(a_fDr7, a_iBp)) \
217 { \
218 case X86_DR7_RW_EO: \
219 fExec |= IEM_F_PENDING_BRK_INSTR; \
220 break; \
221 case X86_DR7_RW_WO: \
222 case X86_DR7_RW_RW: \
223 fExec |= IEM_F_PENDING_BRK_DATA; \
224 break; \
225 case X86_DR7_RW_IO: \
226 fExec |= IEM_F_PENDING_BRK_X86_IO; \
227 break; \
228 } \
229 } \
230 } while (0)
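 /* Note: the macro above is applied both to the guest DR7 and to the hypervisor DR7
    maintained by DBGF (below), so breakpoints armed by the debugger surface through
    the same IEM_F_PENDING_BRK_XXX flags as guest armed ones. */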
231
232 uint32_t const fGstDr7 = (uint32_t)pVCpu->cpum.GstCtx.dr[7];
233 if (fGstDr7 & X86_DR7_ENABLED_MASK)
234 {
235 PROCESS_ONE_BP(fGstDr7, 0);
236 PROCESS_ONE_BP(fGstDr7, 1);
237 PROCESS_ONE_BP(fGstDr7, 2);
238 PROCESS_ONE_BP(fGstDr7, 3);
239 }
240
241 /*
242 * Process hypervisor breakpoints.
243 */
244 uint32_t const fHyperDr7 = DBGFBpGetDR7(pVCpu->CTX_SUFF(pVM));
245 if (fHyperDr7 & X86_DR7_ENABLED_MASK)
246 {
247 PROCESS_ONE_BP(fHyperDr7, 0);
248 PROCESS_ONE_BP(fHyperDr7, 1);
249 PROCESS_ONE_BP(fHyperDr7, 2);
250 PROCESS_ONE_BP(fHyperDr7, 3);
251 }
252
253 return fExec;
254}
255
256
257/**
258 * Initializes the decoder state.
259 *
260 * iemReInitDecoder is mostly a copy of this function.
261 *
262 * @param pVCpu The cross context virtual CPU structure of the
263 * calling thread.
264 * @param fExecOpts Optional execution flags:
265 * - IEM_F_BYPASS_HANDLERS
266 * - IEM_F_X86_DISREGARD_LOCK
267 */
268DECLINLINE(void) iemInitDecoder(PVMCPUCC pVCpu, uint32_t fExecOpts)
269{
270 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
271 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
272 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
273 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
274 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
275 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
276 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
277 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
278 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
279 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
280
281 /* Execution state: */
282 uint32_t fExec;
283 pVCpu->iem.s.fExec = fExec = iemCalcExecFlags(pVCpu) | fExecOpts;
284
285 /* Decoder state: */
286 pVCpu->iem.s.enmDefAddrMode = fExec & IEM_F_MODE_CPUMODE_MASK; /** @todo check if this is correct... */
287 pVCpu->iem.s.enmEffAddrMode = fExec & IEM_F_MODE_CPUMODE_MASK;
288 if ((fExec & IEM_F_MODE_CPUMODE_MASK) != IEMMODE_64BIT)
289 {
290 pVCpu->iem.s.enmDefOpSize = fExec & IEM_F_MODE_CPUMODE_MASK; /** @todo check if this is correct... */
291 pVCpu->iem.s.enmEffOpSize = fExec & IEM_F_MODE_CPUMODE_MASK;
292 }
293 else
294 {
295 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
296 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
297 }
298 pVCpu->iem.s.fPrefixes = 0;
299 pVCpu->iem.s.uRexReg = 0;
300 pVCpu->iem.s.uRexB = 0;
301 pVCpu->iem.s.uRexIndex = 0;
302 pVCpu->iem.s.idxPrefix = 0;
303 pVCpu->iem.s.uVex3rdReg = 0;
304 pVCpu->iem.s.uVexLength = 0;
305 pVCpu->iem.s.fEvexStuff = 0;
306 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
307#ifdef IEM_WITH_CODE_TLB
308 pVCpu->iem.s.pbInstrBuf = NULL;
309 pVCpu->iem.s.offInstrNextByte = 0;
310 pVCpu->iem.s.offCurInstrStart = 0;
311# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
312 pVCpu->iem.s.offOpcode = 0;
313# endif
314# ifdef VBOX_STRICT
315 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
316 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
317 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
318 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
319# endif
320#else
321 pVCpu->iem.s.offOpcode = 0;
322 pVCpu->iem.s.cbOpcode = 0;
323#endif
324 pVCpu->iem.s.offModRm = 0;
325 pVCpu->iem.s.cActiveMappings = 0;
326 pVCpu->iem.s.iNextMapping = 0;
327 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
328
329#ifdef DBGFTRACE_ENABLED
330 switch (IEM_GET_CPU_MODE(pVCpu))
331 {
332 case IEMMODE_64BIT:
333 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.rip);
334 break;
335 case IEMMODE_32BIT:
336 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
337 break;
338 case IEMMODE_16BIT:
339 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
340 break;
341 }
342#endif
343}
344
345
346/**
347 * Reinitializes the decoder state 2nd+ loop of IEMExecLots.
348 *
349 * This is mostly a copy of iemInitDecoder.
350 *
351 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
352 */
353DECLINLINE(void) iemReInitDecoder(PVMCPUCC pVCpu)
354{
355 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
356 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
357 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
358 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
359 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
360 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
361 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
362 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
363 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
364
365 /* ASSUMES: Anyone changing CPU state affecting the fExec bits will update them! */
366 AssertMsg((pVCpu->iem.s.fExec & ~IEM_F_USER_OPTS) == iemCalcExecFlags(pVCpu),
367 ("fExec=%#x iemCalcExecModeFlags=%#x\n", pVCpu->iem.s.fExec, iemCalcExecFlags(pVCpu)));
368
369 IEMMODE const enmMode = IEM_GET_CPU_MODE(pVCpu);
370 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
371 pVCpu->iem.s.enmEffAddrMode = enmMode;
372 if (enmMode != IEMMODE_64BIT)
373 {
374 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
375 pVCpu->iem.s.enmEffOpSize = enmMode;
376 }
377 else
378 {
379 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
380 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
381 }
382 pVCpu->iem.s.fPrefixes = 0;
383 pVCpu->iem.s.uRexReg = 0;
384 pVCpu->iem.s.uRexB = 0;
385 pVCpu->iem.s.uRexIndex = 0;
386 pVCpu->iem.s.idxPrefix = 0;
387 pVCpu->iem.s.uVex3rdReg = 0;
388 pVCpu->iem.s.uVexLength = 0;
389 pVCpu->iem.s.fEvexStuff = 0;
390 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
391#ifdef IEM_WITH_CODE_TLB
392 if (pVCpu->iem.s.pbInstrBuf)
393 {
394 uint64_t off = (enmMode == IEMMODE_64BIT
395 ? pVCpu->cpum.GstCtx.rip
396 : pVCpu->cpum.GstCtx.eip + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base)
397 - pVCpu->iem.s.uInstrBufPc;
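 /* If the new RIP still falls inside the code page mapped last time, simply
    reposition within the existing instruction buffer; otherwise reset it so the
    next opcode fetch re-translates and re-maps the page. */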
398 if (off < pVCpu->iem.s.cbInstrBufTotal)
399 {
400 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
401 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
402 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
403 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
404 else
405 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
406 }
407 else
408 {
409 pVCpu->iem.s.pbInstrBuf = NULL;
410 pVCpu->iem.s.offInstrNextByte = 0;
411 pVCpu->iem.s.offCurInstrStart = 0;
412 pVCpu->iem.s.cbInstrBuf = 0;
413 pVCpu->iem.s.cbInstrBufTotal = 0;
414 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
415 }
416 }
417 else
418 {
419 pVCpu->iem.s.offInstrNextByte = 0;
420 pVCpu->iem.s.offCurInstrStart = 0;
421 pVCpu->iem.s.cbInstrBuf = 0;
422 pVCpu->iem.s.cbInstrBufTotal = 0;
423# ifdef VBOX_STRICT
424 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
425# endif
426 }
427# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
428 pVCpu->iem.s.offOpcode = 0;
429# endif
430#else /* !IEM_WITH_CODE_TLB */
431 pVCpu->iem.s.cbOpcode = 0;
432 pVCpu->iem.s.offOpcode = 0;
433#endif /* !IEM_WITH_CODE_TLB */
434 pVCpu->iem.s.offModRm = 0;
435 Assert(pVCpu->iem.s.cActiveMappings == 0);
436 pVCpu->iem.s.iNextMapping = 0;
437 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
438 Assert(!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS));
439
440#ifdef DBGFTRACE_ENABLED
441 switch (enmMode)
442 {
443 case IEMMODE_64BIT:
444 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.rip);
445 break;
446 case IEMMODE_32BIT:
447 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
448 break;
449 case IEMMODE_16BIT:
450 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
451 break;
452 }
453#endif
454}
455
456
457
458/**
459 * Prefetch opcodes the first time when starting executing.
460 *
461 * @returns Strict VBox status code.
462 * @param pVCpu The cross context virtual CPU structure of the
463 * calling thread.
464 * @param fExecOpts Optional execution flags:
465 * - IEM_F_BYPASS_HANDLERS
466 * - IEM_F_X86_DISREGARD_LOCK
467 */
468static VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPUCC pVCpu, uint32_t fExecOpts) RT_NOEXCEPT
469{
470 iemInitDecoder(pVCpu, fExecOpts);
471
472#ifndef IEM_WITH_CODE_TLB
473 /*
474 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
475 *
476 * First translate CS:rIP to a physical address.
477 *
478 * Note! The iemOpcodeFetchMoreBytes code depends on this here code to fetch
479 * all relevant bytes from the first page, as it ASSUMES it's only ever
480 * called for dealing with CS.LIM, page crossing and instructions that
481 * are too long.
482 */
483 uint32_t cbToTryRead;
484 RTGCPTR GCPtrPC;
485 if (IEM_IS_64BIT_CODE(pVCpu))
486 {
487 cbToTryRead = GUEST_PAGE_SIZE;
488 GCPtrPC = pVCpu->cpum.GstCtx.rip;
489 if (IEM_IS_CANONICAL(GCPtrPC))
490 cbToTryRead = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
491 else
492 return iemRaiseGeneralProtectionFault0(pVCpu);
493 }
494 else
495 {
496 uint32_t GCPtrPC32 = pVCpu->cpum.GstCtx.eip;
497 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu), ("%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
498 if (GCPtrPC32 <= pVCpu->cpum.GstCtx.cs.u32Limit)
499 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrPC32 + 1;
500 else
501 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
502 if (cbToTryRead) { /* likely */ }
503 else /* overflowed */
504 {
505 Assert(GCPtrPC32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
506 cbToTryRead = UINT32_MAX;
507 }
508 GCPtrPC = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrPC32;
509 Assert(GCPtrPC <= UINT32_MAX);
510 }
511
512 PGMPTWALK Walk;
513 int rc = PGMGstGetPage(pVCpu, GCPtrPC, &Walk);
514 if (RT_SUCCESS(rc))
515 Assert(Walk.fSucceeded); /* probable. */
516 else
517 {
518 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
519# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
520 if (Walk.fFailed & PGM_WALKFAIL_EPT)
521 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
522# endif
523 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, rc);
524 }
525 if ((Walk.fEffective & X86_PTE_US) || IEM_GET_CPL(pVCpu) != 3) { /* likely */ }
526 else
527 {
528 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
529# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
530 if (Walk.fFailed & PGM_WALKFAIL_EPT)
531 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
532# endif
533 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
534 }
535 if (!(Walk.fEffective & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
536 else
537 {
538 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
539# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
540 if (Walk.fFailed & PGM_WALKFAIL_EPT)
541 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
542# endif
543 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
544 }
545 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
546 /** @todo Check reserved bits and such stuff. PGM is better at doing
547 * that, so do it when implementing the guest virtual address
548 * TLB... */
549
550 /*
551 * Read the bytes at this address.
552 */
553 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
554 if (cbToTryRead > cbLeftOnPage)
555 cbToTryRead = cbLeftOnPage;
556 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
557 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
558
559 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
560 {
561 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
562 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
563 { /* likely */ }
564 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
565 {
566 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
567 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
568 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
569 }
570 else
571 {
572 Log((RT_SUCCESS(rcStrict)
573 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
574 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
575 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
576 return rcStrict;
577 }
578 }
579 else
580 {
581 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
582 if (RT_SUCCESS(rc))
583 { /* likely */ }
584 else
585 {
586 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
587 GCPtrPC, GCPhys, cbToTryRead, rc));
588 return rc;
589 }
590 }
591 pVCpu->iem.s.cbOpcode = cbToTryRead;
592#endif /* !IEM_WITH_CODE_TLB */
593 return VINF_SUCCESS;
594}
595
596
597/**
598 * Invalidates the IEM TLBs.
599 *
600 * This is called internally as well as by PGM when moving GC mappings.
601 *
602 * @param pVCpu The cross context virtual CPU structure of the calling
603 * thread.
604 */
605VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPUCC pVCpu)
606{
607#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
608 Log10(("IEMTlbInvalidateAll\n"));
609# ifdef IEM_WITH_CODE_TLB
610 pVCpu->iem.s.cbInstrBufTotal = 0;
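 /* The cheap invalidation trick: every entry's uTag has the current revision ORed
    in, so bumping the revision makes all existing entries miss without touching
    the (large) entry array.  Only when the revision counter wraps to zero, handled
    below, do we pay for scrubbing the tags. */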
611 pVCpu->iem.s.CodeTlb.uTlbRevision += IEMTLB_REVISION_INCR;
612 if (pVCpu->iem.s.CodeTlb.uTlbRevision != 0)
613 { /* very likely */ }
614 else
615 {
616 pVCpu->iem.s.CodeTlb.uTlbRevision = IEMTLB_REVISION_INCR;
617 unsigned i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
618 while (i-- > 0)
619 pVCpu->iem.s.CodeTlb.aEntries[i].uTag = 0;
620 }
621# endif
622
623# ifdef IEM_WITH_DATA_TLB
624 pVCpu->iem.s.DataTlb.uTlbRevision += IEMTLB_REVISION_INCR;
625 if (pVCpu->iem.s.DataTlb.uTlbRevision != 0)
626 { /* very likely */ }
627 else
628 {
629 pVCpu->iem.s.DataTlb.uTlbRevision = IEMTLB_REVISION_INCR;
630 unsigned i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
631 while (i-- > 0)
632 pVCpu->iem.s.DataTlb.aEntries[i].uTag = 0;
633 }
634# endif
635#else
636 RT_NOREF(pVCpu);
637#endif
638}
639
640
641/**
642 * Invalidates a page in the TLBs.
643 *
644 * @param pVCpu The cross context virtual CPU structure of the calling
645 * thread.
646 * @param GCPtr The address of the page to invalidate
647 * @thread EMT(pVCpu)
648 */
649VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPUCC pVCpu, RTGCPTR GCPtr)
650{
651#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
652 Log10(("IEMTlbInvalidatePage: GCPtr=%RGv\n", GCPtr));
653 GCPtr = IEMTLB_CALC_TAG_NO_REV(GCPtr);
654 Assert(!(GCPtr >> (48 - X86_PAGE_SHIFT)));
655 uintptr_t const idx = IEMTLB_TAG_TO_INDEX(GCPtr);
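 /* Both TLBs are direct mapped, so at most one entry per TLB can match this page
    and zeroing its tag is all the invalidation needed. */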
656
657# ifdef IEM_WITH_CODE_TLB
658 if (pVCpu->iem.s.CodeTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.CodeTlb.uTlbRevision))
659 {
660 pVCpu->iem.s.CodeTlb.aEntries[idx].uTag = 0;
661 if (GCPtr == IEMTLB_CALC_TAG_NO_REV(pVCpu->iem.s.uInstrBufPc))
662 pVCpu->iem.s.cbInstrBufTotal = 0;
663 }
664# endif
665
666# ifdef IEM_WITH_DATA_TLB
667 if (pVCpu->iem.s.DataTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.DataTlb.uTlbRevision))
668 pVCpu->iem.s.DataTlb.aEntries[idx].uTag = 0;
669# endif
670#else
671 NOREF(pVCpu); NOREF(GCPtr);
672#endif
673}
674
675
676#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
677/**
678 * Invalidates both TLBs the slow way following a rollover.
679 *
680 * Worker for IEMTlbInvalidateAllPhysical,
681 * IEMTlbInvalidateAllPhysicalAllCpus, iemOpcodeFetchBytesJmp, iemMemMap,
682 * iemMemMapJmp and others.
683 *
684 * @thread EMT(pVCpu)
685 */
686static void IEMTlbInvalidateAllPhysicalSlow(PVMCPUCC pVCpu)
687{
688 Log10(("IEMTlbInvalidateAllPhysicalSlow\n"));
689 ASMAtomicWriteU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
690 ASMAtomicWriteU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
691
692 unsigned i;
693# ifdef IEM_WITH_CODE_TLB
694 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
695 while (i-- > 0)
696 {
697 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
698 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~( IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
699 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
700 }
701# endif
702# ifdef IEM_WITH_DATA_TLB
703 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
704 while (i-- > 0)
705 {
706 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
707 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~( IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
708 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
709 }
710# endif
711
712}
713#endif
714
715
716/**
717 * Invalidates the host physical aspects of the IEM TLBs.
718 *
719 * This is called internally as well as by PGM when moving GC mappings.
720 *
721 * @param pVCpu The cross context virtual CPU structure of the calling
722 * thread.
723 * @note Currently not used.
724 */
725VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPUCC pVCpu)
726{
727#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
728 /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
729 Log10(("IEMTlbInvalidateAllPhysical\n"));
730
731# ifdef IEM_WITH_CODE_TLB
732 pVCpu->iem.s.cbInstrBufTotal = 0;
733# endif
734 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
735 if (RT_LIKELY(uTlbPhysRev > IEMTLB_PHYS_REV_INCR * 2))
736 {
737 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
738 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
739 }
740 else
741 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
742#else
743 NOREF(pVCpu);
744#endif
745}
746
747
748/**
749 * Invalidates the host physical aspects of the IEM TLBs.
750 *
751 * This is called internally as well as by PGM when moving GC mappings.
752 *
753 * @param pVM The cross context VM structure.
754 * @param idCpuCaller The ID of the calling EMT if available to the caller,
755 * otherwise NIL_VMCPUID.
756 * @param enmReason The reason we're called.
757 *
758 * @remarks Caller holds the PGM lock.
759 */
760VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVMCC pVM, VMCPUID idCpuCaller, IEMTLBPHYSFLUSHREASON enmReason)
761{
762#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
763 PVMCPUCC const pVCpuCaller = idCpuCaller >= pVM->cCpus ? VMMGetCpu(pVM) : VMMGetCpuById(pVM, idCpuCaller);
764 if (pVCpuCaller)
765 VMCPU_ASSERT_EMT(pVCpuCaller);
766 Log10(("IEMTlbInvalidateAllPhysicalAllCpus: %d\n", enmReason)); RT_NOREF(enmReason);
767
768 VMCC_FOR_EACH_VMCPU(pVM)
769 {
770# ifdef IEM_WITH_CODE_TLB
771 if (pVCpuCaller == pVCpu)
772 pVCpu->iem.s.cbInstrBufTotal = 0;
773# endif
774
775 uint64_t const uTlbPhysRevPrev = ASMAtomicUoReadU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev);
776 uint64_t uTlbPhysRevNew = uTlbPhysRevPrev + IEMTLB_PHYS_REV_INCR;
777 if (RT_LIKELY(uTlbPhysRevNew > IEMTLB_PHYS_REV_INCR * 2))
778 { /* likely */}
779 else if (pVCpuCaller == pVCpu)
780 uTlbPhysRevNew = IEMTLB_PHYS_REV_INCR;
781 else
782 {
783 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
784 continue;
785 }
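 /* Publish the new physical revision with a compare-exchange so we do not clobber
    an update the remote EMT may have performed itself in the meantime. */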
786 ASMAtomicCmpXchgU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev);
787 ASMAtomicCmpXchgU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev);
788 }
789 VMCC_FOR_EACH_VMCPU_END(pVM);
790
791#else
792 RT_NOREF(pVM, idCpuCaller, enmReason);
793#endif
794}
795
796
797/**
798 * Flushes the prefetch buffer, light version.
799 */
800void iemOpcodeFlushLight(PVMCPUCC pVCpu, uint8_t cbInstr)
801{
802#ifndef IEM_WITH_CODE_TLB
803 pVCpu->iem.s.cbOpcode = cbInstr;
804#else
805 RT_NOREF(pVCpu, cbInstr);
806#endif
807}
808
809
810/**
811 * Flushes the prefetch buffer, heavy version.
812 */
813void iemOpcodeFlushHeavy(PVMCPUCC pVCpu, uint8_t cbInstr)
814{
815#ifndef IEM_WITH_CODE_TLB
816 pVCpu->iem.s.cbOpcode = cbInstr; /* Note! SVM and VT-x may set this to zero on exit, rather than the instruction length. */
817#elif 1
818 pVCpu->iem.s.pbInstrBuf = NULL;
819 pVCpu->iem.s.cbInstrBufTotal = 0;
820 RT_NOREF(cbInstr);
821#else
822 RT_NOREF(pVCpu, cbInstr);
823#endif
824}
825
826
827
828#ifdef IEM_WITH_CODE_TLB
829
830/**
831 * Tries to fetch @a cbDst opcode bytes, raises the appropriate exception on
832 * failure and jumps.
833 *
834 * We end up here for a number of reasons:
835 * - pbInstrBuf isn't yet initialized.
836 * - Advancing beyond the buffer boundary (e.g. cross page).
837 * - Advancing beyond the CS segment limit.
838 * - Fetching from non-mappable page (e.g. MMIO).
839 *
840 * @param pVCpu The cross context virtual CPU structure of the
841 * calling thread.
842 * @param pvDst Where to return the bytes.
843 * @param cbDst Number of bytes to read. A value of zero is
844 * allowed for initializing pbInstrBuf (the
845 * recompiler does this). In this case it is best
846 * to set pbInstrBuf to NULL prior to the call.
847 */
848void iemOpcodeFetchBytesJmp(PVMCPUCC pVCpu, size_t cbDst, void *pvDst) IEM_NOEXCEPT_MAY_LONGJMP
849{
850# ifdef IN_RING3
851 for (;;)
852 {
853 Assert(cbDst <= 8);
854 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
855
856 /*
857 * We might have a partial buffer match, deal with that first to make the
858 * rest simpler. This is the first part of the cross page/buffer case.
859 */
860 if (pVCpu->iem.s.pbInstrBuf != NULL)
861 {
862 if (offBuf < pVCpu->iem.s.cbInstrBuf)
863 {
864 Assert(offBuf + cbDst > pVCpu->iem.s.cbInstrBuf);
865 uint32_t const cbCopy = pVCpu->iem.s.cbInstrBuf - pVCpu->iem.s.offInstrNextByte;
866 memcpy(pvDst, &pVCpu->iem.s.pbInstrBuf[offBuf], cbCopy);
867
868 cbDst -= cbCopy;
869 pvDst = (uint8_t *)pvDst + cbCopy;
870 offBuf += cbCopy;
871 pVCpu->iem.s.offInstrNextByte += offBuf;
872 }
873 }
874
875 /*
876 * Check segment limit, figuring how much we're allowed to access at this point.
877 *
878 * We will fault immediately if RIP is past the segment limit / in non-canonical
879 * territory. If we do continue, there are one or more bytes to read before we
880 * end up in trouble and we need to do that first before faulting.
881 */
882 RTGCPTR GCPtrFirst;
883 uint32_t cbMaxRead;
884 if (IEM_IS_64BIT_CODE(pVCpu))
885 {
886 GCPtrFirst = pVCpu->cpum.GstCtx.rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
887 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
888 { /* likely */ }
889 else
890 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
891 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
892 }
893 else
894 {
895 GCPtrFirst = pVCpu->cpum.GstCtx.eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
896 /* Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu)); - this is allowed */
897 if (RT_LIKELY((uint32_t)GCPtrFirst <= pVCpu->cpum.GstCtx.cs.u32Limit))
898 { /* likely */ }
899 else /** @todo For CPUs older than the 386, we should not necessarily generate \#GP here but wrap around! */
900 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
901 cbMaxRead = pVCpu->cpum.GstCtx.cs.u32Limit - (uint32_t)GCPtrFirst + 1;
902 if (cbMaxRead != 0)
903 { /* likely */ }
904 else
905 {
906 /* Overflowed because address is 0 and limit is max. */
907 Assert(GCPtrFirst == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
908 cbMaxRead = X86_PAGE_SIZE;
909 }
910 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base;
911 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
912 if (cbMaxRead2 < cbMaxRead)
913 cbMaxRead = cbMaxRead2;
914 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
915 }
916
917 /*
918 * Get the TLB entry for this piece of code.
919 */
920 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.CodeTlb, GCPtrFirst);
921 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.CodeTlb, uTag);
922 if (pTlbe->uTag == uTag)
923 {
924 /* likely when executing lots of code, otherwise unlikely */
925# ifdef VBOX_WITH_STATISTICS
926 pVCpu->iem.s.CodeTlb.cTlbHits++;
927# endif
928 }
929 else
930 {
931 pVCpu->iem.s.CodeTlb.cTlbMisses++;
932 PGMPTWALK Walk;
933 int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &Walk);
934 if (RT_FAILURE(rc))
935 {
936#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
937 /** @todo Nested VMX: Need to handle EPT violation/misconfig here? */
938 Assert(!(Walk.fFailed & PGM_WALKFAIL_EPT));
939#endif
940 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
941 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, rc);
942 }
943
944 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
945 Assert(Walk.fSucceeded);
946 pTlbe->uTag = uTag;
947 pTlbe->fFlagsAndPhysRev = (~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A))
948 | (Walk.fEffective >> X86_PTE_PAE_BIT_NX);
949 pTlbe->GCPhys = Walk.GCPhys;
950 pTlbe->pbMappingR3 = NULL;
951 }
952
953 /*
954 * Check TLB page table level access flags.
955 */
956 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
957 {
958 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && IEM_GET_CPL(pVCpu) == 3)
959 {
960 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
961 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
962 }
963 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
964 {
965 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrFirst));
966 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
967 }
968 }
969
970 /*
971 * Look up the physical page info if necessary.
972 */
973 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
974 { /* not necessary */ }
975 else
976 {
977 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
978 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
979 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
980 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
981 AssertCompile(PGMIEMGCPHYS2PTR_F_CODE_PAGE == IEMTLBE_F_PG_CODE_PAGE);
982 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
983 { /* likely */ }
984 else
985 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
986 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
987 | IEMTLBE_F_NO_MAPPINGR3
988 | IEMTLBE_F_PG_NO_READ
989 | IEMTLBE_F_PG_NO_WRITE
990 | IEMTLBE_F_PG_UNASSIGNED
991 | IEMTLBE_F_PG_CODE_PAGE);
992 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
993 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
994 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
995 }
996
997# if defined(IN_RING3) || defined(IN_RING0) /** @todo fixme */
998 /*
999 * Try do a direct read using the pbMappingR3 pointer.
1000 */
1001 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
1002 == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1003 {
1004 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1005 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
1006 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
1007 {
1008 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
1009 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
1010 }
1011 else
1012 {
1013 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1014 if (cbInstr + (uint32_t)cbDst <= 15)
1015 {
1016 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
1017 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1018 }
1019 else
1020 {
1021 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0)\n",
1022 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, cbDst));
1023 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1024 }
1025 }
1026 if (cbDst <= cbMaxRead)
1027 {
1028 pVCpu->iem.s.fTbCrossedPage |= offPg == 0 || pVCpu->iem.s.fTbBranched != 0; /** @todo Spurious load effect on branch handling? */
1029 pVCpu->iem.s.GCPhysInstrBufPrev = pVCpu->iem.s.GCPhysInstrBuf;
1030
1031 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
1032 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1033 pVCpu->iem.s.GCPhysInstrBuf = pTlbe->GCPhys;
1034 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
1035 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
1036 return;
1037 }
1038 pVCpu->iem.s.pbInstrBuf = NULL;
1039
1040 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
1041 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
1042 }
1043# else
1044# error "refactor as needed"
1045 /*
1046 * If there is no special read handling, we can read a bit more and
1047 * put it in the prefetch buffer.
1048 */
1049 if ( cbDst < cbMaxRead
1050 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1051 {
1052 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
1053 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
1054 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1055 { /* likely */ }
1056 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1057 {
1058 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1059 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1060 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1061 AssertStmt(rcStrict == VINF_SUCCESS, IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict)));
1062 }
1063 else
1064 {
1065 Log((RT_SUCCESS(rcStrict)
1066 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1067 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1068 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1069 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1070 }
1071 }
1072# endif
1073 /*
1074 * Special read handling, so only read exactly what's needed.
1075 * This is a highly unlikely scenario.
1076 */
1077 else
1078 {
1079 pVCpu->iem.s.CodeTlb.cTlbSlowReadPath++;
1080
1081 /* Check instruction length. */
1082 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1083 if (RT_LIKELY(cbInstr + cbDst <= 15))
1084 { /* likely */ }
1085 else
1086 {
1087 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0) [slow]\n",
1088 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, cbDst));
1089 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1090 }
1091
1092 /* Do the reading. */
1093 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
1094 if (cbToRead > 0)
1095 {
1096 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
1097 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
1098 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1099 { /* likely */ }
1100 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1101 {
1102 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1103 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1104 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1105 AssertStmt(rcStrict == VINF_SUCCESS, IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict)));
1106 }
1107 else
1108 {
1109 Log((RT_SUCCESS(rcStrict)
1110 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1111 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1112 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1113 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1114 }
1115 }
1116
1117 /* Update the state and probably return. */
1118 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1119 pVCpu->iem.s.fTbCrossedPage |= offPg == 0 || pVCpu->iem.s.fTbBranched != 0;
1120 pVCpu->iem.s.GCPhysInstrBufPrev = pVCpu->iem.s.GCPhysInstrBuf;
1121
1122 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1123 pVCpu->iem.s.offInstrNextByte = offPg + cbInstr + cbToRead;
1124 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead + cbInstr) - cbToRead - cbInstr;
1125 pVCpu->iem.s.cbInstrBufTotal = X86_PAGE_SIZE; /** @todo ??? */
1126 pVCpu->iem.s.GCPhysInstrBuf = pTlbe->GCPhys;
1127 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1128 pVCpu->iem.s.pbInstrBuf = NULL;
1129 if (cbToRead == cbDst)
1130 return;
1131 }
1132
1133 /*
1134 * More to read, loop.
1135 */
1136 cbDst -= cbMaxRead;
1137 pvDst = (uint8_t *)pvDst + cbMaxRead;
1138 }
1139# else /* !IN_RING3 */
1140 RT_NOREF(pvDst, cbDst);
1141 if (pvDst || cbDst)
1142 IEM_DO_LONGJMP(pVCpu, VERR_INTERNAL_ERROR);
1143# endif /* !IN_RING3 */
1144}
1145
1146#else /* !IEM_WITH_CODE_TLB */
1147
1148/**
1149 * Try fetch at least @a cbMin bytes more opcodes, raise the appropriate
1150 * exception if it fails.
1151 *
1152 * @returns Strict VBox status code.
1153 * @param pVCpu The cross context virtual CPU structure of the
1154 * calling thread.
1155 * @param cbMin The minimum number of bytes relative to offOpcode
1156 * that must be read.
1157 */
1158VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPUCC pVCpu, size_t cbMin) RT_NOEXCEPT
1159{
1160 /*
1161 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1162 *
1163 * First translate CS:rIP to a physical address.
1164 */
1165 uint8_t const cbOpcode = pVCpu->iem.s.cbOpcode;
1166 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
1167 uint8_t const cbLeft = cbOpcode - offOpcode;
1168 Assert(cbLeft < cbMin);
1169 Assert(cbOpcode <= sizeof(pVCpu->iem.s.abOpcode));
1170
1171 uint32_t cbToTryRead;
1172 RTGCPTR GCPtrNext;
1173 if (IEM_IS_64BIT_CODE(pVCpu))
1174 {
1175 GCPtrNext = pVCpu->cpum.GstCtx.rip + cbOpcode;
1176 if (!IEM_IS_CANONICAL(GCPtrNext))
1177 return iemRaiseGeneralProtectionFault0(pVCpu);
1178 cbToTryRead = GUEST_PAGE_SIZE - (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1179 }
1180 else
1181 {
1182 uint32_t GCPtrNext32 = pVCpu->cpum.GstCtx.eip;
1183 /* Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu)); - this is allowed */
1184 GCPtrNext32 += cbOpcode;
1185 if (GCPtrNext32 > pVCpu->cpum.GstCtx.cs.u32Limit)
1186 /** @todo For CPUs older than the 386, we should not generate \#GP here but wrap around! */
1187 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1188 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrNext32 + 1;
1189 if (!cbToTryRead) /* overflowed */
1190 {
1191 Assert(GCPtrNext32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1192 cbToTryRead = UINT32_MAX;
1193 /** @todo check out wrapping around the code segment. */
1194 }
1195 if (cbToTryRead < cbMin - cbLeft)
1196 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1197 GCPtrNext = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrNext32;
1198
1199 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1200 if (cbToTryRead > cbLeftOnPage)
1201 cbToTryRead = cbLeftOnPage;
1202 }
1203
1204 /* Restrict to opcode buffer space.
1205
1206 We're making ASSUMPTIONS here based on work done previously in
1207 iemInitDecoderAndPrefetchOpcodes, where bytes from the first page will
1208 be fetched in case of an instruction crossing two pages. */
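 /* Illustration: for a 5 byte instruction that starts 2 bytes before a page
    boundary, iemInitDecoderAndPrefetchOpcodes read the 2 bytes available on the
    first page, and the first opcode getter needing more lands here to append the
    remaining 3 bytes from the following page to abOpcode. */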
1209 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - cbOpcode)
1210 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - cbOpcode;
1211 if (RT_LIKELY(cbToTryRead + cbLeft >= cbMin))
1212 { /* likely */ }
1213 else
1214 {
1215 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0)\n",
1216 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, offOpcode, cbMin));
1217 return iemRaiseGeneralProtectionFault0(pVCpu);
1218 }
1219
1220 PGMPTWALK Walk;
1221 int rc = PGMGstGetPage(pVCpu, GCPtrNext, &Walk);
1222 if (RT_FAILURE(rc))
1223 {
1224 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
1225#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1226 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1227 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
1228#endif
1229 return iemRaisePageFault(pVCpu, GCPtrNext, 1, IEM_ACCESS_INSTRUCTION, rc);
1230 }
1231 if (!(Walk.fEffective & X86_PTE_US) && IEM_GET_CPL(pVCpu) == 3)
1232 {
1233 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
1234#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1235 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1236 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
1237#endif
1238 return iemRaisePageFault(pVCpu, GCPtrNext, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1239 }
1240 if ((Walk.fEffective & X86_PTE_PAE_NX) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
1241 {
1242 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
1243#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1244 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1245 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
1246#endif
1247 return iemRaisePageFault(pVCpu, GCPtrNext, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1248 }
1249 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1250 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, cbOpcode));
1251 /** @todo Check reserved bits and such stuff. PGM is better at doing
1252 * that, so do it when implementing the guest virtual address
1253 * TLB... */
1254
1255 /*
1256 * Read the bytes at this address.
1257 *
1258 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
1259 * and since PATM should only patch the start of an instruction there
1260 * should be no need to check again here.
1261 */
1262 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
1263 {
1264 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[cbOpcode],
1265 cbToTryRead, PGMACCESSORIGIN_IEM);
1266 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1267 { /* likely */ }
1268 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1269 {
1270 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1271 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1272 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1273 }
1274 else
1275 {
1276 Log((RT_SUCCESS(rcStrict)
1277 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1278 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1279 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1280 return rcStrict;
1281 }
1282 }
1283 else
1284 {
1285 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[cbOpcode], GCPhys, cbToTryRead);
1286 if (RT_SUCCESS(rc))
1287 { /* likely */ }
1288 else
1289 {
1290 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
1291 return rc;
1292 }
1293 }
1294 pVCpu->iem.s.cbOpcode = cbOpcode + cbToTryRead;
1295 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
1296
1297 return VINF_SUCCESS;
1298}
1299
1300#endif /* !IEM_WITH_CODE_TLB */
1301#ifndef IEM_WITH_SETJMP
1302
1303/**
1304 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
1305 *
1306 * @returns Strict VBox status code.
1307 * @param pVCpu The cross context virtual CPU structure of the
1308 * calling thread.
1309 * @param pb Where to return the opcode byte.
1310 */
1311VBOXSTRICTRC iemOpcodeGetNextU8Slow(PVMCPUCC pVCpu, uint8_t *pb) RT_NOEXCEPT
1312{
1313 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1314 if (rcStrict == VINF_SUCCESS)
1315 {
1316 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1317 *pb = pVCpu->iem.s.abOpcode[offOpcode];
1318 pVCpu->iem.s.offOpcode = offOpcode + 1;
1319 }
1320 else
1321 *pb = 0;
1322 return rcStrict;
1323}
1324
1325#else /* IEM_WITH_SETJMP */
1326
1327/**
1328 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
1329 *
1330 * @returns The opcode byte.
1331 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1332 */
1333uint8_t iemOpcodeGetNextU8SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1334{
1335# ifdef IEM_WITH_CODE_TLB
1336 uint8_t u8;
1337 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
1338 return u8;
1339# else
1340 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1341 if (rcStrict == VINF_SUCCESS)
1342 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
1343 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1344# endif
1345}
1346
1347#endif /* IEM_WITH_SETJMP */
1348
1349#ifndef IEM_WITH_SETJMP
1350
1351/**
1352 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
1353 *
1354 * @returns Strict VBox status code.
1355 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1356 * @param pu16 Where to return the opcode word.
1357 */
1358VBOXSTRICTRC iemOpcodeGetNextS8SxU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
1359{
1360 uint8_t u8;
1361 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1362 if (rcStrict == VINF_SUCCESS)
1363 *pu16 = (int8_t)u8;
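 /* The cast through int8_t sign-extends the byte, e.g. 0x80 yields 0xFF80; the
    U32/U64 variants below work the same way. */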
1364 return rcStrict;
1365}
1366
1367
1368/**
1369 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
1370 *
1371 * @returns Strict VBox status code.
1372 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1373 * @param pu32 Where to return the opcode dword.
1374 */
1375VBOXSTRICTRC iemOpcodeGetNextS8SxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1376{
1377 uint8_t u8;
1378 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1379 if (rcStrict == VINF_SUCCESS)
1380 *pu32 = (int8_t)u8;
1381 return rcStrict;
1382}
1383
1384
1385/**
1386 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
1387 *
1388 * @returns Strict VBox status code.
1389 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1390 * @param pu64 Where to return the opcode qword.
1391 */
1392VBOXSTRICTRC iemOpcodeGetNextS8SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1393{
1394 uint8_t u8;
1395 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1396 if (rcStrict == VINF_SUCCESS)
1397 *pu64 = (int8_t)u8;
1398 return rcStrict;
1399}
1400
1401#endif /* !IEM_WITH_SETJMP */
1402
1403
1404#ifndef IEM_WITH_SETJMP
1405
1406/**
1407 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
1408 *
1409 * @returns Strict VBox status code.
1410 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1411 * @param pu16 Where to return the opcode word.
1412 */
1413VBOXSTRICTRC iemOpcodeGetNextU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
1414{
1415 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1416 if (rcStrict == VINF_SUCCESS)
1417 {
1418 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1419# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1420 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1421# else
1422 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1423# endif
1424 pVCpu->iem.s.offOpcode = offOpcode + 2;
1425 }
1426 else
1427 *pu16 = 0;
1428 return rcStrict;
1429}
1430
1431#else /* IEM_WITH_SETJMP */
1432
1433/**
1434 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error
1435 *
1436 * @returns The opcode word.
1437 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1438 */
1439uint16_t iemOpcodeGetNextU16SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1440{
1441# ifdef IEM_WITH_CODE_TLB
1442 uint16_t u16;
1443 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
1444 return u16;
1445# else
1446 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1447 if (rcStrict == VINF_SUCCESS)
1448 {
1449 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1450 pVCpu->iem.s.offOpcode += 2;
1451# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1452 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1453# else
1454 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1455# endif
1456 }
1457 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1458# endif
1459}
1460
1461#endif /* IEM_WITH_SETJMP */
1462
1463#ifndef IEM_WITH_SETJMP
1464
1465/**
1466 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
1467 *
1468 * @returns Strict VBox status code.
1469 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1470 * @param pu32 Where to return the opcode double word.
1471 */
1472VBOXSTRICTRC iemOpcodeGetNextU16ZxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1473{
1474 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1475 if (rcStrict == VINF_SUCCESS)
1476 {
1477 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1478 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1479 pVCpu->iem.s.offOpcode = offOpcode + 2;
1480 }
1481 else
1482 *pu32 = 0;
1483 return rcStrict;
1484}
1485
1486
1487/**
1488 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
1489 *
1490 * @returns Strict VBox status code.
1491 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1492 * @param pu64 Where to return the opcode quad word.
1493 */
1494VBOXSTRICTRC iemOpcodeGetNextU16ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1495{
1496 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1497 if (rcStrict == VINF_SUCCESS)
1498 {
1499 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1500 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1501 pVCpu->iem.s.offOpcode = offOpcode + 2;
1502 }
1503 else
1504 *pu64 = 0;
1505 return rcStrict;
1506}
1507
1508#endif /* !IEM_WITH_SETJMP */
1509
1510#ifndef IEM_WITH_SETJMP
1511
1512/**
1513 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
1514 *
1515 * @returns Strict VBox status code.
1516 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1517 * @param pu32 Where to return the opcode dword.
1518 */
1519VBOXSTRICTRC iemOpcodeGetNextU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1520{
1521 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1522 if (rcStrict == VINF_SUCCESS)
1523 {
1524 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1525# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1526 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1527# else
1528 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1529 pVCpu->iem.s.abOpcode[offOpcode + 1],
1530 pVCpu->iem.s.abOpcode[offOpcode + 2],
1531 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1532# endif
1533 pVCpu->iem.s.offOpcode = offOpcode + 4;
1534 }
1535 else
1536 *pu32 = 0;
1537 return rcStrict;
1538}
1539
1540#else /* IEM_WITH_SETJMP */
1541
1542/**
1543 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
1544 *
1545 * @returns The opcode dword.
1546 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1547 */
1548uint32_t iemOpcodeGetNextU32SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1549{
1550# ifdef IEM_WITH_CODE_TLB
1551 uint32_t u32;
1552 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
1553 return u32;
1554# else
1555 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1556 if (rcStrict == VINF_SUCCESS)
1557 {
1558 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1559 pVCpu->iem.s.offOpcode = offOpcode + 4;
1560# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1561 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1562# else
1563 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1564 pVCpu->iem.s.abOpcode[offOpcode + 1],
1565 pVCpu->iem.s.abOpcode[offOpcode + 2],
1566 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1567# endif
1568 }
1569 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1570# endif
1571}
1572
1573#endif /* IEM_WITH_SETJMP */
1574
1575#ifndef IEM_WITH_SETJMP
1576
1577/**
1578 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
1579 *
1580 * @returns Strict VBox status code.
1581 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1582 * @param pu64 Where to return the opcode dword, zero extended to a qword.
1583 */
1584VBOXSTRICTRC iemOpcodeGetNextU32ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1585{
1586 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1587 if (rcStrict == VINF_SUCCESS)
1588 {
1589 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1590 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1591 pVCpu->iem.s.abOpcode[offOpcode + 1],
1592 pVCpu->iem.s.abOpcode[offOpcode + 2],
1593 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1594 pVCpu->iem.s.offOpcode = offOpcode + 4;
1595 }
1596 else
1597 *pu64 = 0;
1598 return rcStrict;
1599}
1600
1601
1602/**
1603 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
1604 *
1605 * @returns Strict VBox status code.
1606 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1607 * @param pu64 Where to return the opcode qword.
1608 */
1609VBOXSTRICTRC iemOpcodeGetNextS32SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1610{
1611 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1612 if (rcStrict == VINF_SUCCESS)
1613 {
1614 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1615 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1616 pVCpu->iem.s.abOpcode[offOpcode + 1],
1617 pVCpu->iem.s.abOpcode[offOpcode + 2],
1618 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1619 pVCpu->iem.s.offOpcode = offOpcode + 4;
1620 }
1621 else
1622 *pu64 = 0;
1623 return rcStrict;
1624}
1625
1626#endif /* !IEM_WITH_SETJMP */
1627
1628#ifndef IEM_WITH_SETJMP
1629
1630/**
1631 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
1632 *
1633 * @returns Strict VBox status code.
1634 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1635 * @param pu64 Where to return the opcode qword.
1636 */
1637VBOXSTRICTRC iemOpcodeGetNextU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1638{
1639 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
1640 if (rcStrict == VINF_SUCCESS)
1641 {
1642 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1643# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1644 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1645# else
1646 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1647 pVCpu->iem.s.abOpcode[offOpcode + 1],
1648 pVCpu->iem.s.abOpcode[offOpcode + 2],
1649 pVCpu->iem.s.abOpcode[offOpcode + 3],
1650 pVCpu->iem.s.abOpcode[offOpcode + 4],
1651 pVCpu->iem.s.abOpcode[offOpcode + 5],
1652 pVCpu->iem.s.abOpcode[offOpcode + 6],
1653 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1654# endif
1655 pVCpu->iem.s.offOpcode = offOpcode + 8;
1656 }
1657 else
1658 *pu64 = 0;
1659 return rcStrict;
1660}
1661
1662#else /* IEM_WITH_SETJMP */
1663
1664/**
1665 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
1666 *
1667 * @returns The opcode qword.
1668 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1669 */
1670uint64_t iemOpcodeGetNextU64SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1671{
1672# ifdef IEM_WITH_CODE_TLB
1673 uint64_t u64;
1674 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
1675 return u64;
1676# else
1677 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
1678 if (rcStrict == VINF_SUCCESS)
1679 {
1680 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1681 pVCpu->iem.s.offOpcode = offOpcode + 8;
1682# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1683 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1684# else
1685 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1686 pVCpu->iem.s.abOpcode[offOpcode + 1],
1687 pVCpu->iem.s.abOpcode[offOpcode + 2],
1688 pVCpu->iem.s.abOpcode[offOpcode + 3],
1689 pVCpu->iem.s.abOpcode[offOpcode + 4],
1690 pVCpu->iem.s.abOpcode[offOpcode + 5],
1691 pVCpu->iem.s.abOpcode[offOpcode + 6],
1692 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1693# endif
1694 }
1695 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1696# endif
1697}
1698
1699#endif /* IEM_WITH_SETJMP */
1700
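/* Illustrative sketch (not part of the build): the IEM_USE_UNALIGNED_DATA_ACCESS
 * branches above are equivalent on little-endian hosts; the define merely trades the
 * explicit byte assembly for a single unaligned load.  The helper name below is
 * hypothetical, everything else uses IPRT macros already used in this file.
 *
 *      static uint64_t iemExampleAssembleU64(uint8_t const *pab)
 *      {
 *      # ifdef IEM_USE_UNALIGNED_DATA_ACCESS
 *          return *(uint64_t const *)pab;                              // one unaligned load
 *      # else
 *          return RT_MAKE_U64_FROM_U8(pab[0], pab[1], pab[2], pab[3],  // byte-wise,
 *                                     pab[4], pab[5], pab[6], pab[7]); // little endian
 *      # endif
 *      }
 */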
1701
1702
1703/** @name Misc Worker Functions.
1704 * @{
1705 */
1706
1707/**
1708 * Gets the exception class for the specified exception vector.
1709 *
1710 * @returns The class of the specified exception.
1711 * @param uVector The exception vector.
1712 */
1713static IEMXCPTCLASS iemGetXcptClass(uint8_t uVector) RT_NOEXCEPT
1714{
1715 Assert(uVector <= X86_XCPT_LAST);
1716 switch (uVector)
1717 {
1718 case X86_XCPT_DE:
1719 case X86_XCPT_TS:
1720 case X86_XCPT_NP:
1721 case X86_XCPT_SS:
1722 case X86_XCPT_GP:
1723 case X86_XCPT_SX: /* AMD only */
1724 return IEMXCPTCLASS_CONTRIBUTORY;
1725
1726 case X86_XCPT_PF:
1727 case X86_XCPT_VE: /* Intel only */
1728 return IEMXCPTCLASS_PAGE_FAULT;
1729
1730 case X86_XCPT_DF:
1731 return IEMXCPTCLASS_DOUBLE_FAULT;
1732 }
1733 return IEMXCPTCLASS_BENIGN;
1734}
1735
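/* Usage sketch (illustrative only): the class returned here feeds the recursive
 * exception evaluation below.  E.g. a #GP raised while delivering a #NP gives two
 * contributory classes, which IEMEvaluateRecursiveXcpt turns into a double fault.
 *
 *      IEMXCPTCLASS const enmPrev = iemGetXcptClass(X86_XCPT_NP); // IEMXCPTCLASS_CONTRIBUTORY
 *      IEMXCPTCLASS const enmCur  = iemGetXcptClass(X86_XCPT_GP); // IEMXCPTCLASS_CONTRIBUTORY
 *      // contributory + contributory -> IEMXCPTRAISE_DOUBLE_FAULT (see below)
 */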
1736
1737/**
1738 * Evaluates how to handle an exception caused during delivery of another event
1739 * (exception / interrupt).
1740 *
1741 * @returns How to handle the recursive exception.
1742 * @param pVCpu The cross context virtual CPU structure of the
1743 * calling thread.
1744 * @param fPrevFlags The flags of the previous event.
1745 * @param uPrevVector The vector of the previous event.
1746 * @param fCurFlags The flags of the current exception.
1747 * @param uCurVector The vector of the current exception.
1748 * @param pfXcptRaiseInfo Where to store additional information about the
1749 * exception condition. Optional.
1750 */
1751VMM_INT_DECL(IEMXCPTRAISE) IEMEvaluateRecursiveXcpt(PVMCPUCC pVCpu, uint32_t fPrevFlags, uint8_t uPrevVector, uint32_t fCurFlags,
1752 uint8_t uCurVector, PIEMXCPTRAISEINFO pfXcptRaiseInfo)
1753{
1754 /*
1755 * Only CPU exceptions can be raised while delivering other events; software interrupt
1756 * (INTn/INT3/INTO/ICEBP) generated events cannot occur as the current (second) exception.
1757 */
1758 AssertReturn(fCurFlags & IEM_XCPT_FLAGS_T_CPU_XCPT, IEMXCPTRAISE_INVALID);
1759 Assert(pVCpu); RT_NOREF(pVCpu);
1760 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x\n", uPrevVector, uCurVector));
1761
1762 IEMXCPTRAISE enmRaise = IEMXCPTRAISE_CURRENT_XCPT;
1763 IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
1764 if (fPrevFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
1765 {
1766 IEMXCPTCLASS enmPrevXcptClass = iemGetXcptClass(uPrevVector);
1767 if (enmPrevXcptClass != IEMXCPTCLASS_BENIGN)
1768 {
1769 IEMXCPTCLASS enmCurXcptClass = iemGetXcptClass(uCurVector);
1770 if ( enmPrevXcptClass == IEMXCPTCLASS_PAGE_FAULT
1771 && ( enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT
1772 || enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY))
1773 {
1774 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
1775 fRaiseInfo = enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT ? IEMXCPTRAISEINFO_PF_PF
1776 : IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT;
1777 Log2(("IEMEvaluateRecursiveXcpt: Vectoring page fault. uPrevVector=%#x uCurVector=%#x uCr2=%#RX64\n", uPrevVector,
1778 uCurVector, pVCpu->cpum.GstCtx.cr2));
1779 }
1780 else if ( enmPrevXcptClass == IEMXCPTCLASS_CONTRIBUTORY
1781 && enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY)
1782 {
1783 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
1784 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x -> #DF\n", uPrevVector, uCurVector));
1785 }
1786 else if ( enmPrevXcptClass == IEMXCPTCLASS_DOUBLE_FAULT
1787 && ( enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY
1788 || enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT))
1789 {
1790 enmRaise = IEMXCPTRAISE_TRIPLE_FAULT;
1791 Log2(("IEMEvaluateRecursiveXcpt: #DF handler raised a %#x exception -> triple fault\n", uCurVector));
1792 }
1793 }
1794 else
1795 {
1796 if (uPrevVector == X86_XCPT_NMI)
1797 {
1798 fRaiseInfo = IEMXCPTRAISEINFO_NMI_XCPT;
1799 if (uCurVector == X86_XCPT_PF)
1800 {
1801 fRaiseInfo |= IEMXCPTRAISEINFO_NMI_PF;
1802 Log2(("IEMEvaluateRecursiveXcpt: NMI delivery caused a page fault\n"));
1803 }
1804 }
1805 else if ( uPrevVector == X86_XCPT_AC
1806 && uCurVector == X86_XCPT_AC)
1807 {
1808 enmRaise = IEMXCPTRAISE_CPU_HANG;
1809 fRaiseInfo = IEMXCPTRAISEINFO_AC_AC;
1810 Log2(("IEMEvaluateRecursiveXcpt: Recursive #AC - Bad guest\n"));
1811 }
1812 }
1813 }
1814 else if (fPrevFlags & IEM_XCPT_FLAGS_T_EXT_INT)
1815 {
1816 fRaiseInfo = IEMXCPTRAISEINFO_EXT_INT_XCPT;
1817 if (uCurVector == X86_XCPT_PF)
1818 fRaiseInfo |= IEMXCPTRAISEINFO_EXT_INT_PF;
1819 }
1820 else
1821 {
1822 Assert(fPrevFlags & IEM_XCPT_FLAGS_T_SOFT_INT);
1823 fRaiseInfo = IEMXCPTRAISEINFO_SOFT_INT_XCPT;
1824 }
1825
1826 if (pfXcptRaiseInfo)
1827 *pfXcptRaiseInfo = fRaiseInfo;
1828 return enmRaise;
1829}
1830
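/* Minimal caller sketch (illustrative; the surrounding context is hypothetical): how
 * an exception-injection path might consult this helper to decide between delivering
 * the second exception as-is and converting it into a #DF.
 *
 *      IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
 *      IEMXCPTRAISE const enmRaise = IEMEvaluateRecursiveXcpt(pVCpu,
 *                                                             IEM_XCPT_FLAGS_T_CPU_XCPT, X86_XCPT_NP,
 *                                                             IEM_XCPT_FLAGS_T_CPU_XCPT, X86_XCPT_GP,
 *                                                             &fRaiseInfo);
 *      if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
 *          u8Vector = X86_XCPT_DF;  // deliver #DF (zero error code) instead of the #GP
 */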
1831
1832/**
1833 * Enters the CPU shutdown state initiated by a triple fault or other
1834 * unrecoverable conditions.
1835 *
1836 * @returns Strict VBox status code.
1837 * @param pVCpu The cross context virtual CPU structure of the
1838 * calling thread.
1839 */
1840static VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPUCC pVCpu) RT_NOEXCEPT
1841{
1842 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
1843 IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(pVCpu, VMX_EXIT_TRIPLE_FAULT, 0 /* u64ExitQual */);
1844
1845 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
1846 {
1847 Log2(("shutdown: Guest intercept -> #VMEXIT\n"));
1848 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
1849 }
1850
1851 RT_NOREF(pVCpu);
1852 return VINF_EM_TRIPLE_FAULT;
1853}
1854
1855
1856/**
1857 * Validates a new SS segment.
1858 *
1859 * @returns VBox strict status code.
1860 * @param pVCpu The cross context virtual CPU structure of the
1861 * calling thread.
1862 * @param NewSS The new SS selector.
1863 * @param uCpl The CPL to load the stack for.
1864 * @param pDesc Where to return the descriptor.
1865 */
1866static VBOXSTRICTRC iemMiscValidateNewSS(PVMCPUCC pVCpu, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc) RT_NOEXCEPT
1867{
1868 /* Null selectors are not allowed (we're not called for dispatching
1869 interrupts with SS=0 in long mode). */
1870 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
1871 {
1872 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
1873 return iemRaiseTaskSwitchFault0(pVCpu);
1874 }
1875
1876 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
1877 if ((NewSS & X86_SEL_RPL) != uCpl)
1878 {
1879 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl));
1880 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1881 }
1882
1883 /*
1884 * Read the descriptor.
1885 */
1886 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
1887 if (rcStrict != VINF_SUCCESS)
1888 return rcStrict;
1889
1890 /*
1891 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
1892 */
1893 if (!pDesc->Legacy.Gen.u1DescType)
1894 {
1895 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
1896 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1897 }
1898
1899 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
1900 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
1901 {
1902 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
1903 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1904 }
1905 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
1906 {
1907 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
1908 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1909 }
1910
1911 /* Is it there? */
1912 /** @todo testcase: Is this checked before the canonical / limit check below? */
1913 if (!pDesc->Legacy.Gen.u1Present)
1914 {
1915 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
1916 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
1917 }
1918
1919 return VINF_SUCCESS;
1920}
1921
1922/** @} */
1923
1924
1925/** @name Raising Exceptions.
1926 *
1927 * @{
1928 */
1929
1930
1931/**
1932 * Loads the specified stack far pointer from the TSS.
1933 *
1934 * @returns VBox strict status code.
1935 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1936 * @param uCpl The CPL to load the stack for.
1937 * @param pSelSS Where to return the new stack segment.
1938 * @param puEsp Where to return the new stack pointer.
1939 */
1940static VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPUCC pVCpu, uint8_t uCpl, PRTSEL pSelSS, uint32_t *puEsp) RT_NOEXCEPT
1941{
1942 VBOXSTRICTRC rcStrict;
1943 Assert(uCpl < 4);
1944
1945 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
1946 switch (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type)
1947 {
1948 /*
1949 * 16-bit TSS (X86TSS16).
1950 */
1951 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
1952 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
1953 {
1954 uint32_t off = uCpl * 4 + 2;
1955 if (off + 4 <= pVCpu->cpum.GstCtx.tr.u32Limit)
1956 {
1957 /** @todo check actual access pattern here. */
1958 uint32_t u32Tmp = 0; /* gcc maybe... */
1959 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
1960 if (rcStrict == VINF_SUCCESS)
1961 {
1962 *puEsp = RT_LOWORD(u32Tmp);
1963 *pSelSS = RT_HIWORD(u32Tmp);
1964 return VINF_SUCCESS;
1965 }
1966 }
1967 else
1968 {
1969 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
1970 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
1971 }
1972 break;
1973 }
1974
1975 /*
1976 * 32-bit TSS (X86TSS32).
1977 */
1978 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
1979 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
1980 {
1981 uint32_t off = uCpl * 8 + 4;
1982 if (off + 7 <= pVCpu->cpum.GstCtx.tr.u32Limit)
1983 {
1984/** @todo check actual access pattern here. */
1985 uint64_t u64Tmp;
1986 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
1987 if (rcStrict == VINF_SUCCESS)
1988 {
1989 *puEsp = u64Tmp & UINT32_MAX;
1990 *pSelSS = (RTSEL)(u64Tmp >> 32);
1991 return VINF_SUCCESS;
1992 }
1993 }
1994 else
1995 {
1996 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
1997 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
1998 }
1999 break;
2000 }
2001
2002 default:
2003 AssertFailed();
2004 rcStrict = VERR_IEM_IPE_4;
2005 break;
2006 }
2007
2008 *puEsp = 0; /* make gcc happy */
2009 *pSelSS = 0; /* make gcc happy */
2010 return rcStrict;
2011}
2012
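/* Layout sketch (illustrative): the offsets used above come straight from the TSS
 * layouts - the 16-bit TSS keeps { SPn, SSn } pairs from offset 2, the 32-bit TSS
 * keeps { ESPn, SSn } pairs from offset 4.  The helper name is hypothetical.
 *
 *      static uint32_t iemExampleTssStackOff(bool fTss386, uint8_t uCpl)
 *      {
 *          return fTss386
 *               ? uCpl * 8 + 4   // X86TSS32: ESP0 at 4, ESP1 at 12, ESP2 at 20; SSn follows.
 *               : uCpl * 4 + 2;  // X86TSS16: SP0  at 2, SP1  at  6, SP2  at 10; SSn follows.
 *      }
 */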
2013
2014/**
2015 * Loads the specified stack pointer from the 64-bit TSS.
2016 *
2017 * @returns VBox strict status code.
2018 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2019 * @param uCpl The CPL to load the stack for.
2020 * @param uIst The interrupt stack table index, 0 to use the uCpl stack instead.
2021 * @param puRsp Where to return the new stack pointer.
2022 */
2023static VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPUCC pVCpu, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp) RT_NOEXCEPT
2024{
2025 Assert(uCpl < 4);
2026 Assert(uIst < 8);
2027 *puRsp = 0; /* make gcc happy */
2028
2029 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
2030 AssertReturn(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
2031
2032 uint32_t off;
2033 if (uIst)
2034 off = (uIst - 1) * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, ist1);
2035 else
2036 off = uCpl * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, rsp0);
2037 if (off + sizeof(uint64_t) > pVCpu->cpum.GstCtx.tr.u32Limit)
2038 {
2039 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pVCpu->cpum.GstCtx.tr.u32Limit));
2040 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
2041 }
2042
2043 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
2044}
2045
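/* Caller sketch (illustrative, hypothetical context): fetching the ring-0 stack from
 * the 64-bit TSS when an interrupt gate specifies no IST entry.
 *
 *      uint64_t uNewRsp = 0;
 *      VBOXSTRICTRC rcStrict2 = iemRaiseLoadStackFromTss64(pVCpu, 0, 0, &uNewRsp); // uCpl=0, uIst=0
 *      if (rcStrict2 != VINF_SUCCESS)
 *          return rcStrict2;    // out-of-bounds TSS -> #TS with the current TSS selector
 */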
2046
2047/**
2048 * Adjust the CPU state according to the exception being raised.
2049 *
2050 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2051 * @param u8Vector The exception that has been raised.
2052 */
2053DECLINLINE(void) iemRaiseXcptAdjustState(PVMCPUCC pVCpu, uint8_t u8Vector)
2054{
2055 switch (u8Vector)
2056 {
2057 case X86_XCPT_DB:
2058 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);
2059 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
2060 break;
2061 /** @todo Read the AMD and Intel exception reference... */
2062 }
2063}
2064
2065
2066/**
2067 * Implements exceptions and interrupts for real mode.
2068 *
2069 * @returns VBox strict status code.
2070 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2071 * @param cbInstr The number of bytes to offset rIP by in the return
2072 * address.
2073 * @param u8Vector The interrupt / exception vector number.
2074 * @param fFlags The flags.
2075 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2076 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2077 */
2078static VBOXSTRICTRC
2079iemRaiseXcptOrIntInRealMode(PVMCPUCC pVCpu,
2080 uint8_t cbInstr,
2081 uint8_t u8Vector,
2082 uint32_t fFlags,
2083 uint16_t uErr,
2084 uint64_t uCr2) RT_NOEXCEPT
2085{
2086 NOREF(uErr); NOREF(uCr2);
2087 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2088
2089 /*
2090 * Read the IDT entry.
2091 */
2092 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
2093 {
2094 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
2095 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2096 }
2097 RTFAR16 Idte;
2098 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(4) * u8Vector);
2099 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2100 {
2101 Log(("iemRaiseXcptOrIntInRealMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
2102 return rcStrict;
2103 }
2104
2105 /*
2106 * Push the stack frame.
2107 */
2108 uint16_t *pu16Frame;
2109 uint64_t uNewRsp;
2110 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, 3, (void **)&pu16Frame, &uNewRsp);
2111 if (rcStrict != VINF_SUCCESS)
2112 return rcStrict;
2113
2114 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
2115#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
2116 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
2117 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
2118 fEfl |= UINT16_C(0xf000);
2119#endif
2120 pu16Frame[2] = (uint16_t)fEfl;
2121 pu16Frame[1] = (uint16_t)pVCpu->cpum.GstCtx.cs.Sel;
2122 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
2123 rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu16Frame, uNewRsp);
2124 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2125 return rcStrict;
2126
2127 /*
2128 * Load the vector address into cs:ip and make exception specific state
2129 * adjustments.
2130 */
2131 pVCpu->cpum.GstCtx.cs.Sel = Idte.sel;
2132 pVCpu->cpum.GstCtx.cs.ValidSel = Idte.sel;
2133 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
2134 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)Idte.sel << 4;
2135 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
2136 pVCpu->cpum.GstCtx.rip = Idte.off;
2137 fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC);
2138 IEMMISC_SET_EFL(pVCpu, fEfl);
2139
2140 /** @todo do we actually do this in real mode? */
2141 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
2142 iemRaiseXcptAdjustState(pVCpu, u8Vector);
2143
2144 /* The IEM_F_MODE_XXX and IEM_F_X86_CPL_MASK don't really change here,
2145 so best leave them alone in case we're in a weird kind of real mode... */
2146
2147 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
2148}
2149
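/* Frame sketch (illustrative): what the real-mode path above reads and writes.  Each
 * IVT entry is a 4 byte far pointer at IDTR.base + vector * 4, and the pushed frame is
 * FLAGS, CS, IP - exactly the three pu16Frame stores made before loading CS:IP.
 *
 *      RTGCPTR const GCPtrIdte = pVCpu->cpum.GstCtx.idtr.pIdt + (uint32_t)u8Vector * 4;
 *      // pu16Frame[2] = FLAGS, pu16Frame[1] = CS, pu16Frame[0] = return IP
 */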
2150
2151/**
2152 * Loads a NULL data selector into a segment register when coming from V8086 mode.
2153 *
2154 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2155 * @param pSReg Pointer to the segment register.
2156 */
2157DECLINLINE(void) iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPUCC pVCpu, PCPUMSELREG pSReg)
2158{
2159 pSReg->Sel = 0;
2160 pSReg->ValidSel = 0;
2161 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2162 {
2163 /* VT-x (Intel 3960x) doesn't change the base and limit, clears and sets the following attributes */
2164 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
2165 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
2166 }
2167 else
2168 {
2169 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2170 /** @todo check this on AMD-V */
2171 pSReg->u64Base = 0;
2172 pSReg->u32Limit = 0;
2173 }
2174}
2175
2176
2177/**
2178 * Loads a segment selector during a task switch in V8086 mode.
2179 *
2180 * @param pSReg Pointer to the segment register.
2181 * @param uSel The selector value to load.
2182 */
2183DECLINLINE(void) iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
2184{
2185 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
2186 pSReg->Sel = uSel;
2187 pSReg->ValidSel = uSel;
2188 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2189 pSReg->u64Base = uSel << 4;
2190 pSReg->u32Limit = 0xffff;
2191 pSReg->Attr.u = 0xf3;
2192}
2193
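/* Value sketch (illustrative): the fixed V8086 segment shape produced above, e.g. for
 * uSel=0x1234 - base 0x12340, limit 0xffff, attributes 0xf3 (present, DPL=3, accessed
 * read/write data).
 *
 *      CPUMSELREG SRegExample;
 *      iemHlpLoadSelectorInV86Mode(&SRegExample, 0x1234);
 *      Assert(   SRegExample.u64Base  == UINT64_C(0x12340)
 *             && SRegExample.u32Limit == 0xffff
 *             && SRegExample.Attr.u   == 0xf3);
 */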
2194
2195/**
2196 * Loads a segment selector during a task switch in protected mode.
2197 *
2198 * In this task switch scenario, we would throw \#TS exceptions rather than
2199 * \#GPs.
2200 *
2201 * @returns VBox strict status code.
2202 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2203 * @param pSReg Pointer to the segment register.
2204 * @param uSel The new selector value.
2205 *
2206 * @remarks This does _not_ handle CS or SS.
2207 * @remarks This expects IEM_GET_CPL(pVCpu) to return an up to date value.
2208 */
2209static VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPUCC pVCpu, PCPUMSELREG pSReg, uint16_t uSel) RT_NOEXCEPT
2210{
2211 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2212
2213 /* Null data selector. */
2214 if (!(uSel & X86_SEL_MASK_OFF_RPL))
2215 {
2216 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
2217 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
2218 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2219 return VINF_SUCCESS;
2220 }
2221
2222 /* Fetch the descriptor. */
2223 IEMSELDESC Desc;
2224 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
2225 if (rcStrict != VINF_SUCCESS)
2226 {
2227 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
2228 VBOXSTRICTRC_VAL(rcStrict)));
2229 return rcStrict;
2230 }
2231
2232 /* Must be a data segment or readable code segment. */
2233 if ( !Desc.Legacy.Gen.u1DescType
2234 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
2235 {
2236 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
2237 Desc.Legacy.Gen.u4Type));
2238 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2239 }
2240
2241 /* Check privileges for data segments and non-conforming code segments. */
2242 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2243 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2244 {
2245 /* The RPL and the new CPL must be less than or equal to the DPL. */
2246 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
2247 || (IEM_GET_CPL(pVCpu) > Desc.Legacy.Gen.u2Dpl))
2248 {
2249 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
2250 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
2251 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2252 }
2253 }
2254
2255 /* Is it there? */
2256 if (!Desc.Legacy.Gen.u1Present)
2257 {
2258 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
2259 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2260 }
2261
2262 /* The base and limit. */
2263 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
2264 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
2265
2266 /*
2267 * Ok, everything checked out fine. Now set the accessed bit before
2268 * committing the result into the registers.
2269 */
2270 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2271 {
2272 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
2273 if (rcStrict != VINF_SUCCESS)
2274 return rcStrict;
2275 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2276 }
2277
2278 /* Commit */
2279 pSReg->Sel = uSel;
2280 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
2281 pSReg->u32Limit = cbLimit;
2282 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
2283 pSReg->ValidSel = uSel;
2284 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2285 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2286 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
2287
2288 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
2289 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2290 return VINF_SUCCESS;
2291}
2292
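/* Rule sketch (illustrative): the privilege check above reduces to "max(RPL, CPL) <= DPL"
 * for data segments and non-conforming code segments; conforming code segments skip it.
 *
 *      bool const fConformingCode = (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
 *                                ==                           (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF);
 *      bool const fPrivOkay       = fConformingCode
 *                                || (   (uSel & X86_SEL_RPL) <= Desc.Legacy.Gen.u2Dpl
 *                                    && IEM_GET_CPL(pVCpu)   <= Desc.Legacy.Gen.u2Dpl);
 */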
2293
2294/**
2295 * Performs a task switch.
2296 *
2297 * If the task switch is the result of a JMP, CALL or IRET instruction, the
2298 * caller is responsible for performing the necessary checks (like DPL, TSS
2299 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
2300 * reference for JMP, CALL, IRET.
2301 *
2302 * If the task switch is due to a software interrupt or hardware exception,
2303 * the caller is responsible for validating the TSS selector and descriptor. See
2304 * Intel Instruction reference for INT n.
2305 *
2306 * @returns VBox strict status code.
2307 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2308 * @param enmTaskSwitch The cause of the task switch.
2309 * @param uNextEip The EIP effective after the task switch.
2310 * @param fFlags The flags, see IEM_XCPT_FLAGS_XXX.
2311 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2312 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2313 * @param SelTSS The TSS selector of the new task.
2314 * @param pNewDescTSS Pointer to the new TSS descriptor.
2315 */
2316VBOXSTRICTRC
2317iemTaskSwitch(PVMCPUCC pVCpu,
2318 IEMTASKSWITCH enmTaskSwitch,
2319 uint32_t uNextEip,
2320 uint32_t fFlags,
2321 uint16_t uErr,
2322 uint64_t uCr2,
2323 RTSEL SelTSS,
2324 PIEMSELDESC pNewDescTSS) RT_NOEXCEPT
2325{
2326 Assert(!IEM_IS_REAL_MODE(pVCpu));
2327 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2328 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2329
2330 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
2331 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2332 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2333 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2334 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2335
2336 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2337 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2338
2339 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTSS,
2340 fIsNewTSS386, pVCpu->cpum.GstCtx.eip, uNextEip));
2341
2342 /* Update CR2 in case it's a page-fault. */
2343 /** @todo This should probably be done much earlier in IEM/PGM. See
2344 * @bugref{5653#c49}. */
2345 if (fFlags & IEM_XCPT_FLAGS_CR2)
2346 pVCpu->cpum.GstCtx.cr2 = uCr2;
2347
2348 /*
2349 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
2350 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
2351 */
2352 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
2353 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
2354 if (uNewTSSLimit < uNewTSSLimitMin)
2355 {
2356 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
2357 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
2358 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
2359 }
2360
2361 /*
2362 * Task switches in VMX non-root mode always cause task-switch VM-exits.
2363 * The new TSS must have been read and validated (DPL, limits etc.) before a
2364 * task-switch VM-exit commences.
2365 *
2366 * See Intel spec. 25.4.2 "Treatment of Task Switches".
2367 */
2368 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
2369 {
2370 Log(("iemTaskSwitch: Guest intercept (source=%u, sel=%#x) -> VM-exit.\n", enmTaskSwitch, SelTSS));
2371 IEM_VMX_VMEXIT_TASK_SWITCH_RET(pVCpu, enmTaskSwitch, SelTSS, uNextEip - pVCpu->cpum.GstCtx.eip);
2372 }
2373
2374 /*
2375 * The SVM nested-guest intercept for task-switch takes priority over all exceptions
2376 * after validating the incoming (new) TSS, see AMD spec. 15.14.1 "Task Switch Intercept".
2377 */
2378 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TASK_SWITCH))
2379 {
2380 uint32_t const uExitInfo1 = SelTSS;
2381 uint32_t uExitInfo2 = uErr;
2382 switch (enmTaskSwitch)
2383 {
2384 case IEMTASKSWITCH_JUMP: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_JUMP; break;
2385 case IEMTASKSWITCH_IRET: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_IRET; break;
2386 default: break;
2387 }
2388 if (fFlags & IEM_XCPT_FLAGS_ERR)
2389 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_HAS_ERROR_CODE;
2390 if (pVCpu->cpum.GstCtx.eflags.Bits.u1RF)
2391 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_EFLAGS_RF;
2392
2393 Log(("iemTaskSwitch: Guest intercept -> #VMEXIT. uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uExitInfo1, uExitInfo2));
2394 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_TASK_SWITCH, uExitInfo1, uExitInfo2);
2395 RT_NOREF2(uExitInfo1, uExitInfo2);
2396 }
2397
2398 /*
2399 * Check the current TSS limit. The last written byte to the current TSS during the
2400 * task switch will be 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
2401 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2402 *
2403 * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
2404 * end up with smaller than "legal" TSS limits.
2405 */
2406 uint32_t const uCurTSSLimit = pVCpu->cpum.GstCtx.tr.u32Limit;
2407 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
2408 if (uCurTSSLimit < uCurTSSLimitMin)
2409 {
2410 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
2411 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
2412 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
2413 }
2414
2415 /*
2416 * Verify that the new TSS can be accessed and map it. Map only the required contents
2417 * and not the entire TSS.
2418 */
2419 void *pvNewTSS;
2420 uint32_t const cbNewTSS = uNewTSSLimitMin + 1;
2421 RTGCPTR const GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
2422 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
2423 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
2424 * not perform correct translation if this happens. See Intel spec. 7.2.1
2425 * "Task-State Segment". */
2426 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW, 0);
2427 if (rcStrict != VINF_SUCCESS)
2428 {
2429 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
2430 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
2431 return rcStrict;
2432 }
2433
2434 /*
2435 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
2436 */
2437 uint32_t fEFlags = pVCpu->cpum.GstCtx.eflags.u;
2438 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
2439 || enmTaskSwitch == IEMTASKSWITCH_IRET)
2440 {
2441 PX86DESC pDescCurTSS;
2442 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
2443 pVCpu->cpum.GstCtx.gdtr.pGdt + (pVCpu->cpum.GstCtx.tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0);
2444 if (rcStrict != VINF_SUCCESS)
2445 {
2446 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2447 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2448 return rcStrict;
2449 }
2450
2451 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2452 rcStrict = iemMemCommitAndUnmap(pVCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
2453 if (rcStrict != VINF_SUCCESS)
2454 {
2455 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2456 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2457 return rcStrict;
2458 }
2459
2460 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
2461 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
2462 {
2463 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2464 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2465 fEFlags &= ~X86_EFL_NT;
2466 }
2467 }
2468
2469 /*
2470 * Save the CPU state into the current TSS.
2471 */
2472 RTGCPTR const GCPtrCurTSS = pVCpu->cpum.GstCtx.tr.u64Base;
2473 if (GCPtrNewTSS == GCPtrCurTSS)
2474 {
2475 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
2476 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
2477 pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.u, pVCpu->cpum.GstCtx.eax,
2478 pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel,
2479 pVCpu->cpum.GstCtx.ldtr.Sel));
2480 }
2481 if (fIsNewTSS386)
2482 {
2483 /*
2484 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
2485 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2486 */
2487 void *pvCurTSS32;
2488 uint32_t const offCurTSS = RT_UOFFSETOF(X86TSS32, eip);
2489 uint32_t const cbCurTSS = RT_UOFFSETOF(X86TSS32, selLdt) - RT_UOFFSETOF(X86TSS32, eip);
2490 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
2491 rcStrict = iemMemMap(pVCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW, 0);
2492 if (rcStrict != VINF_SUCCESS)
2493 {
2494 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
2495 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
2496 return rcStrict;
2497 }
2498
2499 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS, offCurTSS + cbCurTSS). */
2500 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
2501 pCurTSS32->eip = uNextEip;
2502 pCurTSS32->eflags = fEFlags;
2503 pCurTSS32->eax = pVCpu->cpum.GstCtx.eax;
2504 pCurTSS32->ecx = pVCpu->cpum.GstCtx.ecx;
2505 pCurTSS32->edx = pVCpu->cpum.GstCtx.edx;
2506 pCurTSS32->ebx = pVCpu->cpum.GstCtx.ebx;
2507 pCurTSS32->esp = pVCpu->cpum.GstCtx.esp;
2508 pCurTSS32->ebp = pVCpu->cpum.GstCtx.ebp;
2509 pCurTSS32->esi = pVCpu->cpum.GstCtx.esi;
2510 pCurTSS32->edi = pVCpu->cpum.GstCtx.edi;
2511 pCurTSS32->es = pVCpu->cpum.GstCtx.es.Sel;
2512 pCurTSS32->cs = pVCpu->cpum.GstCtx.cs.Sel;
2513 pCurTSS32->ss = pVCpu->cpum.GstCtx.ss.Sel;
2514 pCurTSS32->ds = pVCpu->cpum.GstCtx.ds.Sel;
2515 pCurTSS32->fs = pVCpu->cpum.GstCtx.fs.Sel;
2516 pCurTSS32->gs = pVCpu->cpum.GstCtx.gs.Sel;
2517
2518 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
2519 if (rcStrict != VINF_SUCCESS)
2520 {
2521 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2522 VBOXSTRICTRC_VAL(rcStrict)));
2523 return rcStrict;
2524 }
2525 }
2526 else
2527 {
2528 /*
2529 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
2530 */
2531 void *pvCurTSS16;
2532 uint32_t const offCurTSS = RT_UOFFSETOF(X86TSS16, ip);
2533 uint32_t const cbCurTSS = RT_UOFFSETOF(X86TSS16, selLdt) - RT_UOFFSETOF(X86TSS16, ip);
2534 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
2535 rcStrict = iemMemMap(pVCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW, 0);
2536 if (rcStrict != VINF_SUCCESS)
2537 {
2538 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
2539 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
2540 return rcStrict;
2541 }
2542
2543 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS, offCurTSS + cbCurTSS). */
2544 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
2545 pCurTSS16->ip = uNextEip;
2546 pCurTSS16->flags = (uint16_t)fEFlags;
2547 pCurTSS16->ax = pVCpu->cpum.GstCtx.ax;
2548 pCurTSS16->cx = pVCpu->cpum.GstCtx.cx;
2549 pCurTSS16->dx = pVCpu->cpum.GstCtx.dx;
2550 pCurTSS16->bx = pVCpu->cpum.GstCtx.bx;
2551 pCurTSS16->sp = pVCpu->cpum.GstCtx.sp;
2552 pCurTSS16->bp = pVCpu->cpum.GstCtx.bp;
2553 pCurTSS16->si = pVCpu->cpum.GstCtx.si;
2554 pCurTSS16->di = pVCpu->cpum.GstCtx.di;
2555 pCurTSS16->es = pVCpu->cpum.GstCtx.es.Sel;
2556 pCurTSS16->cs = pVCpu->cpum.GstCtx.cs.Sel;
2557 pCurTSS16->ss = pVCpu->cpum.GstCtx.ss.Sel;
2558 pCurTSS16->ds = pVCpu->cpum.GstCtx.ds.Sel;
2559
2560 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
2561 if (rcStrict != VINF_SUCCESS)
2562 {
2563 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2564 VBOXSTRICTRC_VAL(rcStrict)));
2565 return rcStrict;
2566 }
2567 }
2568
2569 /*
2570 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
2571 */
2572 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2573 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2574 {
2575 /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */
2576 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
2577 pNewTSS->selPrev = pVCpu->cpum.GstCtx.tr.Sel;
2578 }
2579
2580 /*
2581 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky,
2582 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
2583 */
2584 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
2585 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
2586 bool fNewDebugTrap;
2587 if (fIsNewTSS386)
2588 {
2589 PCX86TSS32 pNewTSS32 = (PCX86TSS32)pvNewTSS;
2590 uNewCr3 = (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
2591 uNewEip = pNewTSS32->eip;
2592 uNewEflags = pNewTSS32->eflags;
2593 uNewEax = pNewTSS32->eax;
2594 uNewEcx = pNewTSS32->ecx;
2595 uNewEdx = pNewTSS32->edx;
2596 uNewEbx = pNewTSS32->ebx;
2597 uNewEsp = pNewTSS32->esp;
2598 uNewEbp = pNewTSS32->ebp;
2599 uNewEsi = pNewTSS32->esi;
2600 uNewEdi = pNewTSS32->edi;
2601 uNewES = pNewTSS32->es;
2602 uNewCS = pNewTSS32->cs;
2603 uNewSS = pNewTSS32->ss;
2604 uNewDS = pNewTSS32->ds;
2605 uNewFS = pNewTSS32->fs;
2606 uNewGS = pNewTSS32->gs;
2607 uNewLdt = pNewTSS32->selLdt;
2608 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
2609 }
2610 else
2611 {
2612 PCX86TSS16 pNewTSS16 = (PCX86TSS16)pvNewTSS;
2613 uNewCr3 = 0;
2614 uNewEip = pNewTSS16->ip;
2615 uNewEflags = pNewTSS16->flags;
2616 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax;
2617 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx;
2618 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx;
2619 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx;
2620 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp;
2621 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp;
2622 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si;
2623 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di;
2624 uNewES = pNewTSS16->es;
2625 uNewCS = pNewTSS16->cs;
2626 uNewSS = pNewTSS16->ss;
2627 uNewDS = pNewTSS16->ds;
2628 uNewFS = 0;
2629 uNewGS = 0;
2630 uNewLdt = pNewTSS16->selLdt;
2631 fNewDebugTrap = false;
2632 }
2633
2634 if (GCPtrNewTSS == GCPtrCurTSS)
2635 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
2636 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
2637
2638 /*
2639 * We're done accessing the new TSS.
2640 */
2641 rcStrict = iemMemCommitAndUnmap(pVCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
2642 if (rcStrict != VINF_SUCCESS)
2643 {
2644 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
2645 return rcStrict;
2646 }
2647
2648 /*
2649 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
2650 */
2651 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
2652 {
2653 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
2654 pVCpu->cpum.GstCtx.gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0);
2655 if (rcStrict != VINF_SUCCESS)
2656 {
2657 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2658 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2659 return rcStrict;
2660 }
2661
2662 /* Check that the descriptor indicates the new TSS is available (not busy). */
2663 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2664 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
2665 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
2666
2667 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2668 rcStrict = iemMemCommitAndUnmap(pVCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
2669 if (rcStrict != VINF_SUCCESS)
2670 {
2671 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2672 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2673 return rcStrict;
2674 }
2675 }
2676
2677 /*
2678 * From this point on, we're technically in the new task. Exceptions raised from here on
2679 * are considered to occur after the task switch completes but before any instruction of the new task executes.
2680 */
2681 pVCpu->cpum.GstCtx.tr.Sel = SelTSS;
2682 pVCpu->cpum.GstCtx.tr.ValidSel = SelTSS;
2683 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
2684 pVCpu->cpum.GstCtx.tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
2685 pVCpu->cpum.GstCtx.tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
2686 pVCpu->cpum.GstCtx.tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy);
2687 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
2688
2689 /* Set the busy bit in TR. */
2690 pVCpu->cpum.GstCtx.tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2691
2692 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
2693 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2694 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2695 {
2696 uNewEflags |= X86_EFL_NT;
2697 }
2698
2699 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
2700 pVCpu->cpum.GstCtx.cr0 |= X86_CR0_TS;
2701 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
2702
2703 pVCpu->cpum.GstCtx.eip = uNewEip;
2704 pVCpu->cpum.GstCtx.eax = uNewEax;
2705 pVCpu->cpum.GstCtx.ecx = uNewEcx;
2706 pVCpu->cpum.GstCtx.edx = uNewEdx;
2707 pVCpu->cpum.GstCtx.ebx = uNewEbx;
2708 pVCpu->cpum.GstCtx.esp = uNewEsp;
2709 pVCpu->cpum.GstCtx.ebp = uNewEbp;
2710 pVCpu->cpum.GstCtx.esi = uNewEsi;
2711 pVCpu->cpum.GstCtx.edi = uNewEdi;
2712
2713 uNewEflags &= X86_EFL_LIVE_MASK;
2714 uNewEflags |= X86_EFL_RA1_MASK;
2715 IEMMISC_SET_EFL(pVCpu, uNewEflags);
2716
2717 /*
2718 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
2719 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
2720 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
2721 */
2722 pVCpu->cpum.GstCtx.es.Sel = uNewES;
2723 pVCpu->cpum.GstCtx.es.Attr.u &= ~X86DESCATTR_P;
2724
2725 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
2726 pVCpu->cpum.GstCtx.cs.Attr.u &= ~X86DESCATTR_P;
2727
2728 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
2729 pVCpu->cpum.GstCtx.ss.Attr.u &= ~X86DESCATTR_P;
2730
2731 pVCpu->cpum.GstCtx.ds.Sel = uNewDS;
2732 pVCpu->cpum.GstCtx.ds.Attr.u &= ~X86DESCATTR_P;
2733
2734 pVCpu->cpum.GstCtx.fs.Sel = uNewFS;
2735 pVCpu->cpum.GstCtx.fs.Attr.u &= ~X86DESCATTR_P;
2736
2737 pVCpu->cpum.GstCtx.gs.Sel = uNewGS;
2738 pVCpu->cpum.GstCtx.gs.Attr.u &= ~X86DESCATTR_P;
2739 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2740
2741 pVCpu->cpum.GstCtx.ldtr.Sel = uNewLdt;
2742 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
2743 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_P;
2744 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
2745
2746 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2747 {
2748 pVCpu->cpum.GstCtx.es.Attr.u |= X86DESCATTR_UNUSABLE;
2749 pVCpu->cpum.GstCtx.cs.Attr.u |= X86DESCATTR_UNUSABLE;
2750 pVCpu->cpum.GstCtx.ss.Attr.u |= X86DESCATTR_UNUSABLE;
2751 pVCpu->cpum.GstCtx.ds.Attr.u |= X86DESCATTR_UNUSABLE;
2752 pVCpu->cpum.GstCtx.fs.Attr.u |= X86DESCATTR_UNUSABLE;
2753 pVCpu->cpum.GstCtx.gs.Attr.u |= X86DESCATTR_UNUSABLE;
2754 pVCpu->cpum.GstCtx.ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
2755 }
2756
2757 /*
2758 * Switch CR3 for the new task.
2759 */
2760 if ( fIsNewTSS386
2761 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG))
2762 {
2763 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
2764 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
2765 AssertRCSuccessReturn(rc, rc);
2766
2767 /* Inform PGM. */
2768 /** @todo Should we raise \#GP(0) here when PAE PDPEs are invalid? */
2769 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PGE));
2770 AssertRCReturn(rc, rc);
2771 /* ignore informational status codes */
2772
2773 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
2774 }
2775
2776 /*
2777 * Switch LDTR for the new task.
2778 */
2779 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
2780 iemHlpLoadNullDataSelectorProt(pVCpu, &pVCpu->cpum.GstCtx.ldtr, uNewLdt);
2781 else
2782 {
2783 Assert(!pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
2784
2785 IEMSELDESC DescNewLdt;
2786 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
2787 if (rcStrict != VINF_SUCCESS)
2788 {
2789 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
2790 uNewLdt, pVCpu->cpum.GstCtx.gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
2791 return rcStrict;
2792 }
2793 if ( !DescNewLdt.Legacy.Gen.u1Present
2794 || DescNewLdt.Legacy.Gen.u1DescType
2795 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
2796 {
2797 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
2798 uNewLdt, DescNewLdt.Legacy.u));
2799 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
2800 }
2801
2802 pVCpu->cpum.GstCtx.ldtr.ValidSel = uNewLdt;
2803 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
2804 pVCpu->cpum.GstCtx.ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
2805 pVCpu->cpum.GstCtx.ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
2806 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
2807 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2808 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
2809 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
2810 }
2811
2812 IEMSELDESC DescSS;
2813 if (IEM_IS_V86_MODE(pVCpu))
2814 {
2815 IEM_SET_CPL(pVCpu, 3);
2816 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.es, uNewES);
2817 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.cs, uNewCS);
2818 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ss, uNewSS);
2819 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ds, uNewDS);
2820 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.fs, uNewFS);
2821 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.gs, uNewGS);
2822
2823 /* Quick fix: fake DescSS. */ /** @todo fix the code further down? */
2824 DescSS.Legacy.u = 0;
2825 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u32Limit;
2826 DescSS.Legacy.Gen.u4LimitHigh = pVCpu->cpum.GstCtx.ss.u32Limit >> 16;
2827 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u64Base;
2828 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 16);
2829 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 24);
2830 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
2831 DescSS.Legacy.Gen.u2Dpl = 3;
2832 }
2833 else
2834 {
2835 uint8_t const uNewCpl = (uNewCS & X86_SEL_RPL);
2836
2837 /*
2838 * Load the stack segment for the new task.
2839 */
2840 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
2841 {
2842 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
2843 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2844 }
2845
2846 /* Fetch the descriptor. */
2847 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
2848 if (rcStrict != VINF_SUCCESS)
2849 {
2850 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
2851 VBOXSTRICTRC_VAL(rcStrict)));
2852 return rcStrict;
2853 }
2854
2855 /* SS must be a data segment and writable. */
2856 if ( !DescSS.Legacy.Gen.u1DescType
2857 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
2858 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
2859 {
2860 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
2861 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
2862 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2863 }
2864
2865 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
2866 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
2867 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
2868 {
2869 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
2870 uNewCpl));
2871 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2872 }
2873
2874 /* Is it there? */
2875 if (!DescSS.Legacy.Gen.u1Present)
2876 {
2877 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
2878 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2879 }
2880
2881 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
2882 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
2883
2884 /* Set the accessed bit before committing the result into SS. */
2885 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2886 {
2887 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
2888 if (rcStrict != VINF_SUCCESS)
2889 return rcStrict;
2890 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2891 }
2892
2893 /* Commit SS. */
2894 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
2895 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSS;
2896 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
2897 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimit;
2898 pVCpu->cpum.GstCtx.ss.u64Base = u64Base;
2899 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
2900 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
2901
2902 /* CPL has changed, update IEM before loading rest of segments. */
2903 IEM_SET_CPL(pVCpu, uNewCpl);
2904
2905 /*
2906 * Load the data segments for the new task.
2907 */
2908 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.es, uNewES);
2909 if (rcStrict != VINF_SUCCESS)
2910 return rcStrict;
2911 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.ds, uNewDS);
2912 if (rcStrict != VINF_SUCCESS)
2913 return rcStrict;
2914 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.fs, uNewFS);
2915 if (rcStrict != VINF_SUCCESS)
2916 return rcStrict;
2917 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.gs, uNewGS);
2918 if (rcStrict != VINF_SUCCESS)
2919 return rcStrict;
2920
2921 /*
2922 * Load the code segment for the new task.
2923 */
2924 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
2925 {
2926 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
2927 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2928 }
2929
2930 /* Fetch the descriptor. */
2931 IEMSELDESC DescCS;
2932 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
2933 if (rcStrict != VINF_SUCCESS)
2934 {
2935 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
2936 return rcStrict;
2937 }
2938
2939 /* CS must be a code segment. */
2940 if ( !DescCS.Legacy.Gen.u1DescType
2941 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
2942 {
2943 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
2944 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
2945 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2946 }
2947
2948 /* For conforming CS, DPL must be less than or equal to the RPL. */
2949 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2950 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
2951 {
2952 Log(("iemTaskSwitch: conforming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
2953 DescCS.Legacy.Gen.u2Dpl));
2954 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2955 }
2956
2957 /* For non-conforming CS, DPL must match RPL. */
2958 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2959 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
2960 {
2961 Log(("iemTaskSwitch: non-conforming CS DPL RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
2962 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
2963 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2964 }
2965
2966 /* Is it there? */
2967 if (!DescCS.Legacy.Gen.u1Present)
2968 {
2969 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
2970 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2971 }
2972
2973 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
2974 u64Base = X86DESC_BASE(&DescCS.Legacy);
2975
2976 /* Set the accessed bit before committing the result into CS. */
2977 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2978 {
2979 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
2980 if (rcStrict != VINF_SUCCESS)
2981 return rcStrict;
2982 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2983 }
2984
2985 /* Commit CS. */
2986 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
2987 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCS;
2988 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
2989 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
2990 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
2991 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
2992 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
2993 }
2994
2995 /* Make sure the CPU mode is correct. */
2996 uint32_t const fExecNew = iemCalcExecFlags(pVCpu) | (pVCpu->iem.s.fExec & IEM_F_USER_OPTS);
2997 if (fExecNew != pVCpu->iem.s.fExec)
2998 Log(("iemTaskSwitch: fExec %#x -> %#x (xor %#x)\n", pVCpu->iem.s.fExec, fExecNew, pVCpu->iem.s.fExec ^ fExecNew));
2999 pVCpu->iem.s.fExec = fExecNew;
3000
3001 /** @todo Debug trap. */
3002 if (fIsNewTSS386 && fNewDebugTrap)
3003 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
3004
3005 /*
3006 * Construct the error code masks based on what caused this task switch.
3007 * See Intel Instruction reference for INT.
3008 */
3009 uint16_t uExt;
3010 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
3011 && ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3012 || (fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)))
3013 uExt = 1;
3014 else
3015 uExt = 0;
3016
3017 /*
3018 * Push any error code on to the new stack.
3019 */
3020 if (fFlags & IEM_XCPT_FLAGS_ERR)
3021 {
3022 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
3023 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
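     /* The error code is pushed as a dword on a 32-bit (386) TSS and as a word on a 16-bit (286) TSS. */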
3024 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
3025
3026 /* Check that there is sufficient space on the stack. */
3027 /** @todo Factor out segment limit checking for normal/expand down segments
3028 * into a separate function. */
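     /* For a normal (expand-up) stack segment the valid offsets are [0, limit]; for an expand-down
        segment they are (limit, 0xffff] or (limit, 0xffffffff] depending on the D/B bit. */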
3029 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3030 {
3031 if ( pVCpu->cpum.GstCtx.esp - 1 > cbLimitSS
3032 || pVCpu->cpum.GstCtx.esp < cbStackFrame)
3033 {
3034 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
3035 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
3036 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
3037 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
3038 }
3039 }
3040 else
3041 {
3042 if ( pVCpu->cpum.GstCtx.esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
3043 || pVCpu->cpum.GstCtx.esp - cbStackFrame < cbLimitSS + UINT32_C(1))
3044 {
3045 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
3046 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
3047 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
3048 }
3049 }
3050
3051
3052 if (fIsNewTSS386)
3053 rcStrict = iemMemStackPushU32(pVCpu, uErr);
3054 else
3055 rcStrict = iemMemStackPushU16(pVCpu, uErr);
3056 if (rcStrict != VINF_SUCCESS)
3057 {
3058 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
3059 fIsNewTSS386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
3060 return rcStrict;
3061 }
3062 }
3063
3064 /* Check the new EIP against the new CS limit. */
3065 if (pVCpu->cpum.GstCtx.eip > pVCpu->cpum.GstCtx.cs.u32Limit)
3066 {
3067 Log(("iemTaskSwitch: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
3068 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.cs.u32Limit));
3069 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
3070 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
3071 }
3072
3073 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip,
3074 pVCpu->cpum.GstCtx.ss.Sel));
3075 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3076}
3077
3078
3079/**
3080 * Implements exceptions and interrupts for protected mode.
3081 *
3082 * @returns VBox strict status code.
3083 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3084 * @param cbInstr The number of bytes to offset rIP by in the return
3085 * address.
3086 * @param u8Vector The interrupt / exception vector number.
3087 * @param fFlags The flags.
3088 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3089 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3090 */
3091static VBOXSTRICTRC
3092iemRaiseXcptOrIntInProtMode(PVMCPUCC pVCpu,
3093 uint8_t cbInstr,
3094 uint8_t u8Vector,
3095 uint32_t fFlags,
3096 uint16_t uErr,
3097 uint64_t uCr2) RT_NOEXCEPT
3098{
3099 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3100
3101 /*
3102 * Read the IDT entry.
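 * Note: protected-mode IDT entries are 8-byte gate descriptors; all 8 bytes of the entry for
 * vector N (at IDTR.base + 8*N) must lie within the IDT limit, hence the '+ 7' in the check below.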
3103 */
3104 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
3105 {
3106 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3107 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3108 }
3109 X86DESC Idte;
3110 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
3111 pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(8) * u8Vector);
3112 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3113 {
3114 Log(("iemRaiseXcptOrIntInProtMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3115 return rcStrict;
3116 }
3117 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x - from %04x:%08RX64 efl=%#x depth=%d\n",
3118 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3119 Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow,
3120 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.eflags.u, pVCpu->iem.s.cXcptRecursions));
3121
3122 /*
3123 * Check the descriptor type, DPL and such.
3124 * ASSUMES this is done in the same order as described for call-gate calls.
3125 */
3126 if (Idte.Gate.u1DescType)
3127 {
3128 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3129 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3130 }
3131 bool fTaskGate = false;
3132 uint8_t f32BitGate = true;
3133 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
3134 switch (Idte.Gate.u4Type)
3135 {
3136 case X86_SEL_TYPE_SYS_UNDEFINED:
3137 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
3138 case X86_SEL_TYPE_SYS_LDT:
3139 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3140 case X86_SEL_TYPE_SYS_286_CALL_GATE:
3141 case X86_SEL_TYPE_SYS_UNDEFINED2:
3142 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
3143 case X86_SEL_TYPE_SYS_UNDEFINED3:
3144 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3145 case X86_SEL_TYPE_SYS_386_CALL_GATE:
3146 case X86_SEL_TYPE_SYS_UNDEFINED4:
3147 {
3148 /** @todo check what actually happens when the type is wrong...
3149 * esp. call gates. */
3150 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3151 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3152 }
3153
3154 case X86_SEL_TYPE_SYS_286_INT_GATE:
3155 f32BitGate = false;
3156 RT_FALL_THRU();
3157 case X86_SEL_TYPE_SYS_386_INT_GATE:
3158 fEflToClear |= X86_EFL_IF;
3159 break;
3160
3161 case X86_SEL_TYPE_SYS_TASK_GATE:
3162 fTaskGate = true;
3163#ifndef IEM_IMPLEMENTS_TASKSWITCH
3164 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
3165#endif
3166 break;
3167
3168 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
3169 f32BitGate = false;
             RT_FALL_THRU();
3170 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
3171 break;
3172
3173 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3174 }
3175
3176 /* Check DPL against CPL if applicable. */
3177 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
3178 {
3179 if (IEM_GET_CPL(pVCpu) > Idte.Gate.u2Dpl)
3180 {
3181 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, IEM_GET_CPL(pVCpu), Idte.Gate.u2Dpl));
3182 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3183 }
3184 }
3185
3186 /* Is it there? */
3187 if (!Idte.Gate.u1Present)
3188 {
3189 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
3190 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3191 }
3192
3193 /* Is it a task-gate? */
3194 if (fTaskGate)
3195 {
3196 /*
3197 * Construct the error code masks based on what caused this task switch.
3198 * See Intel Instruction reference for INT.
3199 */
3200 uint16_t const uExt = ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3201 && !(fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)) ? 0 : 1;
3202 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
3203 RTSEL SelTSS = Idte.Gate.u16Sel;
3204
3205 /*
3206 * Fetch the TSS descriptor in the GDT.
3207 */
3208 IEMSELDESC DescTSS;
3209 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
3210 if (rcStrict != VINF_SUCCESS)
3211 {
3212 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
3213 VBOXSTRICTRC_VAL(rcStrict)));
3214 return rcStrict;
3215 }
3216
3217 /* The TSS descriptor must be a system segment and be available (not busy). */
3218 if ( DescTSS.Legacy.Gen.u1DescType
3219 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
3220 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
3221 {
3222 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
3223 u8Vector, SelTSS, DescTSS.Legacy.au64));
3224 return iemRaiseGeneralProtectionFault(pVCpu, (SelTSS & uSelMask) | uExt);
3225 }
3226
3227 /* The TSS must be present. */
3228 if (!DescTSS.Legacy.Gen.u1Present)
3229 {
3230 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
3231 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTSS & uSelMask) | uExt);
3232 }
3233
3234 /* Do the actual task switch. */
3235 return iemTaskSwitch(pVCpu, IEMTASKSWITCH_INT_XCPT,
3236 (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip,
3237 fFlags, uErr, uCr2, SelTSS, &DescTSS);
3238 }
3239
3240 /* A null CS is bad. */
3241 RTSEL NewCS = Idte.Gate.u16Sel;
3242 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3243 {
3244 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3245 return iemRaiseGeneralProtectionFault0(pVCpu);
3246 }
3247
3248 /* Fetch the descriptor for the new CS. */
3249 IEMSELDESC DescCS;
3250 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
3251 if (rcStrict != VINF_SUCCESS)
3252 {
3253 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3254 return rcStrict;
3255 }
3256
3257 /* Must be a code segment. */
3258 if (!DescCS.Legacy.Gen.u1DescType)
3259 {
3260 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3261 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3262 }
3263 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3264 {
3265 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3266 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3267 }
3268
3269 /* Don't allow lowering the privilege level. */
3270 /** @todo Does the lowering of privileges apply to software interrupts
3271 * only? This has bearings on the more-privileged or
3272 * same-privilege stack behavior further down. A testcase would
3273 * be nice. */
3274 if (DescCS.Legacy.Gen.u2Dpl > IEM_GET_CPL(pVCpu))
3275 {
3276 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3277 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
3278 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3279 }
3280
3281 /* Make sure the selector is present. */
3282 if (!DescCS.Legacy.Gen.u1Present)
3283 {
3284 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3285 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
3286 }
3287
3288#ifdef LOG_ENABLED
3289 /* If software interrupt, try decode it if logging is enabled and such. */
3290 if ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3291 && LogIsItEnabled(RTLOGGRPFLAGS_ENABLED, LOG_GROUP_IEM_SYSCALL))
3292 iemLogSyscallProtModeInt(pVCpu, u8Vector, cbInstr);
3293#endif
3294
3295 /* Check the new EIP against the new CS limit. */
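     /* 286 gates only supply a 16-bit offset; 386 gates combine the low and high offset words into a full 32-bit EIP. */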
3296 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
3297 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
3298 ? Idte.Gate.u16OffsetLow
3299 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
3300 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
3301 if (uNewEip > cbLimitCS)
3302 {
3303 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
3304 u8Vector, uNewEip, cbLimitCS, NewCS));
3305 return iemRaiseGeneralProtectionFault(pVCpu, 0);
3306 }
3307 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
3308
3309 /* Calc the flag image to push. */
3310 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3311 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3312 fEfl &= ~X86_EFL_RF;
3313 else
3314 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3315
3316 /* From V8086 mode only go to CPL 0. */
3317 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3318 ? IEM_GET_CPL(pVCpu) : DescCS.Legacy.Gen.u2Dpl;
3319 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
3320 {
3321 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
3322 return iemRaiseGeneralProtectionFault(pVCpu, 0);
3323 }
3324
3325 /*
3326 * If the privilege level changes, we need to get a new stack from the TSS.
3327 * This in turns means validating the new SS and ESP...
3328 */
3329 if (uNewCpl != IEM_GET_CPL(pVCpu))
3330 {
3331 RTSEL NewSS;
3332 uint32_t uNewEsp;
3333 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, uNewCpl, &NewSS, &uNewEsp);
3334 if (rcStrict != VINF_SUCCESS)
3335 return rcStrict;
3336
3337 IEMSELDESC DescSS;
3338 rcStrict = iemMiscValidateNewSS(pVCpu, NewSS, uNewCpl, &DescSS);
3339 if (rcStrict != VINF_SUCCESS)
3340 return rcStrict;
3341 /* If the new SS is 16-bit, we are only going to use SP, not ESP. */
3342 if (!DescSS.Legacy.Gen.u1DefBig)
3343 {
3344 Log(("iemRaiseXcptOrIntInProtMode: Forcing ESP=%#x to 16 bits\n", uNewEsp));
3345 uNewEsp = (uint16_t)uNewEsp;
3346 }
3347
3348 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
3349
3350 /* Check that there is sufficient space for the stack frame. */
3351 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
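     /* Frame layout: return EIP/IP, CS, (E)FLAGS, old ESP/SP and SS (5 entries), plus the error code
        when present; a V86-mode interrupt additionally pushes ES, DS, FS and GS. The f32BitGate shift
        doubles the 16-bit sizes for 32-bit gates. */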
3352 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
3353 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
3354 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
3355
3356 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3357 {
3358 if ( uNewEsp - 1 > cbLimitSS
3359 || uNewEsp < cbStackFrame)
3360 {
3361 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
3362 u8Vector, NewSS, uNewEsp, cbStackFrame));
3363 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
3364 }
3365 }
3366 else
3367 {
3368 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT16_MAX)
3369 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
3370 {
3371 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
3372 u8Vector, NewSS, uNewEsp, cbStackFrame));
3373 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
3374 }
3375 }
3376
3377 /*
3378 * Start making changes.
3379 */
3380
3381 /* Set the new CPL so that stack accesses use it. */
3382 uint8_t const uOldCpl = IEM_GET_CPL(pVCpu);
3383 IEM_SET_CPL(pVCpu, uNewCpl);
3384
3385 /* Create the stack frame. */
3386 RTPTRUNION uStackFrame;
3387 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
3388 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy),
3389 IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */
3390 if (rcStrict != VINF_SUCCESS)
3391 return rcStrict;
3392 void * const pvStackFrame = uStackFrame.pv;
3393 if (f32BitGate)
3394 {
3395 if (fFlags & IEM_XCPT_FLAGS_ERR)
3396 *uStackFrame.pu32++ = uErr;
3397 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3398 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
3399 uStackFrame.pu32[2] = fEfl;
3400 uStackFrame.pu32[3] = pVCpu->cpum.GstCtx.esp;
3401 uStackFrame.pu32[4] = pVCpu->cpum.GstCtx.ss.Sel;
3402 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
3403 if (fEfl & X86_EFL_VM)
3404 {
3405 uStackFrame.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel;
3406 uStackFrame.pu32[5] = pVCpu->cpum.GstCtx.es.Sel;
3407 uStackFrame.pu32[6] = pVCpu->cpum.GstCtx.ds.Sel;
3408 uStackFrame.pu32[7] = pVCpu->cpum.GstCtx.fs.Sel;
3409 uStackFrame.pu32[8] = pVCpu->cpum.GstCtx.gs.Sel;
3410 }
3411 }
3412 else
3413 {
3414 if (fFlags & IEM_XCPT_FLAGS_ERR)
3415 *uStackFrame.pu16++ = uErr;
3416 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
3417 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
3418 uStackFrame.pu16[2] = fEfl;
3419 uStackFrame.pu16[3] = pVCpu->cpum.GstCtx.sp;
3420 uStackFrame.pu16[4] = pVCpu->cpum.GstCtx.ss.Sel;
3421 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.sp));
3422 if (fEfl & X86_EFL_VM)
3423 {
3424 uStackFrame.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
3425 uStackFrame.pu16[5] = pVCpu->cpum.GstCtx.es.Sel;
3426 uStackFrame.pu16[6] = pVCpu->cpum.GstCtx.ds.Sel;
3427 uStackFrame.pu16[7] = pVCpu->cpum.GstCtx.fs.Sel;
3428 uStackFrame.pu16[8] = pVCpu->cpum.GstCtx.gs.Sel;
3429 }
3430 }
3431 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
3432 if (rcStrict != VINF_SUCCESS)
3433 return rcStrict;
3434
3435 /* Mark the selectors 'accessed' (hope this is the correct time). */
3436 /** @todo testcase: exactly _when_ are the accessed bits set - before or
3437 * after pushing the stack frame? (Write protect the gdt + stack to
3438 * find out.) */
3439 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3440 {
3441 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3442 if (rcStrict != VINF_SUCCESS)
3443 return rcStrict;
3444 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3445 }
3446
3447 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3448 {
3449 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
3450 if (rcStrict != VINF_SUCCESS)
3451 return rcStrict;
3452 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3453 }
3454
3455 /*
3456 * Start committing the register changes (joins with the DPL=CPL branch).
3457 */
3458 pVCpu->cpum.GstCtx.ss.Sel = NewSS;
3459 pVCpu->cpum.GstCtx.ss.ValidSel = NewSS;
3460 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3461 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimitSS;
3462 pVCpu->cpum.GstCtx.ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
3463 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3464 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
3465 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
3466 * SP is loaded).
3467 * Need to check the other combinations too:
3468 * - 16-bit TSS, 32-bit handler
3469 * - 32-bit TSS, 16-bit handler */
3470 if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
3471 pVCpu->cpum.GstCtx.sp = (uint16_t)(uNewEsp - cbStackFrame);
3472 else
3473 pVCpu->cpum.GstCtx.rsp = uNewEsp - cbStackFrame;
3474
3475 if (fEfl & X86_EFL_VM)
3476 {
3477 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.gs);
3478 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.fs);
3479 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.es);
3480 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.ds);
3481 }
3482 }
3483 /*
3484 * Same privilege, no stack change and smaller stack frame.
3485 */
3486 else
3487 {
3488 uint64_t uNewRsp;
3489 RTPTRUNION uStackFrame;
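     /* Same-privilege frame: return EIP/IP, CS and (E)FLAGS (3 entries), plus the error code when
        present; doubled for 32-bit gates by the f32BitGate shift. */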
3490 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
3491 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, f32BitGate ? 3 : 1, &uStackFrame.pv, &uNewRsp);
3492 if (rcStrict != VINF_SUCCESS)
3493 return rcStrict;
3494 void * const pvStackFrame = uStackFrame.pv;
3495
3496 if (f32BitGate)
3497 {
3498 if (fFlags & IEM_XCPT_FLAGS_ERR)
3499 *uStackFrame.pu32++ = uErr;
3500 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3501 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | IEM_GET_CPL(pVCpu);
3502 uStackFrame.pu32[2] = fEfl;
3503 }
3504 else
3505 {
3506 if (fFlags & IEM_XCPT_FLAGS_ERR)
3507 *uStackFrame.pu16++ = uErr;
3508 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3509 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | IEM_GET_CPL(pVCpu);
3510 uStackFrame.pu16[2] = fEfl;
3511 }
3512 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
3513 if (rcStrict != VINF_SUCCESS)
3514 return rcStrict;
3515
3516 /* Mark the CS selector as 'accessed'. */
3517 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3518 {
3519 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3520 if (rcStrict != VINF_SUCCESS)
3521 return rcStrict;
3522 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3523 }
3524
3525 /*
3526 * Start committing the register changes (joins with the other branch).
3527 */
3528 pVCpu->cpum.GstCtx.rsp = uNewRsp;
3529 }
3530
3531 /* ... register committing continues. */
3532 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3533 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3534 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3535 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCS;
3536 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3537 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3538
3539 pVCpu->cpum.GstCtx.rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
3540 fEfl &= ~fEflToClear;
3541 IEMMISC_SET_EFL(pVCpu, fEfl);
3542
3543 if (fFlags & IEM_XCPT_FLAGS_CR2)
3544 pVCpu->cpum.GstCtx.cr2 = uCr2;
3545
3546 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3547 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3548
3549 /* Make sure the execution flags are correct. */
3550 uint32_t const fExecNew = iemCalcExecFlags(pVCpu) | (pVCpu->iem.s.fExec & IEM_F_USER_OPTS);
3551 if (fExecNew != pVCpu->iem.s.fExec)
3552 Log(("iemRaiseXcptOrIntInProtMode: fExec %#x -> %#x (xor %#x)\n",
3553 pVCpu->iem.s.fExec, fExecNew, pVCpu->iem.s.fExec ^ fExecNew));
3554 pVCpu->iem.s.fExec = fExecNew;
3555 Assert(IEM_GET_CPL(pVCpu) == uNewCpl);
3556
3557 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3558}
3559
3560
3561/**
3562 * Implements exceptions and interrupts for long mode.
3563 *
3564 * @returns VBox strict status code.
3565 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3566 * @param cbInstr The number of bytes to offset rIP by in the return
3567 * address.
3568 * @param u8Vector The interrupt / exception vector number.
3569 * @param fFlags The flags.
3570 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3571 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3572 */
3573static VBOXSTRICTRC
3574iemRaiseXcptOrIntInLongMode(PVMCPUCC pVCpu,
3575 uint8_t cbInstr,
3576 uint8_t u8Vector,
3577 uint32_t fFlags,
3578 uint16_t uErr,
3579 uint64_t uCr2) RT_NOEXCEPT
3580{
3581 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3582
3583 /*
3584 * Read the IDT entry.
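 * Long-mode IDT entries are 16-byte gate descriptors (hence the 'vector << 4' scaling); the
 * descriptor is fetched below as two separate qwords.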
3585 */
3586 uint16_t offIdt = (uint16_t)u8Vector << 4;
3587 if (pVCpu->cpum.GstCtx.idtr.cbIdt < offIdt + 7)
3588 {
3589 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3590 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3591 }
3592 X86DESC64 Idte;
3593#ifdef _MSC_VER /* Shut up silly compiler warning. */
3594 Idte.au64[0] = 0;
3595 Idte.au64[1] = 0;
3596#endif
3597 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt);
3598 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3599 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt + 8);
3600 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3601 {
3602 Log(("iemRaiseXcptOrIntInLongMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3603 return rcStrict;
3604 }
3605 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
3606 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3607 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
3608
3609 /*
3610 * Check the descriptor type, DPL and such.
3611 * ASSUMES this is done in the same order as described for call-gate calls.
3612 */
3613 if (Idte.Gate.u1DescType)
3614 {
3615 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3616 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3617 }
3618 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
3619 switch (Idte.Gate.u4Type)
3620 {
3621 case AMD64_SEL_TYPE_SYS_INT_GATE:
3622 fEflToClear |= X86_EFL_IF;
3623 break;
3624 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
3625 break;
3626
3627 default:
3628 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3629 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3630 }
3631
3632 /* Check DPL against CPL if applicable. */
3633 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
3634 {
3635 if (IEM_GET_CPL(pVCpu) > Idte.Gate.u2Dpl)
3636 {
3637 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, IEM_GET_CPL(pVCpu), Idte.Gate.u2Dpl));
3638 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3639 }
3640 }
3641
3642 /* Is it there? */
3643 if (!Idte.Gate.u1Present)
3644 {
3645 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
3646 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3647 }
3648
3649 /* A null CS is bad. */
3650 RTSEL NewCS = Idte.Gate.u16Sel;
3651 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3652 {
3653 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3654 return iemRaiseGeneralProtectionFault0(pVCpu);
3655 }
3656
3657 /* Fetch the descriptor for the new CS. */
3658 IEMSELDESC DescCS;
3659 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
3660 if (rcStrict != VINF_SUCCESS)
3661 {
3662 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3663 return rcStrict;
3664 }
3665
3666 /* Must be a 64-bit code segment. */
3667 if (!DescCS.Long.Gen.u1DescType)
3668 {
3669 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3670 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3671 }
3672 if ( !DescCS.Long.Gen.u1Long
3673 || DescCS.Long.Gen.u1DefBig
3674 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
3675 {
3676 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
3677 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
3678 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3679 }
3680
3681 /* Don't allow lowering the privilege level. For non-conforming CS
3682 selectors, the CS.DPL sets the privilege level the trap/interrupt
3683 handler runs at. For conforming CS selectors, the CPL remains
3684 unchanged, but the CS.DPL must be <= CPL. */
3685 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
3686 * when CPU in Ring-0. Result \#GP? */
3687 if (DescCS.Legacy.Gen.u2Dpl > IEM_GET_CPL(pVCpu))
3688 {
3689 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3690 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
3691 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3692 }
3693
3694
3695 /* Make sure the selector is present. */
3696 if (!DescCS.Legacy.Gen.u1Present)
3697 {
3698 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3699 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
3700 }
3701
3702 /* Check that the new RIP is canonical. */
3703 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
3704 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
3705 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
3706 if (!IEM_IS_CANONICAL(uNewRip))
3707 {
3708 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
3709 return iemRaiseGeneralProtectionFault0(pVCpu);
3710 }
3711
3712 /*
3713 * If the privilege level changes or if the IST isn't zero, we need to get
3714 * a new stack from the TSS.
3715 */
3716 uint64_t uNewRsp;
3717 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3718 ? IEM_GET_CPL(pVCpu) : DescCS.Legacy.Gen.u2Dpl;
3719 if ( uNewCpl != IEM_GET_CPL(pVCpu)
3720 || Idte.Gate.u3IST != 0)
3721 {
3722 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
3723 if (rcStrict != VINF_SUCCESS)
3724 return rcStrict;
3725 }
3726 else
3727 uNewRsp = pVCpu->cpum.GstCtx.rsp;
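     /* In 64-bit mode the CPU aligns the new RSP down to a 16-byte boundary before pushing the
        interrupt stack frame (see the Intel SDM on the 64-bit mode stack frame). */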
3728 uNewRsp &= ~(uint64_t)0xf;
3729
3730 /*
3731 * Calc the flag image to push.
3732 */
3733 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3734 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3735 fEfl &= ~X86_EFL_RF;
3736 else
3737 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3738
3739 /*
3740 * Start making changes.
3741 */
3742 /* Set the new CPL so that stack accesses use it. */
3743 uint8_t const uOldCpl = IEM_GET_CPL(pVCpu);
3744 IEM_SET_CPL(pVCpu, uNewCpl);
3745/** @todo Setting CPL this early seems wrong as it would affect any errors we
3746 * raise accessing the stack and (?) GDT/LDT... */
3747
3748 /* Create the stack frame. */
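     /* The 64-bit frame always consists of SS, RSP, RFLAGS, CS and RIP (5 qwords), plus one more
        qword for the error code when present. */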
3749 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
3750 RTPTRUNION uStackFrame;
3751 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
3752 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */
3753 if (rcStrict != VINF_SUCCESS)
3754 return rcStrict;
3755 void * const pvStackFrame = uStackFrame.pv;
3756
3757 if (fFlags & IEM_XCPT_FLAGS_ERR)
3758 *uStackFrame.pu64++ = uErr;
3759 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.rip + cbInstr : pVCpu->cpum.GstCtx.rip;
3760 uStackFrame.pu64[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
3761 uStackFrame.pu64[2] = fEfl;
3762 uStackFrame.pu64[3] = pVCpu->cpum.GstCtx.rsp;
3763 uStackFrame.pu64[4] = pVCpu->cpum.GstCtx.ss.Sel;
3764 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
3765 if (rcStrict != VINF_SUCCESS)
3766 return rcStrict;
3767
3768 /* Mark the CS selector 'accessed' (hope this is the correct time). */
3769 /** @todo testcase: exactly _when_ are the accessed bits set - before or
3770 * after pushing the stack frame? (Write protect the gdt + stack to
3771 * find out.) */
3772 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3773 {
3774 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3775 if (rcStrict != VINF_SUCCESS)
3776 return rcStrict;
3777 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3778 }
3779
3780 /*
3781 * Start committing the register changes.
3782 */
3783 /** @todo research/testcase: Figure out what VT-x and AMD-V loads into the
3784 * hidden registers when interrupting 32-bit or 16-bit code! */
3785 if (uNewCpl != uOldCpl)
3786 {
3787 pVCpu->cpum.GstCtx.ss.Sel = 0 | uNewCpl;
3788 pVCpu->cpum.GstCtx.ss.ValidSel = 0 | uNewCpl;
3789 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3790 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
3791 pVCpu->cpum.GstCtx.ss.u64Base = 0;
3792 pVCpu->cpum.GstCtx.ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
3793 }
3794 pVCpu->cpum.GstCtx.rsp = uNewRsp - cbStackFrame;
3795 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3796 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3797 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3798 pVCpu->cpum.GstCtx.cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
3799 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3800 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3801 pVCpu->cpum.GstCtx.rip = uNewRip;
3802
3803 fEfl &= ~fEflToClear;
3804 IEMMISC_SET_EFL(pVCpu, fEfl);
3805
3806 if (fFlags & IEM_XCPT_FLAGS_CR2)
3807 pVCpu->cpum.GstCtx.cr2 = uCr2;
3808
3809 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3810 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3811
3812 iemRecalcExecModeAndCplFlags(pVCpu);
3813
3814 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3815}
3816
3817
3818/**
3819 * Implements exceptions and interrupts.
3820 *
3821 * All exceptions and interrupts go through this function!
3822 *
3823 * @returns VBox strict status code.
3824 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3825 * @param cbInstr The number of bytes to offset rIP by in the return
3826 * address.
3827 * @param u8Vector The interrupt / exception vector number.
3828 * @param fFlags The flags.
3829 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3830 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3831 */
3832VBOXSTRICTRC
3833iemRaiseXcptOrInt(PVMCPUCC pVCpu,
3834 uint8_t cbInstr,
3835 uint8_t u8Vector,
3836 uint32_t fFlags,
3837 uint16_t uErr,
3838 uint64_t uCr2) RT_NOEXCEPT
3839{
3840 /*
3841 * Get all the state that we might need here.
3842 */
3843 IEM_CTX_IMPORT_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3844 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3845
3846#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
3847 /*
3848 * Flush prefetch buffer
3849 */
3850 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
3851#endif
3852
3853 /*
3854 * Perform the V8086 IOPL check and upgrade the fault without nesting.
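 * A software INT n executed in V8086 mode with IOPL < 3 is converted into a #GP(0) here rather
 * than being dispatched through the IDT; INT3, INTO and ICEBP are excluded by the flag mask below.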
3855 */
3856 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1VM
3857 && pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL != 3
3858 && (fFlags & ( IEM_XCPT_FLAGS_T_SOFT_INT
3859 | IEM_XCPT_FLAGS_BP_INSTR
3860 | IEM_XCPT_FLAGS_ICEBP_INSTR
3861 | IEM_XCPT_FLAGS_OF_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
3862 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) )
3863 {
3864 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
3865 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
3866 u8Vector = X86_XCPT_GP;
3867 uErr = 0;
3868 }
3869
3870 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
3871#ifdef DBGFTRACE_ENABLED
3872 RTTraceBufAddMsgF(pVM->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
3873 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
3874 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp);
3875#endif
3876
3877 /*
3878 * Check if DBGF wants to intercept the exception.
3879 */
3880 if ( (fFlags & (IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_T_SOFT_INT))
3881 || !DBGF_IS_EVENT_ENABLED(pVM, (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + u8Vector)) )
3882 { /* likely */ }
3883 else
3884 {
3885 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + u8Vector),
3886 DBGFEVENTCTX_INVALID, 1, (uint64_t)uErr);
3887 if (rcStrict != VINF_SUCCESS)
3888 return rcStrict;
3889 }
3890
3891 /*
3892 * Evaluate whether NMI blocking should be in effect.
3893 * Normally, NMI blocking is in effect whenever we inject an NMI.
3894 */
3895 bool fBlockNmi = u8Vector == X86_XCPT_NMI
3896 && (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT);
3897
3898#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3899 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3900 {
3901 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEvent(pVCpu, u8Vector, fFlags, uErr, uCr2, cbInstr);
3902 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
3903 return rcStrict0;
3904
3905 /* If virtual-NMI blocking is in effect for the nested-guest, guest NMIs are not blocked. */
3906 if (pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking)
3907 {
3908 Assert(CPUMIsGuestVmxPinCtlsSet(&pVCpu->cpum.GstCtx, VMX_PIN_CTLS_VIRT_NMI));
3909 fBlockNmi = false;
3910 }
3911 }
3912#endif
3913
3914#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
3915 if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
3916 {
3917 /*
3918 * If the event is being injected as part of VMRUN, it isn't subject to event
3919 * intercepts in the nested-guest. However, secondary exceptions that occur
3920 * during injection of any event -are- subject to exception intercepts.
3921 *
3922 * See AMD spec. 15.20 "Event Injection".
3923 */
3924 if (!pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents)
3925 pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents = true;
3926 else
3927 {
3928 /*
3929 * Check and handle if the event being raised is intercepted.
3930 */
3931 VBOXSTRICTRC rcStrict0 = iemHandleSvmEventIntercept(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
3932 if (rcStrict0 != VINF_SVM_INTERCEPT_NOT_ACTIVE)
3933 return rcStrict0;
3934 }
3935 }
3936#endif
3937
3938 /*
3939 * Set NMI blocking if necessary.
3940 */
3941 if (fBlockNmi)
3942 CPUMSetInterruptInhibitingByNmi(&pVCpu->cpum.GstCtx);
3943
3944 /*
3945 * Do recursion accounting.
3946 */
3947 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
3948 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
3949 if (pVCpu->iem.s.cXcptRecursions == 0)
3950 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
3951 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2));
3952 else
3953 {
3954 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
3955 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt,
3956 pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
3957
3958 if (pVCpu->iem.s.cXcptRecursions >= 4)
3959 {
3960#ifdef DEBUG_bird
3961 AssertFailed();
3962#endif
3963 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
3964 }
3965
3966 /*
3967 * Evaluate the sequence of recurring events.
3968 */
3969 IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevXcpt, uPrevXcpt, fFlags, u8Vector,
3970 NULL /* pXcptRaiseInfo */);
3971 if (enmRaise == IEMXCPTRAISE_CURRENT_XCPT)
3972 { /* likely */ }
3973 else if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
3974 {
3975 Log2(("iemRaiseXcptOrInt: Raising double fault. uPrevXcpt=%#x\n", uPrevXcpt));
3976 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
3977 u8Vector = X86_XCPT_DF;
3978 uErr = 0;
3979#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3980 /* VMX nested-guest #DF intercept needs to be checked here. */
3981 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3982 {
3983 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEventDoubleFault(pVCpu);
3984 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
3985 return rcStrict0;
3986 }
3987#endif
3988 /* SVM nested-guest #DF intercepts need to be checked now. See AMD spec. 15.12 "Exception Intercepts". */
3989 if (IEM_SVM_IS_XCPT_INTERCEPT_SET(pVCpu, X86_XCPT_DF))
3990 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
3991 }
3992 else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)
3993 {
3994 Log2(("iemRaiseXcptOrInt: Raising triple fault. uPrevXcpt=%#x\n", uPrevXcpt));
3995 return iemInitiateCpuShutdown(pVCpu);
3996 }
3997 else if (enmRaise == IEMXCPTRAISE_CPU_HANG)
3998 {
3999 /* If a nested-guest enters an endless CPU loop condition, we'll emulate it; otherwise guru. */
4000 Log2(("iemRaiseXcptOrInt: CPU hang condition detected\n"));
4001 if ( !CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu))
4002 && !CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
4003 return VERR_EM_GUEST_CPU_HANG;
4004 }
4005 else
4006 {
4007 AssertMsgFailed(("Unexpected condition! enmRaise=%#x uPrevXcpt=%#x fPrevXcpt=%#x, u8Vector=%#x fFlags=%#x\n",
4008 enmRaise, uPrevXcpt, fPrevXcpt, u8Vector, fFlags));
4009 return VERR_IEM_IPE_9;
4010 }
4011
4012 /*
4013 * The 'EXT' bit is set when an exception occurs during delivery of an external
4014 * event (such as an interrupt or an earlier exception)[1]. The privileged software
4015 * exception (INT1/ICEBP) also sets the EXT bit[2]. For exceptions generated by
4016 * software interrupts and the INTO/INT3 instructions, the 'EXT' bit is not set[3].
4017 *
4018 * [1] - Intel spec. 6.13 "Error Code"
4019 * [2] - Intel spec. 26.5.1.1 "Details of Vectored-Event Injection".
4020 * [3] - Intel Instruction reference for INT n.
4021 */
4022 if ( (fPrevXcpt & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR))
4023 && (fFlags & IEM_XCPT_FLAGS_ERR)
4024 && u8Vector != X86_XCPT_PF
4025 && u8Vector != X86_XCPT_DF)
4026 {
4027 uErr |= X86_TRAP_ERR_EXTERNAL;
4028 }
4029 }
4030
4031 pVCpu->iem.s.cXcptRecursions++;
4032 pVCpu->iem.s.uCurXcpt = u8Vector;
4033 pVCpu->iem.s.fCurXcpt = fFlags;
4034 pVCpu->iem.s.uCurXcptErr = uErr;
4035 pVCpu->iem.s.uCurXcptCr2 = uCr2;
4036
4037 /*
4038 * Extensive logging.
4039 */
4040#if defined(LOG_ENABLED) && defined(IN_RING3)
4041 if (LogIs3Enabled())
4042 {
4043 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR_MASK);
4044 char szRegs[4096];
4045 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
4046 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
4047 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
4048 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
4049 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
4050 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
4051 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
4052 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
4053 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
4054 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
4055 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
4056 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4057 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4058 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4059 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4060 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4061 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4062 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4063 " efer=%016VR{efer}\n"
4064 " pat=%016VR{pat}\n"
4065 " sf_mask=%016VR{sf_mask}\n"
4066 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4067 " lstar=%016VR{lstar}\n"
4068 " star=%016VR{star} cstar=%016VR{cstar}\n"
4069 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4070 );
4071
4072 char szInstr[256];
4073 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4074 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4075 szInstr, sizeof(szInstr), NULL);
4076 Log3(("%s%s\n", szRegs, szInstr));
4077 }
4078#endif /* LOG_ENABLED */
4079
4080 /*
4081 * Stats.
4082 */
4083 if (!(fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT))
4084 STAM_REL_STATS({ pVCpu->iem.s.aStatInts[u8Vector] += 1; });
4085 else if (u8Vector <= X86_XCPT_LAST)
4086 {
4087 STAM_REL_COUNTER_INC(&pVCpu->iem.s.aStatXcpts[u8Vector]);
4088 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_XCPT, u8Vector),
4089 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base, ASMReadTSC());
4090 }
4091
4092 /*
4093 * A #PF implies an INVLPG for the CR2 value (see 4.10.1.1 in Intel SDM Vol 3)
4094 * to ensure that a stale TLB or paging cache entry will only cause one
4095 * spurious #PF.
4096 */
4097 if ( u8Vector == X86_XCPT_PF
4098 && (fFlags & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_CR2)) == (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_CR2))
4099 IEMTlbInvalidatePage(pVCpu, uCr2);
4100
4101 /*
4102 * Call the mode specific worker function.
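 * Real mode when CR0.PE is clear, long mode when EFER.LMA is set, otherwise protected mode
 * (which also covers V86 mode).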
4103 */
4104 VBOXSTRICTRC rcStrict;
4105 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
4106 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4107 else if (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LMA)
4108 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4109 else
4110 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4111
4112 /* Flush the prefetch buffer. */
4113 iemOpcodeFlushHeavy(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
4114
4115 /*
4116 * Unwind.
4117 */
4118 pVCpu->iem.s.cXcptRecursions--;
4119 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
4120 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
4121 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u depth=%d\n",
4122 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel,
4123 pVCpu->cpum.GstCtx.esp, IEM_GET_CPL(pVCpu), pVCpu->iem.s.cXcptRecursions + 1));
4124 return rcStrict;
4125}
4126
4127#ifdef IEM_WITH_SETJMP
4128/**
4129 * See iemRaiseXcptOrInt. Will not return.
4130 */
4131DECL_NO_RETURN(void)
4132iemRaiseXcptOrIntJmp(PVMCPUCC pVCpu,
4133 uint8_t cbInstr,
4134 uint8_t u8Vector,
4135 uint32_t fFlags,
4136 uint16_t uErr,
4137 uint64_t uCr2) IEM_NOEXCEPT_MAY_LONGJMP
4138{
4139 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4140 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
4141}
4142#endif
4143
4144
4145/** \#DE - 00. */
4146VBOXSTRICTRC iemRaiseDivideError(PVMCPUCC pVCpu) RT_NOEXCEPT
4147{
4148 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4149}
4150
4151
4152/** \#DB - 01.
4153 * @note This automatically clears DR7.GD. */
4154VBOXSTRICTRC iemRaiseDebugException(PVMCPUCC pVCpu) RT_NOEXCEPT
4155{
4156 /* This always clears RF (via IEM_XCPT_FLAGS_DRx_INSTR_BP). */
4157 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
4158 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_DRx_INSTR_BP, 0, 0);
4159}
4160
4161
4162/** \#BR - 05. */
4163VBOXSTRICTRC iemRaiseBoundRangeExceeded(PVMCPUCC pVCpu) RT_NOEXCEPT
4164{
4165 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_BR, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4166}
4167
4168
4169/** \#UD - 06. */
4170VBOXSTRICTRC iemRaiseUndefinedOpcode(PVMCPUCC pVCpu) RT_NOEXCEPT
4171{
4172 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4173}
4174
4175
4176/** \#NM - 07. */
4177VBOXSTRICTRC iemRaiseDeviceNotAvailable(PVMCPUCC pVCpu) RT_NOEXCEPT
4178{
4179 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4180}
4181
4182
4183/** \#TS(err) - 0a. */
4184VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4185{
4186 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4187}
4188
4189
4190/** \#TS(tr) - 0a. */
4191VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPUCC pVCpu) RT_NOEXCEPT
4192{
4193 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4194 pVCpu->cpum.GstCtx.tr.Sel, 0);
4195}
4196
4197
4198/** \#TS(0) - 0a. */
4199VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPUCC pVCpu) RT_NOEXCEPT
4200{
4201 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4202 0, 0);
4203}
4204
4205
4206/** \#TS(err) - 0a. */
4207VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4208{
4209 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4210 uSel & X86_SEL_MASK_OFF_RPL, 0);
4211}
4212
4213
4214/** \#NP(err) - 0b. */
4215VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4216{
4217 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4218}
4219
4220
4221/** \#NP(sel) - 0b. */
4222VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4223{
4224 Log(("iemRaiseSelectorNotPresentBySelector: cs:rip=%04x:%RX64 uSel=%#x\n",
4225 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uSel));
4226 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4227 uSel & ~X86_SEL_RPL, 0);
4228}
4229
4230
4231/** \#SS(seg) - 0c. */
4232VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4233{
4234 Log(("iemRaiseStackSelectorNotPresentBySelector: cs:rip=%04x:%RX64 uSel=%#x\n",
4235 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uSel));
4236 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4237 uSel & ~X86_SEL_RPL, 0);
4238}
4239
4240
4241/** \#SS(err) - 0c. */
4242VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4243{
4244 Log(("iemRaiseStackSelectorNotPresentWithErr: cs:rip=%04x:%RX64 uErr=%#x\n",
4245 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uErr));
4246 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4247}
4248
4249
4250/** \#GP(n) - 0d. */
4251VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4252{
4253 Log(("iemRaiseGeneralProtectionFault: cs:rip=%04x:%RX64 uErr=%#x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uErr));
4254 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4255}
4256
4257
4258/** \#GP(0) - 0d. */
4259VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPUCC pVCpu) RT_NOEXCEPT
4260{
4261 Log(("iemRaiseGeneralProtectionFault0: cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
4262 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4263}
4264
4265#ifdef IEM_WITH_SETJMP
4266/** \#GP(0) - 0d. */
4267DECL_NO_RETURN(void) iemRaiseGeneralProtectionFault0Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4268{
4269 Log(("iemRaiseGeneralProtectionFault0Jmp: cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
4270 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4271}
4272#endif
4273
4274
4275/** \#GP(sel) - 0d. */
4276VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
4277{
4278 Log(("iemRaiseGeneralProtectionFaultBySelector: cs:rip=%04x:%RX64 Sel=%#x\n",
4279 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, Sel));
4280 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4281 Sel & ~X86_SEL_RPL, 0);
4282}
4283
4284
4285/** \#GP(0) - 0d. */
4286VBOXSTRICTRC iemRaiseNotCanonical(PVMCPUCC pVCpu) RT_NOEXCEPT
4287{
4288 Log(("iemRaiseNotCanonical: cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
4289 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4290}
4291
4292
4293/** \#GP(sel) - 0d. */
4294VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4295{
4296 Log(("iemRaiseSelectorBounds: cs:rip=%04x:%RX64 iSegReg=%d fAccess=%#x\n",
4297 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, iSegReg, fAccess));
4298 NOREF(iSegReg); NOREF(fAccess);
4299 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4300 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4301}
4302
4303#ifdef IEM_WITH_SETJMP
4304/** \#GP(sel) - 0d, longjmp. */
4305DECL_NO_RETURN(void) iemRaiseSelectorBoundsJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP
4306{
4307 Log(("iemRaiseSelectorBoundsJmp: cs:rip=%04x:%RX64 iSegReg=%d fAccess=%#x\n",
4308 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, iSegReg, fAccess));
4309 NOREF(iSegReg); NOREF(fAccess);
4310 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4311 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4312}
4313#endif
4314
4315/** \#GP(sel) - 0d. */
4316VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
4317{
4318 Log(("iemRaiseSelectorBoundsBySelector: cs:rip=%04x:%RX64 Sel=%#x\n",
4319 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, Sel));
4320 NOREF(Sel);
4321 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4322}
4323
4324#ifdef IEM_WITH_SETJMP
4325/** \#GP(sel) - 0d, longjmp. */
4326DECL_NO_RETURN(void) iemRaiseSelectorBoundsBySelectorJmp(PVMCPUCC pVCpu, RTSEL Sel) IEM_NOEXCEPT_MAY_LONGJMP
4327{
4328 Log(("iemRaiseSelectorBoundsBySelectorJmp: cs:rip=%04x:%RX64 Sel=%#x\n",
4329 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, Sel));
4330 NOREF(Sel);
4331 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4332}
4333#endif
4334
4335
4336/** \#GP(sel) - 0d. */
4337VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4338{
4339 Log(("iemRaiseSelectorInvalidAccess: cs:rip=%04x:%RX64 iSegReg=%d fAccess=%#x\n",
4340 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, iSegReg, fAccess));
4341 NOREF(iSegReg); NOREF(fAccess);
4342 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4343}
4344
4345#ifdef IEM_WITH_SETJMP
4346/** \#GP(sel) - 0d, longjmp. */
4347DECL_NO_RETURN(void) iemRaiseSelectorInvalidAccessJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP
4348{
4349 NOREF(iSegReg); NOREF(fAccess);
4350 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4351}
4352#endif
4353
4354
4355/** \#PF(n) - 0e. */
4356VBOXSTRICTRC iemRaisePageFault(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess, uint32_t fAccess, int rc) RT_NOEXCEPT
4357{
4358 uint16_t uErr;
4359 switch (rc)
4360 {
4361 case VERR_PAGE_NOT_PRESENT:
4362 case VERR_PAGE_TABLE_NOT_PRESENT:
4363 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
4364 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
4365 uErr = 0;
4366 break;
4367
4368 default:
4369 AssertMsgFailed(("%Rrc\n", rc));
4370 RT_FALL_THRU();
4371 case VERR_ACCESS_DENIED:
4372 uErr = X86_TRAP_PF_P;
4373 break;
4374
4375 /** @todo reserved */
4376 }
4377
4378 if (IEM_GET_CPL(pVCpu) == 3)
4379 uErr |= X86_TRAP_PF_US;
4380
4381 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
4382 && ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
4383 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) ) )
4384 uErr |= X86_TRAP_PF_ID;
4385
4386#if 0 /* This is so much non-sense, really. Why was it done like that? */
4387 /* Note! RW access callers reporting a WRITE protection fault, will clear
4388 the READ flag before calling. So, read-modify-write accesses (RW)
4389 can safely be reported as READ faults. */
4390 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
4391 uErr |= X86_TRAP_PF_RW;
4392#else
4393 if (fAccess & IEM_ACCESS_TYPE_WRITE)
4394 {
4395 /// @todo r=bird: bs3-cpu-basic-2 wants X86_TRAP_PF_RW for xchg and cmpxchg
4396 /// (regardless of outcome of the comparison in the latter case).
4397 //if (!(fAccess & IEM_ACCESS_TYPE_READ))
4398 uErr |= X86_TRAP_PF_RW;
4399 }
4400#endif
4401
4402 /* For FXSAVE and FRSTOR the #PF is typically reported at the max address
4403 of the memory operand rather than at the start of it. (Not sure what
4404 happens if it crosses a page boundary.) The current heuristic for
4405 this is to report the #PF for the last byte if the access is more than
4406 64 bytes. This is probably not correct, but we can work that out later;
4407 the main objective now is to get FXSAVE to work like on real hardware and
4408 make bs3-cpu-basic2 work. */
4409 if (cbAccess <= 64)
4410 { /* likely */ }
4411 else
4412 GCPtrWhere += cbAccess - 1;
4413
4414 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
4415 uErr, GCPtrWhere);
4416}
4417
4418#ifdef IEM_WITH_SETJMP
4419/** \#PF(n) - 0e, longjmp. */
4420DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess,
4421 uint32_t fAccess, int rc) IEM_NOEXCEPT_MAY_LONGJMP
4422{
4423 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, cbAccess, fAccess, rc)));
4424}
4425#endif
4426
4427
4428/** \#MF(0) - 10. */
4429VBOXSTRICTRC iemRaiseMathFault(PVMCPUCC pVCpu) RT_NOEXCEPT
4430{
4431 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_NE)
4432 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4433
4434 /* Convert a #MF into a FERR -> IRQ 13. See @bugref{6117}. */
4435 PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13 /* u8Irq */, 1 /* u8Level */, 0 /* uTagSrc */);
4436 return iemRegUpdateRipAndFinishClearingRF(pVCpu);
4437}
4438
4439
4440/** \#AC(0) - 11. */
4441VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPUCC pVCpu) RT_NOEXCEPT
4442{
4443 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4444}
4445
4446#ifdef IEM_WITH_SETJMP
4447/** \#AC(0) - 11, longjmp. */
4448DECL_NO_RETURN(void) iemRaiseAlignmentCheckExceptionJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4449{
4450 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaiseAlignmentCheckException(pVCpu)));
4451}
4452#endif
4453
4454
4455/** \#XF(0)/\#XM(0) - 19. */
4456VBOXSTRICTRC iemRaiseSimdFpException(PVMCPUCC pVCpu) RT_NOEXCEPT
4457{
4458 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_XF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4459}
4460
4461
4462/** Accessed via IEMOP_RAISE_DIVIDE_ERROR. */
4463IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
4464{
4465 NOREF(cbInstr);
4466 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4467}
4468
4469
4470/** Accessed via IEMOP_RAISE_INVALID_LOCK_PREFIX. */
4471IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
4472{
4473 NOREF(cbInstr);
4474 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4475}
4476
4477
4478/** Accessed via IEMOP_RAISE_INVALID_OPCODE. */
4479IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
4480{
4481 NOREF(cbInstr);
4482 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4483}
4484
4485
4486/** @} */
4487
4488/** @name Common opcode decoders.
4489 * @{
4490 */
4491//#include <iprt/mem.h>
4492
4493/**
4494 * Used to add extra details about a stub case.
4495 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4496 */
4497void iemOpStubMsg2(PVMCPUCC pVCpu) RT_NOEXCEPT
4498{
4499#if defined(LOG_ENABLED) && defined(IN_RING3)
4500 PVM pVM = pVCpu->CTX_SUFF(pVM);
4501 char szRegs[4096];
4502 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
4503 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
4504 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
4505 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
4506 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
4507 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
4508 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
4509 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
4510 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
4511 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
4512 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
4513 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4514 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4515 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4516 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4517 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4518 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4519 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4520 " efer=%016VR{efer}\n"
4521 " pat=%016VR{pat}\n"
4522 " sf_mask=%016VR{sf_mask}\n"
4523 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4524 " lstar=%016VR{lstar}\n"
4525 " star=%016VR{star} cstar=%016VR{cstar}\n"
4526 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4527 );
4528
4529 char szInstr[256];
4530 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4531 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4532 szInstr, sizeof(szInstr), NULL);
4533
4534 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
4535#else
4536 RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs, pVCpu->cpum.GstCtx.rip);
4537#endif
4538}
4539
4540/** @} */
4541
4542
4543
4544/** @name Register Access.
4545 * @{
4546 */
4547
4548/**
4549 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
4550 *
4551 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4552 * segment limit.
4553 *
4554 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4555 * @param cbInstr Instruction size.
4556 * @param offNextInstr The offset of the next instruction.
4557 * @param enmEffOpSize Effective operand size.
4558 */
4559VBOXSTRICTRC iemRegRipRelativeJumpS8AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int8_t offNextInstr,
4560 IEMMODE enmEffOpSize) RT_NOEXCEPT
4561{
4562 switch (enmEffOpSize)
4563 {
4564 case IEMMODE_16BIT:
4565 {
4566 uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + (int16_t)offNextInstr;
4567 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
4568 || IEM_IS_64BIT_CODE(pVCpu) /* no CS limit checks in 64-bit mode */))
4569 pVCpu->cpum.GstCtx.rip = uNewIp;
4570 else
4571 return iemRaiseGeneralProtectionFault0(pVCpu);
4572 break;
4573 }
4574
4575 case IEMMODE_32BIT:
4576 {
4577 Assert(!IEM_IS_64BIT_CODE(pVCpu));
4578 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
4579
4580 uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + (int32_t)offNextInstr;
4581 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
4582 pVCpu->cpum.GstCtx.rip = uNewEip;
4583 else
4584 return iemRaiseGeneralProtectionFault0(pVCpu);
4585 break;
4586 }
4587
4588 case IEMMODE_64BIT:
4589 {
4590 Assert(IEM_IS_64BIT_CODE(pVCpu));
4591
4592 uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
4593 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
4594 pVCpu->cpum.GstCtx.rip = uNewRip;
4595 else
4596 return iemRaiseGeneralProtectionFault0(pVCpu);
4597 break;
4598 }
4599
4600 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4601 }
4602
4603#ifndef IEM_WITH_CODE_TLB
4604 /* Flush the prefetch buffer. */
4605 pVCpu->iem.s.cbOpcode = cbInstr;
4606#endif
4607
4608 /*
4609 * Clear RF and finish the instruction (maybe raise #DB).
4610 */
4611 return iemRegFinishClearingRF(pVCpu);
4612}
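/* Illustrative note on the 16-bit case above: the uint16_t arithmetic wraps by
   design, so a 2 byte short jump at IP=0xFFFE with offNextInstr=0 produces
   uNewIp=0x0000, which is then only checked against the CS limit. */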
4613
4614
4615/**
4616 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
4617 *
4618 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4619 * segment limit.
4620 *
4621 * @returns Strict VBox status code.
4622 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4623 * @param cbInstr Instruction size.
4624 * @param offNextInstr The offset of the next instruction.
4625 */
4626VBOXSTRICTRC iemRegRipRelativeJumpS16AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int16_t offNextInstr) RT_NOEXCEPT
4627{
4628 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
4629
4630 uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + offNextInstr;
4631 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
4632 || IEM_IS_64BIT_CODE(pVCpu) /* no limit checking in 64-bit mode */))
4633 pVCpu->cpum.GstCtx.rip = uNewIp;
4634 else
4635 return iemRaiseGeneralProtectionFault0(pVCpu);
4636
4637#ifndef IEM_WITH_CODE_TLB
4638 /* Flush the prefetch buffer. */
4639 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4640#endif
4641
4642 /*
4643 * Clear RF and finish the instruction (maybe raise #DB).
4644 */
4645 return iemRegFinishClearingRF(pVCpu);
4646}
4647
4648
4649/**
4650 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
4651 *
4652 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4653 * segment limit.
4654 *
4655 * @returns Strict VBox status code.
4656 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4657 * @param cbInstr Instruction size.
4658 * @param offNextInstr The offset of the next instruction.
4659 * @param enmEffOpSize Effective operand size.
4660 */
4661VBOXSTRICTRC iemRegRipRelativeJumpS32AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int32_t offNextInstr,
4662 IEMMODE enmEffOpSize) RT_NOEXCEPT
4663{
4664 if (enmEffOpSize == IEMMODE_32BIT)
4665 {
4666 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX); Assert(!IEM_IS_64BIT_CODE(pVCpu));
4667
4668 uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + offNextInstr;
4669 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
4670 pVCpu->cpum.GstCtx.rip = uNewEip;
4671 else
4672 return iemRaiseGeneralProtectionFault0(pVCpu);
4673 }
4674 else
4675 {
4676 Assert(enmEffOpSize == IEMMODE_64BIT);
4677
4678 uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
4679 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
4680 pVCpu->cpum.GstCtx.rip = uNewRip;
4681 else
4682 return iemRaiseGeneralProtectionFault0(pVCpu);
4683 }
4684
4685#ifndef IEM_WITH_CODE_TLB
4686 /* Flush the prefetch buffer. */
4687 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4688#endif
4689
4690 /*
4691 * Clear RF and finish the instruction (maybe raise #DB).
4692 */
4693 return iemRegFinishClearingRF(pVCpu);
4694}
4695
4696
4697/**
4698 * Performs a near jump to the specified address.
4699 *
4700 * May raise a \#GP(0) if the new IP is outside the code segment limit.
4701 *
4702 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4703 * @param uNewIp The new IP value.
4704 */
4705VBOXSTRICTRC iemRegRipJumpU16AndFinishClearningRF(PVMCPUCC pVCpu, uint16_t uNewIp) RT_NOEXCEPT
4706{
4707 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
4708 || IEM_IS_64BIT_CODE(pVCpu) /* no limit checks in 64-bit mode */))
4709 pVCpu->cpum.GstCtx.rip = uNewIp;
4710 else
4711 return iemRaiseGeneralProtectionFault0(pVCpu);
4712 /** @todo Test 16-bit jump in 64-bit mode. */
4713
4714#ifndef IEM_WITH_CODE_TLB
4715 /* Flush the prefetch buffer. */
4716 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4717#endif
4718
4719 /*
4720 * Clear RF and finish the instruction (maybe raise #DB).
4721 */
4722 return iemRegFinishClearingRF(pVCpu);
4723}
4724
4725
4726/**
4727 * Performs a near jump to the specified address.
4728 *
4729 * May raise a \#GP(0) if the new RIP is outside the code segment limit.
4730 *
4731 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4732 * @param uNewEip The new EIP value.
4733 */
4734VBOXSTRICTRC iemRegRipJumpU32AndFinishClearningRF(PVMCPUCC pVCpu, uint32_t uNewEip) RT_NOEXCEPT
4735{
4736 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
4737 Assert(!IEM_IS_64BIT_CODE(pVCpu));
4738
4739 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
4740 pVCpu->cpum.GstCtx.rip = uNewEip;
4741 else
4742 return iemRaiseGeneralProtectionFault0(pVCpu);
4743
4744#ifndef IEM_WITH_CODE_TLB
4745 /* Flush the prefetch buffer. */
4746 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4747#endif
4748
4749 /*
4750 * Clear RF and finish the instruction (maybe raise #DB).
4751 */
4752 return iemRegFinishClearingRF(pVCpu);
4753}
4754
4755
4756/**
4757 * Performs a near jump to the specified address.
4758 *
4759 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4760 * segment limit.
4761 *
4762 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4763 * @param uNewRip The new RIP value.
4764 */
4765VBOXSTRICTRC iemRegRipJumpU64AndFinishClearningRF(PVMCPUCC pVCpu, uint64_t uNewRip) RT_NOEXCEPT
4766{
4767 Assert(IEM_IS_64BIT_CODE(pVCpu));
4768
4769 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
4770 pVCpu->cpum.GstCtx.rip = uNewRip;
4771 else
4772 return iemRaiseGeneralProtectionFault0(pVCpu);
4773
4774#ifndef IEM_WITH_CODE_TLB
4775 /* Flush the prefetch buffer. */
4776 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4777#endif
4778
4779 /*
4780 * Clear RF and finish the instruction (maybe raise #DB).
4781 */
4782 return iemRegFinishClearingRF(pVCpu);
4783}
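/* Note on IEM_IS_CANONICAL as used above: an address is canonical when bits
   63:47 are all equal (the sign-extension of bit 47). For example
   0x00007fffffffffff and 0xffff800000000000 are canonical, while
   0x0000800000000000 is not and takes the #GP(0) path. */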
4784
4785/** @} */
4786
4787
4788/** @name FPU access and helpers.
4789 *
4790 * @{
4791 */
4792
4793/**
4794 * Updates the x87.DS and FPUDP registers.
4795 *
4796 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4797 * @param pFpuCtx The FPU context.
4798 * @param iEffSeg The effective segment register.
4799 * @param GCPtrEff The effective address relative to @a iEffSeg.
4800 */
4801DECLINLINE(void) iemFpuUpdateDP(PVMCPUCC pVCpu, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
4802{
4803 RTSEL sel;
4804 switch (iEffSeg)
4805 {
4806 case X86_SREG_DS: sel = pVCpu->cpum.GstCtx.ds.Sel; break;
4807 case X86_SREG_SS: sel = pVCpu->cpum.GstCtx.ss.Sel; break;
4808 case X86_SREG_CS: sel = pVCpu->cpum.GstCtx.cs.Sel; break;
4809 case X86_SREG_ES: sel = pVCpu->cpum.GstCtx.es.Sel; break;
4810 case X86_SREG_FS: sel = pVCpu->cpum.GstCtx.fs.Sel; break;
4811 case X86_SREG_GS: sel = pVCpu->cpum.GstCtx.gs.Sel; break;
4812 default:
4813 AssertMsgFailed(("%d\n", iEffSeg));
4814 sel = pVCpu->cpum.GstCtx.ds.Sel;
4815 }
4816 /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
4817 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
4818 {
4819 pFpuCtx->DS = 0;
4820 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
4821 }
4822 else if (!IEM_IS_LONG_MODE(pVCpu)) /** @todo this is weird. explain. */
4823 {
4824 pFpuCtx->DS = sel;
4825 pFpuCtx->FPUDP = GCPtrEff;
4826 }
4827 else
4828 *(uint64_t *)&pFpuCtx->FPUDP = GCPtrEff;
4829}
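/* Illustrative example for the real/V86 branch above: with DS=0x1234 and
   GCPtrEff=0x0010 the stored FPUDP is 0x0010 + (0x1234 << 4) = 0x12350 and the
   DS field is left zero, i.e. a real-mode linear address is stored rather than
   a selector:offset pair. */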
4830
4831
4832/**
4833 * Rotates the stack registers in the push direction.
4834 *
4835 * @param pFpuCtx The FPU context.
4836 * @remarks This is a complete waste of time, but fxsave stores the registers in
4837 * stack order.
4838 */
4839DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
4840{
4841 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
4842 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
4843 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
4844 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
4845 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
4846 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
4847 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
4848 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
4849 pFpuCtx->aRegs[0].r80 = r80Tmp;
4850}
4851
4852
4853/**
4854 * Rotates the stack registers in the pop direction.
4855 *
4856 * @param pFpuCtx The FPU context.
4857 * @remarks This is a complete waste of time, but fxsave stores the registers in
4858 * stack order.
4859 */
4860DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
4861{
4862 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
4863 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
4864 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
4865 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
4866 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
4867 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
4868 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
4869 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
4870 pFpuCtx->aRegs[7].r80 = r80Tmp;
4871}
4872
4873
4874/**
4875 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
4876 * exception prevents it.
4877 *
4878 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4879 * @param pResult The FPU operation result to push.
4880 * @param pFpuCtx The FPU context.
4881 */
4882static void iemFpuMaybePushResult(PVMCPU pVCpu, PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx) RT_NOEXCEPT
4883{
4884 /* Update FSW and bail if there are pending exceptions afterwards. */
4885 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
4886 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
4887 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4888 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4889 {
4890 if ((fFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4891 Log11(("iemFpuMaybePushResult: %04x:%08RX64: FSW %#x -> %#x\n",
4892 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fFsw));
4893 pFpuCtx->FSW = fFsw;
4894 return;
4895 }
4896
4897 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
4898 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
4899 {
4900 /* All is fine, push the actual value. */
4901 pFpuCtx->FTW |= RT_BIT(iNewTop);
4902 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
4903 }
4904 else if (pFpuCtx->FCW & X86_FCW_IM)
4905 {
4906 /* Masked stack overflow, push QNaN. */
4907 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
4908 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
4909 }
4910 else
4911 {
4912 /* Raise stack overflow, don't push anything. */
4913 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
4914 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
4915 Log11(("iemFpuMaybePushResult: %04x:%08RX64: stack overflow (FSW=%#x)\n",
4916 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
4917 return;
4918 }
4919
4920 fFsw &= ~X86_FSW_TOP_MASK;
4921 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
4922 pFpuCtx->FSW = fFsw;
4923
4924 iemFpuRotateStackPush(pFpuCtx);
4925 RT_NOREF(pVCpu);
4926}
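/* Illustrative walk-through of the push above, assuming TOP=0: iNewTop is
   (0 + 7) & 7 = 7; when FTW bit 7 is clear the value goes into aRegs[7], the
   bit is set, and iemFpuRotateStackPush moves it into aRegs[0] so aRegs[0]
   keeps tracking ST(0). If the slot is occupied and X86_FCW_IM is set, a
   QNaN is pushed instead with IE, SF and C1 signalled. */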
4927
4928
4929/**
4930 * Stores a result in a FPU register and updates the FSW and FTW.
4931 *
4932 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4933 * @param pFpuCtx The FPU context.
4934 * @param pResult The result to store.
4935 * @param iStReg Which FPU register to store it in.
4936 */
4937static void iemFpuStoreResultOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg) RT_NOEXCEPT
4938{
4939 Assert(iStReg < 8);
4940 uint16_t fNewFsw = pFpuCtx->FSW;
4941 uint16_t const iReg = (X86_FSW_TOP_GET(fNewFsw) + iStReg) & X86_FSW_TOP_SMASK;
4942 fNewFsw &= ~X86_FSW_C_MASK;
4943 fNewFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
4944 if ((fNewFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4945 Log11(("iemFpuStoreResultOnly: %04x:%08RX64: FSW %#x -> %#x\n",
4946 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fNewFsw));
4947 pFpuCtx->FSW = fNewFsw;
4948 pFpuCtx->FTW |= RT_BIT(iReg);
4949 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
4950 RT_NOREF(pVCpu);
4951}
4952
4953
4954/**
4955 * Only updates the FPU status word (FSW) with the result of the current
4956 * instruction.
4957 *
4958 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4959 * @param pFpuCtx The FPU context.
4960 * @param u16FSW The FSW output of the current instruction.
4961 */
4962static void iemFpuUpdateFSWOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint16_t u16FSW) RT_NOEXCEPT
4963{
4964 uint16_t fNewFsw = pFpuCtx->FSW;
4965 fNewFsw &= ~X86_FSW_C_MASK;
4966 fNewFsw |= u16FSW & ~X86_FSW_TOP_MASK;
4967 if ((fNewFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4968 Log11(("iemFpuUpdateFSWOnly: %04x:%08RX64: FSW %#x -> %#x\n",
4969 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fNewFsw));
4970 pFpuCtx->FSW = fNewFsw;
4971 RT_NOREF(pVCpu);
4972}
4973
4974
4975/**
4976 * Pops one item off the FPU stack if no pending exception prevents it.
4977 *
4978 * @param pFpuCtx The FPU context.
4979 */
4980static void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx) RT_NOEXCEPT
4981{
4982 /* Check pending exceptions. */
4983 uint16_t uFSW = pFpuCtx->FSW;
4984 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4985 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4986 return;
4987
4988 /* TOP++ (a pop increments TOP; adding 9 is +1 modulo the 3-bit field). */
4989 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
4990 uFSW &= ~X86_FSW_TOP_MASK;
4991 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
4992 pFpuCtx->FSW = uFSW;
4993
4994 /* Mark the previous ST0 as empty. */
4995 iOldTop >>= X86_FSW_TOP_SHIFT;
4996 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
4997
4998 /* Rotate the registers. */
4999 iemFpuRotateStackPop(pFpuCtx);
5000}
5001
5002
5003/**
5004 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
5005 *
5006 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5007 * @param pResult The FPU operation result to push.
5008 * @param uFpuOpcode The FPU opcode value.
5009 */
5010void iemFpuPushResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint16_t uFpuOpcode) RT_NOEXCEPT
5011{
5012 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5013 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5014 iemFpuMaybePushResult(pVCpu, pResult, pFpuCtx);
5015}
5016
5017
5018/**
5019 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
5020 * and sets FPUDP and FPUDS.
5021 *
5022 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5023 * @param pResult The FPU operation result to push.
5024 * @param iEffSeg The effective segment register.
5025 * @param GCPtrEff The effective address relative to @a iEffSeg.
5026 * @param uFpuOpcode The FPU opcode value.
5027 */
5028void iemFpuPushResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff,
5029 uint16_t uFpuOpcode) RT_NOEXCEPT
5030{
5031 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5032 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5033 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5034 iemFpuMaybePushResult(pVCpu, pResult, pFpuCtx);
5035}
5036
5037
5038/**
5039 * Replaces ST0 with the first value and pushes the second onto the FPU stack,
5040 * unless a pending exception prevents it.
5041 *
5042 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5043 * @param pResult The FPU operation result to store and push.
5044 * @param uFpuOpcode The FPU opcode value.
5045 */
5046void iemFpuPushResultTwo(PVMCPUCC pVCpu, PIEMFPURESULTTWO pResult, uint16_t uFpuOpcode) RT_NOEXCEPT
5047{
5048 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5049 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5050
5051 /* Update FSW and bail if there are pending exceptions afterwards. */
5052 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
5053 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
5054 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
5055 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
5056 {
5057 if ((fFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
5058 Log11(("iemFpuPushResultTwo: %04x:%08RX64: FSW %#x -> %#x\n",
5059 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fFsw));
5060 pFpuCtx->FSW = fFsw;
5061 return;
5062 }
5063
5064 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
5065 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
5066 {
5067 /* All is fine, push the actual value. */
5068 pFpuCtx->FTW |= RT_BIT(iNewTop);
5069 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
5070 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
5071 }
5072 else if (pFpuCtx->FCW & X86_FCW_IM)
5073 {
5074 /* Masked stack overflow, push QNaN. */
5075 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
5076 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
5077 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5078 }
5079 else
5080 {
5081 /* Raise stack overflow, don't push anything. */
5082 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
5083 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
5084 Log11(("iemFpuPushResultTwo: %04x:%08RX64: stack overflow (FSW=%#x)\n",
5085 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5086 return;
5087 }
5088
5089 fFsw &= ~X86_FSW_TOP_MASK;
5090 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
5091 pFpuCtx->FSW = fFsw;
5092
5093 iemFpuRotateStackPush(pFpuCtx);
5094}
5095
5096
5097/**
5098 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
5099 * FOP.
5100 *
5101 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5102 * @param pResult The result to store.
5103 * @param iStReg Which FPU register to store it in.
5104 * @param uFpuOpcode The FPU opcode value.
5105 */
5106void iemFpuStoreResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5107{
5108 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5109 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5110 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5111}
5112
5113
5114/**
5115 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
5116 * FOP, and then pops the stack.
5117 *
5118 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5119 * @param pResult The result to store.
5120 * @param iStReg Which FPU register to store it in.
5121 * @param uFpuOpcode The FPU opcode value.
5122 */
5123void iemFpuStoreResultThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5124{
5125 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5126 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5127 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5128 iemFpuMaybePopOne(pFpuCtx);
5129}
5130
5131
5132/**
5133 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
5134 * FPUDP, and FPUDS.
5135 *
5136 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5137 * @param pResult The result to store.
5138 * @param iStReg Which FPU register to store it in.
5139 * @param iEffSeg The effective memory operand selector register.
5140 * @param GCPtrEff The effective memory operand offset.
5141 * @param uFpuOpcode The FPU opcode value.
5142 */
5143void iemFpuStoreResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
5144 uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5145{
5146 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5147 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5148 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5149 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5150}
5151
5152
5153/**
5154 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
5155 * FPUDP, and FPUDS, and then pops the stack.
5156 *
5157 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5158 * @param pResult The result to store.
5159 * @param iStReg Which FPU register to store it in.
5160 * @param iEffSeg The effective memory operand selector register.
5161 * @param GCPtrEff The effective memory operand offset.
5162 * @param uFpuOpcode The FPU opcode value.
5163 */
5164void iemFpuStoreResultWithMemOpThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult,
5165 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5166{
5167 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5168 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5169 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5170 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5171 iemFpuMaybePopOne(pFpuCtx);
5172}
5173
5174
5175/**
5176 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
5177 *
5178 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5179 * @param uFpuOpcode The FPU opcode value.
5180 */
5181void iemFpuUpdateOpcodeAndIp(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5182{
5183 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5184 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5185}
5186
5187
5188/**
5189 * Updates the FSW, FOP, FPUIP, and FPUCS.
5190 *
5191 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5192 * @param u16FSW The FSW from the current instruction.
5193 * @param uFpuOpcode The FPU opcode value.
5194 */
5195void iemFpuUpdateFSW(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT
5196{
5197 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5198 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5199 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5200}
5201
5202
5203/**
5204 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
5205 *
5206 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5207 * @param u16FSW The FSW from the current instruction.
5208 * @param uFpuOpcode The FPU opcode value.
5209 */
5210void iemFpuUpdateFSWThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT
5211{
5212 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5213 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5214 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5215 iemFpuMaybePopOne(pFpuCtx);
5216}
5217
5218
5219/**
5220 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
5221 *
5222 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5223 * @param u16FSW The FSW from the current instruction.
5224 * @param iEffSeg The effective memory operand selector register.
5225 * @param GCPtrEff The effective memory operand offset.
5226 * @param uFpuOpcode The FPU opcode value.
5227 */
5228void iemFpuUpdateFSWWithMemOp(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5229{
5230 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5231 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5232 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5233 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5234}
5235
5236
5237/**
5238 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
5239 *
5240 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5241 * @param u16FSW The FSW from the current instruction.
5242 * @param uFpuOpcode The FPU opcode value.
5243 */
5244void iemFpuUpdateFSWThenPopPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT
5245{
5246 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5247 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5248 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5249 iemFpuMaybePopOne(pFpuCtx);
5250 iemFpuMaybePopOne(pFpuCtx);
5251}
5252
5253
5254/**
5255 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
5256 *
5257 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5258 * @param u16FSW The FSW from the current instruction.
5259 * @param iEffSeg The effective memory operand selector register.
5260 * @param GCPtrEff The effective memory operand offset.
5261 * @param uFpuOpcode The FPU opcode value.
5262 */
5263void iemFpuUpdateFSWWithMemOpThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff,
5264 uint16_t uFpuOpcode) RT_NOEXCEPT
5265{
5266 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5267 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5268 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5269 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5270 iemFpuMaybePopOne(pFpuCtx);
5271}
5272
5273
5274/**
5275 * Worker routine for raising an FPU stack underflow exception.
5276 *
5277 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5278 * @param pFpuCtx The FPU context.
5279 * @param iStReg The stack register being accessed.
5280 */
5281static void iemFpuStackUnderflowOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint8_t iStReg)
5282{
5283 Assert(iStReg < 8 || iStReg == UINT8_MAX);
5284 if (pFpuCtx->FCW & X86_FCW_IM)
5285 {
5286 /* Masked underflow. */
5287 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5288 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5289 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5290 if (iStReg != UINT8_MAX)
5291 {
5292 pFpuCtx->FTW |= RT_BIT(iReg);
5293 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
5294 }
5295 }
5296 else
5297 {
5298 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5299 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5300 Log11(("iemFpuStackUnderflowOnly: %04x:%08RX64: underflow (FSW=%#x)\n",
5301 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5302 }
5303 RT_NOREF(pVCpu);
5304}
5305
5306
5307/**
5308 * Raises a FPU stack underflow exception.
5309 *
5310 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5311 * @param iStReg The destination register that should be loaded
5312 * with QNaN if \#IS is not masked. Specify
5313 * UINT8_MAX if none (like for fcom).
5314 * @param uFpuOpcode The FPU opcode value.
5315 */
5316void iemFpuStackUnderflow(PVMCPUCC pVCpu, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5317{
5318 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5319 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5320 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5321}
5322
5323
5324void iemFpuStackUnderflowWithMemOp(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5325{
5326 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5327 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5328 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5329 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5330}
5331
5332
5333void iemFpuStackUnderflowThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5334{
5335 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5336 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5337 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5338 iemFpuMaybePopOne(pFpuCtx);
5339}
5340
5341
5342void iemFpuStackUnderflowWithMemOpThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff,
5343 uint16_t uFpuOpcode) RT_NOEXCEPT
5344{
5345 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5346 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5347 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5348 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5349 iemFpuMaybePopOne(pFpuCtx);
5350}
5351
5352
5353void iemFpuStackUnderflowThenPopPop(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5354{
5355 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5356 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5357 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, UINT8_MAX);
5358 iemFpuMaybePopOne(pFpuCtx);
5359 iemFpuMaybePopOne(pFpuCtx);
5360}
5361
5362
5363void iemFpuStackPushUnderflow(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5364{
5365 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5366 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5367
5368 if (pFpuCtx->FCW & X86_FCW_IM)
5369 {
5370 /* Masked underflow - push QNaN. */
5371 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5372 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5373 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5374 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5375 pFpuCtx->FTW |= RT_BIT(iNewTop);
5376 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5377 iemFpuRotateStackPush(pFpuCtx);
5378 }
5379 else
5380 {
5381 /* Exception pending - don't change TOP or the register stack. */
5382 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5383 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5384 Log11(("iemFpuStackPushUnderflow: %04x:%08RX64: underflow (FSW=%#x)\n",
5385 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5386 }
5387}
5388
5389
5390void iemFpuStackPushUnderflowTwo(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5391{
5392 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5393 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5394
5395 if (pFpuCtx->FCW & X86_FCW_IM)
5396 {
5397 /* Masked underflow - push QNaN. */
5398 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5399 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5400 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5401 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5402 pFpuCtx->FTW |= RT_BIT(iNewTop);
5403 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
5404 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5405 iemFpuRotateStackPush(pFpuCtx);
5406 }
5407 else
5408 {
5409 /* Exception pending - don't change TOP or the register stack. */
5410 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5411 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5412 Log11(("iemFpuStackPushUnderflowTwo: %04x:%08RX64: underflow (FSW=%#x)\n",
5413 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5414 }
5415}
5416
5417
5418/**
5419 * Worker routine for raising an FPU stack overflow exception on a push.
5420 *
5421 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5422 * @param pFpuCtx The FPU context.
5423 */
5424static void iemFpuStackPushOverflowOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx) RT_NOEXCEPT
5425{
5426 if (pFpuCtx->FCW & X86_FCW_IM)
5427 {
5428 /* Masked overflow. */
5429 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5430 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5431 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
5432 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5433 pFpuCtx->FTW |= RT_BIT(iNewTop);
5434 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5435 iemFpuRotateStackPush(pFpuCtx);
5436 }
5437 else
5438 {
5439 /* Exception pending - don't change TOP or the register stack. */
5440 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5441 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5442 Log11(("iemFpuStackPushOverflowOnly: %04x:%08RX64: overflow (FSW=%#x)\n",
5443 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5444 }
5445 RT_NOREF(pVCpu);
5446}
5447
5448
5449/**
5450 * Raises a FPU stack overflow exception on a push.
5451 *
5452 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5453 * @param uFpuOpcode The FPU opcode value.
5454 */
5455void iemFpuStackPushOverflow(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5456{
5457 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5458 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5459 iemFpuStackPushOverflowOnly(pVCpu, pFpuCtx);
5460}
5461
5462
5463/**
5464 * Raises a FPU stack overflow exception on a push with a memory operand.
5465 *
5466 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5467 * @param iEffSeg The effective memory operand selector register.
5468 * @param GCPtrEff The effective memory operand offset.
5469 * @param uFpuOpcode The FPU opcode value.
5470 */
5471void iemFpuStackPushOverflowWithMemOp(PVMCPUCC pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5472{
5473 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5474 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5475 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5476 iemFpuStackPushOverflowOnly(pVCpu, pFpuCtx);
5477}
5478
5479/** @} */
5480
5481
5482/** @name SSE+AVX SIMD access and helpers.
5483 *
5484 * @{
5485 */
5486/**
5487 * Stores a result in a SIMD XMM register, updates the MXCSR.
5488 *
5489 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5490 * @param pResult The result to store.
5491 * @param iXmmReg Which SIMD XMM register to store the result in.
5492 */
5493void iemSseStoreResult(PVMCPUCC pVCpu, PCIEMSSERESULT pResult, uint8_t iXmmReg) RT_NOEXCEPT
5494{
5495 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5496 pFpuCtx->MXCSR |= pResult->MXCSR & X86_MXCSR_XCPT_FLAGS;
5497
5498 /* The result is only updated if there is no unmasked exception pending. */
5499 if (( ~((pFpuCtx->MXCSR & X86_MXCSR_XCPT_MASK) >> X86_MXCSR_XCPT_MASK_SHIFT)
5500 & (pFpuCtx->MXCSR & X86_MXCSR_XCPT_FLAGS)) == 0)
5501 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXmmReg] = pResult->uResult;
5502}
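/* Illustrative example: X86_MXCSR_XCPT_MASK (bits 7..12) shifted right by
   X86_MXCSR_XCPT_MASK_SHIFT lines up with X86_MXCSR_XCPT_FLAGS (bits 0..5),
   so the check above only stores the result when every raised flag has its
   mask bit set. E.g. with IM (bit 7) clear, a result raising IE (bit 0)
   leaves the XMM register untouched and only accumulates the flag. */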
5503
5504
5505/**
5506 * Updates the MXCSR.
5507 *
5508 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5509 * @param fMxcsr The new MXCSR value.
5510 */
5511void iemSseUpdateMxcsr(PVMCPUCC pVCpu, uint32_t fMxcsr) RT_NOEXCEPT
5512{
5513 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5514 pFpuCtx->MXCSR |= fMxcsr & X86_MXCSR_XCPT_FLAGS;
5515}
5516/** @} */
5517
5518
5519/** @name Memory access.
5520 *
5521 * @{
5522 */
5523
5524#undef LOG_GROUP
5525#define LOG_GROUP LOG_GROUP_IEM_MEM
5526
5527/**
5528 * Updates the IEMCPU::cbWritten counter if applicable.
5529 *
5530 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5531 * @param fAccess The access being accounted for.
5532 * @param cbMem The access size.
5533 */
5534DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPUCC pVCpu, uint32_t fAccess, size_t cbMem)
5535{
5536 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
5537 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
5538 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
5539}
5540
5541
5542/**
5543 * Applies the segment limit, base and attributes.
5544 *
5545 * This may raise a \#GP or \#SS.
5546 *
5547 * @returns VBox strict status code.
5548 *
5549 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5550 * @param fAccess The kind of access which is being performed.
5551 * @param iSegReg The index of the segment register to apply.
5552 * This is UINT8_MAX if none (for IDT, GDT, LDT,
5553 * TSS, ++).
5554 * @param cbMem The access size.
5555 * @param pGCPtrMem Pointer to the guest memory address to apply
5556 * segmentation to. Input and output parameter.
5557 */
5558VBOXSTRICTRC iemMemApplySegment(PVMCPUCC pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem) RT_NOEXCEPT
5559{
5560 if (iSegReg == UINT8_MAX)
5561 return VINF_SUCCESS;
5562
5563 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
5564 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
5565 switch (IEM_GET_CPU_MODE(pVCpu))
5566 {
5567 case IEMMODE_16BIT:
5568 case IEMMODE_32BIT:
5569 {
5570 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
5571 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
5572
5573 if ( pSel->Attr.n.u1Present
5574 && !pSel->Attr.n.u1Unusable)
5575 {
5576 Assert(pSel->Attr.n.u1DescType);
5577 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
5578 {
5579 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5580 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
5581 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
5582
5583 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
5584 {
5585 /** @todo CPL check. */
5586 }
5587
5588 /*
5589 * There are two kinds of data selectors, normal and expand down.
5590 */
5591 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
5592 {
5593 if ( GCPtrFirst32 > pSel->u32Limit
5594 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
5595 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5596 }
5597 else
5598 {
5599 /*
5600 * The upper boundary is defined by the B bit, not the G bit!
5601 */
5602 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
5603 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
5604 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5605 }
5606 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
5607 }
5608 else
5609 {
5610 /*
5611 * Code selectors can usually be used to read through; writing is
5612 * only permitted in real and V8086 mode.
5613 */
5614 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5615 || ( (fAccess & IEM_ACCESS_TYPE_READ)
5616 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
5617 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
5618 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
5619
5620 if ( GCPtrFirst32 > pSel->u32Limit
5621 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
5622 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5623
5624 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
5625 {
5626 /** @todo CPL check. */
5627 }
5628
5629 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
5630 }
5631 }
5632 else
5633 return iemRaiseGeneralProtectionFault0(pVCpu);
5634 return VINF_SUCCESS;
5635 }
5636
5637 case IEMMODE_64BIT:
5638 {
5639 RTGCPTR GCPtrMem = *pGCPtrMem;
5640 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
5641 *pGCPtrMem = GCPtrMem + pSel->u64Base;
5642
5643 Assert(cbMem >= 1);
5644 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
5645 return VINF_SUCCESS;
5646 /** @todo We should probably raise \#SS(0) here if segment is SS; see AMD spec.
5647 * 4.12.2 "Data Limit Checks in 64-bit Mode". */
5648 return iemRaiseGeneralProtectionFault0(pVCpu);
5649 }
5650
5651 default:
5652 AssertFailedReturn(VERR_IEM_IPE_7);
5653 }
5654}
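/* Illustrative example for the expand-down branch above: a data segment with
   u32Limit=0x0fff and the B bit set makes 0x1000..0xffffffff the valid range,
   so a 4 byte access at 0x0ffe fails the GCPtrFirst32 < u32Limit + 1 test and
   raises the selector-bounds fault. */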
5655
5656
5657/**
5658 * Translates a virtual address to a physical address and checks if we
5659 * can access the page as specified.
5660 *
5661 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5662 * @param GCPtrMem The virtual address.
5663 * @param cbAccess The access size, for raising \#PF correctly for
5664 * FXSAVE and such.
5665 * @param fAccess The intended access.
5666 * @param pGCPhysMem Where to return the physical address.
5667 */
5668VBOXSTRICTRC iemMemPageTranslateAndCheckAccess(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint32_t cbAccess,
5669 uint32_t fAccess, PRTGCPHYS pGCPhysMem) RT_NOEXCEPT
5670{
5671 /** @todo Need a different PGM interface here. We're currently using
5672 * generic / REM interfaces. This won't cut it for R0. */
5673 /** @todo If/when PGM handles paged real-mode, we can remove the hack in
5674 * iemSvmWorldSwitch/iemVmxWorldSwitch to work around raising a page-fault
5675 * here. */
5676 PGMPTWALK Walk;
5677 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
5678 if (RT_FAILURE(rc))
5679 {
5680 LogEx(LOG_GROUP_IEM,("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
5681 /** @todo Check unassigned memory in unpaged mode. */
5682 /** @todo Reserved bits in page tables. Requires new PGM interface. */
5683#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5684 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5685 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
5686#endif
5687 *pGCPhysMem = NIL_RTGCPHYS;
5688 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess, rc);
5689 }
5690
5691 /* If the page is writable and does not have the no-exec bit set, all
5692 access is allowed. Otherwise we'll have to check more carefully... */
5693 if ((Walk.fEffective & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
5694 {
5695 /* Write to read only memory? */
5696 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5697 && !(Walk.fEffective & X86_PTE_RW)
5698 && ( ( IEM_GET_CPL(pVCpu) == 3
5699 && !(fAccess & IEM_ACCESS_WHAT_SYS))
5700 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
5701 {
5702 LogEx(LOG_GROUP_IEM,("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
5703 *pGCPhysMem = NIL_RTGCPHYS;
5704#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5705 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5706 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5707#endif
5708 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
5709 }
5710
5711 /* Kernel memory accessed by userland? */
5712 if ( !(Walk.fEffective & X86_PTE_US)
5713 && IEM_GET_CPL(pVCpu) == 3
5714 && !(fAccess & IEM_ACCESS_WHAT_SYS))
5715 {
5716 LogEx(LOG_GROUP_IEM,("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
5717 *pGCPhysMem = NIL_RTGCPHYS;
5718#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5719 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5720 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5721#endif
5722 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess, VERR_ACCESS_DENIED);
5723 }
5724
5725 /* Executing non-executable memory? */
5726 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
5727 && (Walk.fEffective & X86_PTE_PAE_NX)
5728 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) )
5729 {
5730 LogEx(LOG_GROUP_IEM,("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
5731 *pGCPhysMem = NIL_RTGCPHYS;
5732#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5733 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5734 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5735#endif
5736 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
5737 VERR_ACCESS_DENIED);
5738 }
5739 }
5740
5741 /*
5742 * Set the dirty / access flags.
5743 * ASSUMES this is set when the address is translated rather than on commit...
5744 */
5745 /** @todo testcase: check when A and D bits are actually set by the CPU. */
5746 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
5747 if ((Walk.fEffective & fAccessedDirty) != fAccessedDirty)
5748 {
5749 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
5750 AssertRC(rc2);
5751 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
5752 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
5753 }
5754
5755 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
5756 *pGCPhysMem = GCPhys;
5757 return VINF_SUCCESS;
5758}
5759
5760
5761/**
5762 * Looks up a memory mapping entry.
5763 *
5764 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
5765 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5766 * @param pvMem The memory address.
5767 * @param fAccess The access flags (type and what) to match.
5768 */
5769DECLINLINE(int) iemMapLookup(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess)
5770{
5771 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
5772 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
5773 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
5774 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5775 return 0;
5776 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
5777 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5778 return 1;
5779 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
5780 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5781 return 2;
5782 return VERR_NOT_FOUND;
5783}
5784
5785
5786/**
5787 * Finds a free memmap entry when using iNextMapping doesn't work.
5788 *
5789 * @returns Memory mapping index, 1024 on failure.
5790 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5791 */
5792static unsigned iemMemMapFindFree(PVMCPUCC pVCpu)
5793{
5794 /*
5795 * The easy case.
5796 */
5797 if (pVCpu->iem.s.cActiveMappings == 0)
5798 {
5799 pVCpu->iem.s.iNextMapping = 1;
5800 return 0;
5801 }
5802
5803 /* There should be enough mappings for all instructions. */
5804 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
5805
5806 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
5807 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
5808 return i;
5809
5810 AssertFailedReturn(1024);
5811}
5812
5813
5814/**
5815 * Commits a bounce buffer that needs writing back and unmaps it.
5816 *
5817 * @returns Strict VBox status code.
5818 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5819 * @param iMemMap The index of the buffer to commit.
5820 * @param fPostponeFail Whether we can postpone writer failures to ring-3.
5821 * Always false in ring-3, obviously.
5822 */
5823static VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPUCC pVCpu, unsigned iMemMap, bool fPostponeFail)
5824{
5825 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
5826 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
5827#ifdef IN_RING3
5828 Assert(!fPostponeFail);
5829 RT_NOREF_PV(fPostponeFail);
5830#endif
5831
5832 /*
5833 * Do the writing.
5834 */
5835 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
5836 if (!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned)
5837 {
5838 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
5839 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
5840 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
5841 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
5842 {
5843 /*
5844 * Carefully and efficiently dealing with access handler return
5845 * codes makes this a little bloated.
5846 */
5847 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
5848 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
5849 pbBuf,
5850 cbFirst,
5851 PGMACCESSORIGIN_IEM);
5852 if (rcStrict == VINF_SUCCESS)
5853 {
5854 if (cbSecond)
5855 {
5856 rcStrict = PGMPhysWrite(pVM,
5857 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5858 pbBuf + cbFirst,
5859 cbSecond,
5860 PGMACCESSORIGIN_IEM);
5861 if (rcStrict == VINF_SUCCESS)
5862 { /* nothing */ }
5863 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5864 {
5865 LogEx(LOG_GROUP_IEM,
5866 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
5867 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5868 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5869 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5870 }
5871#ifndef IN_RING3
5872 else if (fPostponeFail)
5873 {
5874 LogEx(LOG_GROUP_IEM,
5875 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5876 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5877 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5878 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
5879 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5880 return iemSetPassUpStatus(pVCpu, rcStrict);
5881 }
5882#endif
5883 else
5884 {
5885 LogEx(LOG_GROUP_IEM,
5886 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5887 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5888 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5889 return rcStrict;
5890 }
5891 }
5892 }
5893 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5894 {
5895 if (!cbSecond)
5896 {
5897 LogEx(LOG_GROUP_IEM,
5898 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
5899 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
5900 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5901 }
5902 else
5903 {
5904 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
5905 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5906 pbBuf + cbFirst,
5907 cbSecond,
5908 PGMACCESSORIGIN_IEM);
5909 if (rcStrict2 == VINF_SUCCESS)
5910 {
5911 LogEx(LOG_GROUP_IEM,
5912 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
5913 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5914 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5915 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5916 }
5917 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
5918 {
5919 LogEx(LOG_GROUP_IEM,
5920 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
5921 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5922 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
5923 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
5924 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5925 }
5926#ifndef IN_RING3
5927 else if (fPostponeFail)
5928 {
5929 LogEx(LOG_GROUP_IEM,
5930 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5931 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5932 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5933 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
5934 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5935 return iemSetPassUpStatus(pVCpu, rcStrict);
5936 }
5937#endif
5938 else
5939 {
5940 LogEx(LOG_GROUP_IEM,
5941 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5942 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5943 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
5944 return rcStrict2;
5945 }
5946 }
5947 }
5948#ifndef IN_RING3
5949 else if (fPostponeFail)
5950 {
5951 LogEx(LOG_GROUP_IEM,
5952 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5953 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5954 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5955 if (!cbSecond)
5956 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
5957 else
5958 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
5959 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5960 return iemSetPassUpStatus(pVCpu, rcStrict);
5961 }
5962#endif
5963 else
5964 {
5965 LogEx(LOG_GROUP_IEM,
5966 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
5967 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5968 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5969 return rcStrict;
5970 }
5971 }
5972 else
5973 {
5974 /*
5975 * No access handlers, much simpler.
5976 */
5977 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
5978 if (RT_SUCCESS(rc))
5979 {
5980 if (cbSecond)
5981 {
5982 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
5983 if (RT_SUCCESS(rc))
5984 { /* likely */ }
5985 else
5986 {
5987 LogEx(LOG_GROUP_IEM,
5988 ("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5989 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5990 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
5991 return rc;
5992 }
5993 }
5994 }
5995 else
5996 {
5997 LogEx(LOG_GROUP_IEM,
5998 ("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
5999 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
6000 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
6001 return rc;
6002 }
6003 }
6004 }
6005
6006#if defined(IEM_LOG_MEMORY_WRITES)
6007 Log5(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
6008 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
6009 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
6010 Log5(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
6011 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
6012 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
6013
6014 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
6015 g_cbIemWrote = cbWrote;
6016 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
6017#endif
6018
6019 /*
6020 * Free the mapping entry.
6021 */
6022 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6023 Assert(pVCpu->iem.s.cActiveMappings != 0);
6024 pVCpu->iem.s.cActiveMappings--;
6025 return VINF_SUCCESS;
6026}
6027
6028
6029/**
6030 * iemMemMap worker that deals with a request crossing pages.
6031 */
6032static VBOXSTRICTRC
6033iemMemBounceBufferMapCrossPage(PVMCPUCC pVCpu, int iMemMap, void **ppvMem, size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
6034{
6035 Assert(cbMem <= GUEST_PAGE_SIZE);
6036
6037 /*
6038 * Do the address translations.
6039 */
6040 uint32_t const cbFirstPage = GUEST_PAGE_SIZE - (uint32_t)(GCPtrFirst & GUEST_PAGE_OFFSET_MASK);
6041 RTGCPHYS GCPhysFirst;
6042 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, cbFirstPage, fAccess, &GCPhysFirst);
6043 if (rcStrict != VINF_SUCCESS)
6044 return rcStrict;
6045 Assert((GCPhysFirst & GUEST_PAGE_OFFSET_MASK) == (GCPtrFirst & GUEST_PAGE_OFFSET_MASK));
6046
6047 uint32_t const cbSecondPage = (uint32_t)cbMem - cbFirstPage;
6048 RTGCPHYS GCPhysSecond;
6049 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK,
6050 cbSecondPage, fAccess, &GCPhysSecond);
6051 if (rcStrict != VINF_SUCCESS)
6052 return rcStrict;
6053 Assert((GCPhysSecond & GUEST_PAGE_OFFSET_MASK) == 0);
6054 GCPhysSecond &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK; /** @todo why? */
6055
6056 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
6057
6058 /*
6059 * Read in the current memory content if it's a read, execute or partial
6060 * write access.
6061 */
6062 uint8_t * const pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
6063
6064 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
6065 {
6066 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
6067 {
6068 /*
6069 * Must carefully deal with access handler status codes here,
6070 * which makes the code a bit bloated.
6071 */
6072 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
6073 if (rcStrict == VINF_SUCCESS)
6074 {
6075 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
6076 if (rcStrict == VINF_SUCCESS)
6077 { /*likely */ }
6078 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6079 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6080 else
6081 {
6082 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (!!)\n",
6083 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6084 return rcStrict;
6085 }
6086 }
6087 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6088 {
6089 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
6090 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
6091 {
6092 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
6093 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6094 }
6095 else
6096 {
6097 LogEx(LOG_GROUP_IEM,
6098 ("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
6099 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict2) ));
6100 return rcStrict2;
6101 }
6102 }
6103 else
6104 {
6105 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6106 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6107 return rcStrict;
6108 }
6109 }
6110 else
6111 {
6112 /*
6113 * No informational status codes here, much more straightforward.
6114 */
6115 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
6116 if (RT_SUCCESS(rc))
6117 {
6118 Assert(rc == VINF_SUCCESS);
6119 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
6120 if (RT_SUCCESS(rc))
6121 Assert(rc == VINF_SUCCESS);
6122 else
6123 {
6124 LogEx(LOG_GROUP_IEM,
6125 ("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
6126 return rc;
6127 }
6128 }
6129 else
6130 {
6131 LogEx(LOG_GROUP_IEM,
6132 ("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
6133 return rc;
6134 }
6135 }
6136 }
6137#ifdef VBOX_STRICT
6138 else
6139 memset(pbBuf, 0xcc, cbMem);
6140 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
6141 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
6142#endif
6143 AssertCompileMemberAlignment(VMCPU, iem.s.aBounceBuffers, 64);
6144
6145 /*
6146 * Commit the bounce buffer entry.
6147 */
6148 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
6149 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
6150 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
6151 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
6152 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
6153 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
6154 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
6155 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6156 pVCpu->iem.s.cActiveMappings++;
6157
6158 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6159 *ppvMem = pbBuf;
6160 return VINF_SUCCESS;
6161}
6162
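/*
 * Illustrative sketch (not part of the build) of the cross-page split done by
 * iemMemBounceBufferMapCrossPage above, assuming a hypothetical 4 byte access
 * whose last three bytes spill into the next 4 KiB guest page:
 *
 *     RTGCPTR  const GCPtrFirst   = 0x1fff;     // last byte of a 4 KiB page (made-up address)
 *     uint32_t const cbMem        = 4;
 *     uint32_t const cbFirstPage  = GUEST_PAGE_SIZE - (uint32_t)(GCPtrFirst & GUEST_PAGE_OFFSET_MASK);  // = 1
 *     uint32_t const cbSecondPage = (uint32_t)cbMem - cbFirstPage;                                      // = 3
 *
 * The first chunk is read/written at GCPhysFirst and the remaining bytes at
 * GCPhysSecond, with the bounce buffer presenting them as one contiguous block.
 */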
6163
6164/**
6165 * iemMemMap worker that deals with iemMemPageMap failures.
6166 */
6167static VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPUCC pVCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
6168 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
6169{
6170 /*
6171 * Filter out conditions we can handle and the ones which shouldn't happen.
6172 */
6173 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
6174 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
6175 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
6176 {
6177 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
6178 return rcMap;
6179 }
6180 pVCpu->iem.s.cPotentialExits++;
6181
6182 /*
6183 * Read in the current memory content if it's a read, execute or partial
6184 * write access.
6185 */
6186 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
6187 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
6188 {
6189 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
6190 memset(pbBuf, 0xff, cbMem);
6191 else
6192 {
6193 int rc;
6194 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
6195 {
6196 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
6197 if (rcStrict == VINF_SUCCESS)
6198 { /* nothing */ }
6199 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6200 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6201 else
6202 {
6203 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6204 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6205 return rcStrict;
6206 }
6207 }
6208 else
6209 {
6210 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
6211 if (RT_SUCCESS(rc))
6212 { /* likely */ }
6213 else
6214 {
6215 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n",
6216 GCPhysFirst, rc));
6217 return rc;
6218 }
6219 }
6220 }
6221 }
6222#ifdef VBOX_STRICT
6223 else
6224 memset(pbBuf, 0xcc, cbMem);
6227 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
6228 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
6229#endif
6230
6231 /*
6232 * Commit the bounce buffer entry.
6233 */
6234 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
6235 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
6236 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
6237 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
6238 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
6239 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
6240 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
6241 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6242 pVCpu->iem.s.cActiveMappings++;
6243
6244 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6245 *ppvMem = pbBuf;
6246 return VINF_SUCCESS;
6247}
6248
6249
6250
6251/**
6252 * Maps the specified guest memory for the given kind of access.
6253 *
6254 * This may be using bounce buffering of the memory if it's crossing a page
6255 * boundary or if there is an access handler installed for any of it. Because
6256 * of lock prefix guarantees, we're in for some extra clutter when this
6257 * happens.
6258 *
6259 * This may raise a \#GP, \#SS, \#PF or \#AC.
6260 *
6261 * @returns VBox strict status code.
6262 *
6263 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6264 * @param ppvMem Where to return the pointer to the mapped memory.
6265 * @param cbMem The number of bytes to map. This is usually 1, 2, 4, 6,
6266 * 8, 12, 16, 32 or 512. When used by string operations
6267 * it can be up to a page.
6268 * @param iSegReg The index of the segment register to use for this
6269 * access. The base and limits are checked. Use UINT8_MAX
6270 * to indicate that no segmentation is required (for IDT,
6271 * GDT and LDT accesses).
6272 * @param GCPtrMem The address of the guest memory.
6273 * @param fAccess How the memory is being accessed. The
6274 * IEM_ACCESS_TYPE_XXX bit is used to figure out how to map
6275 * the memory, while the IEM_ACCESS_WHAT_XXX bit is used
6276 * when raising exceptions.
6277 * @param uAlignCtl Alignment control:
6278 * - Bits 15:0 is the alignment mask.
6279 * - Bits 31:16 for flags like IEM_MEMMAP_F_ALIGN_GP,
6280 * IEM_MEMMAP_F_ALIGN_SSE, and
6281 * IEM_MEMMAP_F_ALIGN_GP_OR_AC.
6282 * Pass zero to skip alignment.
6283 */
6284VBOXSTRICTRC iemMemMap(PVMCPUCC pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem,
6285 uint32_t fAccess, uint32_t uAlignCtl) RT_NOEXCEPT
6286{
6287 /*
6288 * Check the input and figure out which mapping entry to use.
6289 */
6290 Assert(cbMem <= sizeof(pVCpu->iem.s.aBounceBuffers[0]));
6291 Assert( cbMem <= 64 || cbMem == 512 || cbMem == 256 || cbMem == 108 || cbMem == 104 || cbMem == 102 || cbMem == 94
6292 || (iSegReg == UINT8_MAX && uAlignCtl == 0 && fAccess == IEM_ACCESS_DATA_R /* for the CPUID logging interface */) );
6293 Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
6294 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
6295
6296 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
6297 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6298 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
6299 {
6300 iMemMap = iemMemMapFindFree(pVCpu);
6301 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
6302 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
6303 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
6304 pVCpu->iem.s.aMemMappings[2].fAccess),
6305 VERR_IEM_IPE_9);
6306 }
6307
6308 /*
6309 * Map the memory, checking that we can actually access it. If something
6310 * slightly complicated happens, fall back on bounce buffering.
6311 */
6312 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
6313 if (rcStrict == VINF_SUCCESS)
6314 { /* likely */ }
6315 else
6316 return rcStrict;
6317
6318 if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE) /* Not crossing a page boundary? */
6319 { /* likely */ }
6320 else
6321 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
6322
6323 /*
6324 * Alignment check.
6325 */
6326 if ( (GCPtrMem & (uAlignCtl & UINT16_MAX)) == 0 )
6327 { /* likelyish */ }
6328 else
6329 {
6330 /* Misaligned access. */
6331 if ((fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6332 {
6333 if ( !(uAlignCtl & IEM_MEMMAP_F_ALIGN_GP)
6334 || ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_SSE)
6335 && (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) )
6336 {
6337 AssertCompile(X86_CR0_AM == X86_EFL_AC);
6338
6339 if (iemMemAreAlignmentChecksEnabled(pVCpu))
6340 return iemRaiseAlignmentCheckException(pVCpu);
6341 }
6342 else if ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_GP_OR_AC)
6343 && (GCPtrMem & 3) /* The value 4 matches 10980xe's FXSAVE and helps make bs3-cpu-basic2 work. */
6344 /** @todo may only apply to 2, 4 or 8 byte misalignments depending on the CPU
6345 * implementation. See FXSAVE/FRSTOR/XSAVE/XRSTOR/++. Using 4 for now as
6346 * that's what FXSAVE does on a 10980xe. */
6347 && iemMemAreAlignmentChecksEnabled(pVCpu))
6348 return iemRaiseAlignmentCheckException(pVCpu);
6349 else
6350 return iemRaiseGeneralProtectionFault0(pVCpu);
6351 }
6352 }
6353
6354#ifdef IEM_WITH_DATA_TLB
6355 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
6356
6357 /*
6358 * Get the TLB entry for this page.
6359 */
6360 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
6361 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
6362 if (pTlbe->uTag == uTag)
6363 {
6364# ifdef VBOX_WITH_STATISTICS
6365 pVCpu->iem.s.DataTlb.cTlbHits++;
6366# endif
6367 }
6368 else
6369 {
6370 pVCpu->iem.s.DataTlb.cTlbMisses++;
6371 PGMPTWALK Walk;
6372 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
6373 if (RT_FAILURE(rc))
6374 {
6375 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
6376# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6377 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6378 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
6379# endif
6380 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, rc);
6381 }
6382
6383 Assert(Walk.fSucceeded);
6384 pTlbe->uTag = uTag;
6385 pTlbe->fFlagsAndPhysRev = ~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A); /* skipping NX */
6386 pTlbe->GCPhys = Walk.GCPhys;
6387 pTlbe->pbMappingR3 = NULL;
6388 }
6389
6390 /*
6391 * Check TLB page table level access flags.
6392 */
6393 /* If the page is either supervisor only or non-writable, we need to do
6394 more careful access checks. */
6395 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_WRITE))
6396 {
6397 /* Write to read only memory? */
6398 if ( (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_WRITE)
6399 && (fAccess & IEM_ACCESS_TYPE_WRITE)
6400 && ( ( IEM_GET_CPL(pVCpu) == 3
6401 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6402 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
6403 {
6404 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
6405# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6406 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6407 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6408# endif
6409 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
6410 }
6411
6412 /* Kernel memory accessed by userland? */
6413 if ( (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER)
6414 && IEM_GET_CPL(pVCpu) == 3
6415 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6416 {
6417 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
6418# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6419 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6420 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6421# endif
6422 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, VERR_ACCESS_DENIED);
6423 }
6424 }
6425
6426 /*
6427 * Set the dirty / access flags.
6428 * ASSUMES this is set when the address is translated rather than on commit...
6429 */
6430 /** @todo testcase: check when A and D bits are actually set by the CPU. */
6431 uint64_t const fTlbAccessedDirty = (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PT_NO_DIRTY : 0) | IEMTLBE_F_PT_NO_ACCESSED;
6432 if (pTlbe->fFlagsAndPhysRev & fTlbAccessedDirty)
6433 {
6434 uint32_t const fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
6435 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
6436 AssertRC(rc2);
6437 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
6438 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
6439 pTlbe->fFlagsAndPhysRev &= ~fTlbAccessedDirty;
6440 }
6441
6442 /*
6443 * Look up the physical page info if necessary.
6444 */
6445 uint8_t *pbMem = NULL;
6446 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6447# ifdef IN_RING3
6448 pbMem = pTlbe->pbMappingR3;
6449# else
6450 pbMem = NULL;
6451# endif
6452 else
6453 {
6454 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
6455 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
6456 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
6457 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
6458 AssertCompile(PGMIEMGCPHYS2PTR_F_CODE_PAGE == IEMTLBE_F_PG_CODE_PAGE);
6459 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
6460 { /* likely */ }
6461 else
6462 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
6463 pTlbe->pbMappingR3 = NULL;
6464 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
6465 | IEMTLBE_F_NO_MAPPINGR3
6466 | IEMTLBE_F_PG_NO_READ
6467 | IEMTLBE_F_PG_NO_WRITE
6468 | IEMTLBE_F_PG_UNASSIGNED
6469 | IEMTLBE_F_PG_CODE_PAGE);
6470 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
6471 &pbMem, &pTlbe->fFlagsAndPhysRev);
6472 AssertRCReturn(rc, rc);
6473# ifdef IN_RING3
6474 pTlbe->pbMappingR3 = pbMem;
6475# endif
6476 }
6477
6478 /*
6479 * Check the physical page level access and mapping.
6480 */
6481 if ( !(pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ))
6482 || !(pTlbe->fFlagsAndPhysRev & ( (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PG_NO_WRITE : 0)
6483 | (fAccess & IEM_ACCESS_TYPE_READ ? IEMTLBE_F_PG_NO_READ : 0))) )
6484 { /* probably likely */ }
6485 else
6486 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem,
6487 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,
6488 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED
6489 : pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? VERR_PGM_PHYS_TLB_CATCH_ALL
6490 : VERR_PGM_PHYS_TLB_CATCH_WRITE);
6491 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3)); /* ASSUMPTIONS about PGMPhysIemGCPhys2PtrNoLock behaviour. */
6492
6493 if (pbMem)
6494 {
6495 Assert(!((uintptr_t)pbMem & GUEST_PAGE_OFFSET_MASK));
6496 pbMem = pbMem + (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6497 fAccess |= IEM_ACCESS_NOT_LOCKED;
6498 }
6499 else
6500 {
6501 Assert(!(fAccess & IEM_ACCESS_NOT_LOCKED));
6502 RTGCPHYS const GCPhysFirst = pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6503 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6504 if (rcStrict != VINF_SUCCESS)
6505 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
6506 }
6507
6508 void * const pvMem = pbMem;
6509
6510 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6511 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6512 if (fAccess & IEM_ACCESS_TYPE_READ)
6513 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6514
6515#else /* !IEM_WITH_DATA_TLB */
6516
6517 RTGCPHYS GCPhysFirst;
6518 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, &GCPhysFirst);
6519 if (rcStrict != VINF_SUCCESS)
6520 return rcStrict;
6521
6522 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6523 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6524 if (fAccess & IEM_ACCESS_TYPE_READ)
6525 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6526
6527 void *pvMem;
6528 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6529 if (rcStrict != VINF_SUCCESS)
6530 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
6531
6532#endif /* !IEM_WITH_DATA_TLB */
6533
6534 /*
6535 * Fill in the mapping table entry.
6536 */
6537 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
6538 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
6539 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6540 pVCpu->iem.s.cActiveMappings += 1;
6541
6542 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6543 *ppvMem = pvMem;
6544
6545 return VINF_SUCCESS;
6546}
6547
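/*
 * Minimal usage sketch for iemMemMap/iemMemCommitAndUnmap (illustration only,
 * not part of the build): a naturally aligned dword read through DS, mirroring
 * the pattern used by the data fetch helpers further down.  GCPtrMem stands in
 * for whatever effective address the caller has computed.
 *
 *     uint32_t const *pu32Src;
 *     VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_DS, GCPtrMem,
 *                                       IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1); // bits 15:0 = alignment mask
 *     if (rcStrict == VINF_SUCCESS)
 *     {
 *         uint32_t const uValue = *pu32Src;    // may point into guest RAM or a bounce buffer
 *         rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
 *     }
 */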
6548
6549/**
6550 * Commits the guest memory if bounce buffered and unmaps it.
6551 *
6552 * @returns Strict VBox status code.
6553 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6554 * @param pvMem The mapping.
6555 * @param fAccess The kind of access.
6556 */
6557VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess) RT_NOEXCEPT
6558{
6559 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
6560 AssertReturn(iMemMap >= 0, iMemMap);
6561
6562 /* If it's bounce buffered, we may need to write back the buffer. */
6563 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6564 {
6565 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6566 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
6567 }
6568 /* Otherwise unlock it. */
6569 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6570 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6571
6572 /* Free the entry. */
6573 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6574 Assert(pVCpu->iem.s.cActiveMappings != 0);
6575 pVCpu->iem.s.cActiveMappings--;
6576 return VINF_SUCCESS;
6577}
6578
6579#ifdef IEM_WITH_SETJMP
6580
6581/**
6582 * Maps the specified guest memory for the given kind of access, longjmp on
6583 * error.
6584 *
6585 * This may be using bounce buffering of the memory if it's crossing a page
6586 * boundary or if there is an access handler installed for any of it. Because
6587 * of lock prefix guarantees, we're in for some extra clutter when this
6588 * happens.
6589 *
6590 * This may raise a \#GP, \#SS, \#PF or \#AC.
6591 *
6592 * @returns Pointer to the mapped memory.
6593 *
6594 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6595 * @param cbMem The number of bytes to map. This is usually 1,
6596 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
6597 * string operations it can be up to a page.
6598 * @param iSegReg The index of the segment register to use for
6599 * this access. The base and limits are checked.
6600 * Use UINT8_MAX to indicate that no segmentation
6601 * is required (for IDT, GDT and LDT accesses).
6602 * @param GCPtrMem The address of the guest memory.
6603 * @param fAccess How the memory is being accessed. The
6604 * IEM_ACCESS_TYPE_XXX bit is used to figure out
6605 * how to map the memory, while the
6606 * IEM_ACCESS_WHAT_XXX bit is used when raising
6607 * exceptions.
6608 * @param uAlignCtl Alignment control:
6609 * - Bits 15:0 is the alignment mask.
6610 * - Bits 31:16 for flags like IEM_MEMMAP_F_ALIGN_GP,
6611 * IEM_MEMMAP_F_ALIGN_SSE, and
6612 * IEM_MEMMAP_F_ALIGN_GP_OR_AC.
6613 * Pass zero to skip alignment.
6614 */
6615void *iemMemMapJmp(PVMCPUCC pVCpu, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess,
6616 uint32_t uAlignCtl) IEM_NOEXCEPT_MAY_LONGJMP
6617{
6618 /*
6619 * Check the input, check segment access and adjust address
6620 * with segment base.
6621 */
6622 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
6623 Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
6624 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
6625
6626 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
6627 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
6628 else IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6629
6630 /*
6631 * Alignment check.
6632 */
6633 if ( (GCPtrMem & (uAlignCtl & UINT16_MAX)) == 0 )
6634 { /* likelyish */ }
6635 else
6636 {
6637 /* Misaligned access. */
6638 if ((fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6639 {
6640 if ( !(uAlignCtl & IEM_MEMMAP_F_ALIGN_GP)
6641 || ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_SSE)
6642 && (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) )
6643 {
6644 AssertCompile(X86_CR0_AM == X86_EFL_AC);
6645
6646 if (iemMemAreAlignmentChecksEnabled(pVCpu))
6647 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
6648 }
6649 else if ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_GP_OR_AC)
6650 && (GCPtrMem & 3) /* The value 4 matches 10980xe's FXSAVE and helps make bs3-cpu-basic2 work. */
6651 /** @todo may only apply to 2, 4 or 8 byte misalignments depending on the CPU
6652 * implementation. See FXSAVE/FRSTOR/XSAVE/XRSTOR/++. Using 4 for now as
6653 * that's what FXSAVE does on a 10980xe. */
6654 && iemMemAreAlignmentChecksEnabled(pVCpu))
6655 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
6656 else
6657 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
6658 }
6659 }
6660
6661 /*
6662 * Figure out which mapping entry to use.
6663 */
6664 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
6665 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6666 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
6667 {
6668 iMemMap = iemMemMapFindFree(pVCpu);
6669 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
6670 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
6671 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
6672 pVCpu->iem.s.aMemMappings[2].fAccess),
6673 IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_9));
6674 }
6675
6676 /*
6677 * Crossing a page boundary?
6678 */
6679 if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE)
6680 { /* No (likely). */ }
6681 else
6682 {
6683 void *pvMem;
6684 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, cbMem, GCPtrMem, fAccess);
6685 if (rcStrict == VINF_SUCCESS)
6686 return pvMem;
6687 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6688 }
6689
6690#ifdef IEM_WITH_DATA_TLB
6691 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
6692
6693 /*
6694 * Get the TLB entry for this page.
6695 */
6696 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
6697 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
6698 if (pTlbe->uTag == uTag)
6699 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
6700 else
6701 {
6702 pVCpu->iem.s.DataTlb.cTlbMisses++;
6703 PGMPTWALK Walk;
6704 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
6705 if (RT_FAILURE(rc))
6706 {
6707 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
6708# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6709 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6710 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
6711# endif
6712 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, rc);
6713 }
6714
6715 Assert(Walk.fSucceeded);
6716 pTlbe->uTag = uTag;
6717 pTlbe->fFlagsAndPhysRev = ~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A); /* skipping NX */
6718 pTlbe->GCPhys = Walk.GCPhys;
6719 pTlbe->pbMappingR3 = NULL;
6720 }
6721
6722 /*
6723 * Check the flags and physical revision.
6724 */
6725 /** @todo make the caller pass these in with fAccess. */
6726 uint64_t const fNoUser = (fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS && IEM_GET_CPL(pVCpu) == 3
6727 ? IEMTLBE_F_PT_NO_USER : 0;
6728 uint64_t const fNoWriteNoDirty = fAccess & IEM_ACCESS_TYPE_WRITE
6729 ? IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PT_NO_DIRTY
6730 | ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)
6731 || (IEM_GET_CPL(pVCpu) == 3 && (fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6732 ? IEMTLBE_F_PT_NO_WRITE : 0)
6733 : 0;
6734 uint64_t const fNoRead = fAccess & IEM_ACCESS_TYPE_READ ? IEMTLBE_F_PG_NO_READ : 0;
6735 uint8_t *pbMem = NULL;
6736 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PT_NO_ACCESSED | fNoRead | fNoWriteNoDirty | fNoUser))
6737 == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6738# ifdef IN_RING3
6739 pbMem = pTlbe->pbMappingR3;
6740# else
6741 pbMem = NULL;
6742# endif
6743 else
6744 {
6745 /*
6746 * Okay, something isn't quite right or needs refreshing.
6747 */
6748 /* Write to read only memory? */
6749 if (pTlbe->fFlagsAndPhysRev & fNoWriteNoDirty & IEMTLBE_F_PT_NO_WRITE)
6750 {
6751 LogEx(LOG_GROUP_IEM, ("iemMemMapJmp: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
6752# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6753 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6754 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6755# endif
6756 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
6757 }
6758
6759 /* Kernel memory accessed by userland? */
6760 if (pTlbe->fFlagsAndPhysRev & fNoUser & IEMTLBE_F_PT_NO_USER)
6761 {
6762 LogEx(LOG_GROUP_IEM, ("iemMemMapJmp: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
6763# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6764 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6765 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6766# endif
6767 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, VERR_ACCESS_DENIED);
6768 }
6769
6770 /* Set the dirty / access flags.
6771 ASSUMES this is set when the address is translated rather than on commit... */
6772 /** @todo testcase: check when A and D bits are actually set by the CPU. */
6773 if (pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY) | IEMTLBE_F_PT_NO_ACCESSED))
6774 {
6775 uint32_t const fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
6776 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
6777 AssertRC(rc2);
6778 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
6779 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
6780 pTlbe->fFlagsAndPhysRev &= ~((fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY) | IEMTLBE_F_PT_NO_ACCESSED);
6781 }
6782
6783 /*
6784 * Check if the physical page info needs updating.
6785 */
6786 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6787# ifdef IN_RING3
6788 pbMem = pTlbe->pbMappingR3;
6789# else
6790 pbMem = NULL;
6791# endif
6792 else
6793 {
6794 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
6795 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
6796 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
6797 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
6798 AssertCompile(PGMIEMGCPHYS2PTR_F_CODE_PAGE == IEMTLBE_F_PG_CODE_PAGE);
6799 pTlbe->pbMappingR3 = NULL;
6800 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
6801 | IEMTLBE_F_NO_MAPPINGR3
6802 | IEMTLBE_F_PG_NO_READ
6803 | IEMTLBE_F_PG_NO_WRITE
6804 | IEMTLBE_F_PG_UNASSIGNED
6805 | IEMTLBE_F_PG_CODE_PAGE);
6806 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
6807 &pbMem, &pTlbe->fFlagsAndPhysRev);
6808 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
6809# ifdef IN_RING3
6810 pTlbe->pbMappingR3 = pbMem;
6811# endif
6812 }
6813
6814 /*
6815 * Check the physical page level access and mapping.
6816 */
6817 if (!(pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty | fNoRead) & (IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ))))
6818 { /* probably likely */ }
6819 else
6820 {
6821 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, (void **)&pbMem, cbMem,
6822 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,
6823 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED
6824 : pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? VERR_PGM_PHYS_TLB_CATCH_ALL
6825 : VERR_PGM_PHYS_TLB_CATCH_WRITE);
6826 if (rcStrict == VINF_SUCCESS)
6827 return pbMem;
6828 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6829 }
6830 }
6831 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3)); /* ASSUMPTIONS about PGMPhysIemGCPhys2PtrNoLock behaviour. */
6832
6833 if (pbMem)
6834 {
6835 Assert(!((uintptr_t)pbMem & GUEST_PAGE_OFFSET_MASK));
6836 pbMem = pbMem + (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6837 fAccess |= IEM_ACCESS_NOT_LOCKED;
6838 }
6839 else
6840 {
6841 Assert(!(fAccess & IEM_ACCESS_NOT_LOCKED));
6842 RTGCPHYS const GCPhysFirst = pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6843 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6844 if (rcStrict == VINF_SUCCESS)
6845 return pbMem;
6846 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6847 }
6848
6849 void * const pvMem = pbMem;
6850
6851 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6852 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6853 if (fAccess & IEM_ACCESS_TYPE_READ)
6854 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6855
6856#else /* !IEM_WITH_DATA_TLB */
6857
6858
6859 RTGCPHYS GCPhysFirst;
6860 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, &GCPhysFirst);
6861 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
6862 else IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6863
6864 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6865 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6866 if (fAccess & IEM_ACCESS_TYPE_READ)
6867 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6868
6869 void *pvMem;
6870 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6871 if (rcStrict == VINF_SUCCESS)
6872 { /* likely */ }
6873 else
6874 {
6875 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
6876 if (rcStrict == VINF_SUCCESS)
6877 return pvMem;
6878 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6879 }
6880
6881#endif /* !IEM_WITH_DATA_TLB */
6882
6883 /*
6884 * Fill in the mapping table entry.
6885 */
6886 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
6887 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
6888 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6889 pVCpu->iem.s.cActiveMappings++;
6890
6891 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6892 return pvMem;
6893}
6894
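/*
 * Minimal usage sketch for the longjmp variant (illustration only, not part of
 * the build): the same dword read as sketched after iemMemMap, but errors
 * unwind via longjmp instead of being returned, so no status code needs to be
 * checked at the call site.
 *
 *     uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), X86_SREG_DS, GCPtrMem,
 *                                                              IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
 *     uint32_t const uValue   = *pu32Src;
 *     iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
 */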
6895
6896/**
6897 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
6898 *
6899 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6900 * @param pvMem The mapping.
6901 * @param fAccess The kind of access.
6902 */
6903void iemMemCommitAndUnmapJmp(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP
6904{
6905 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
6906 AssertStmt(iMemMap >= 0, IEM_DO_LONGJMP(pVCpu, iMemMap));
6907
6908 /* If it's bounce buffered, we may need to write back the buffer. */
6909 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6910 {
6911 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6912 {
6913 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
6914 if (rcStrict == VINF_SUCCESS)
6915 return;
6916 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6917 }
6918 }
6919 /* Otherwise unlock it. */
6920 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6921 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6922
6923 /* Free the entry. */
6924 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6925 Assert(pVCpu->iem.s.cActiveMappings != 0);
6926 pVCpu->iem.s.cActiveMappings--;
6927}
6928
6929
6930/** Fallback for iemMemCommitAndUnmapRwJmp. */
6931void iemMemCommitAndUnmapRwSafeJmp(PVMCPUCC pVCpu, void *pvMem, uint8_t bMapInfo) IEM_NOEXCEPT_MAY_LONGJMP
6932{
6933 Assert(bMapInfo == (1 | ((IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE) << 4)) ); RT_NOREF_PV(bMapInfo);
6934 iemMemCommitAndUnmapJmp(pVCpu, pvMem, IEM_ACCESS_DATA_RW);
6935}
6936
6937
6938/** Fallback for iemMemCommitAndUnmapWoJmp. */
6939void iemMemCommitAndUnmapWoSafeJmp(PVMCPUCC pVCpu, void *pvMem, uint8_t bMapInfo) IEM_NOEXCEPT_MAY_LONGJMP
6940{
6941 Assert(bMapInfo == (1 | (IEM_ACCESS_TYPE_WRITE << 4)) ); RT_NOREF_PV(bMapInfo);
6942 iemMemCommitAndUnmapJmp(pVCpu, pvMem, IEM_ACCESS_DATA_W);
6943}
6944
6945
6946/** Fallback for iemMemCommitAndUnmapRoJmp. */
6947void iemMemCommitAndUnmapRoSafeJmp(PVMCPUCC pVCpu, const void *pvMem, uint8_t bMapInfo) IEM_NOEXCEPT_MAY_LONGJMP
6948{
6949 Assert(bMapInfo == (1 | (IEM_ACCESS_TYPE_READ << 4)) ); RT_NOREF_PV(bMapInfo);
6950 iemMemCommitAndUnmapJmp(pVCpu, (void *)pvMem, IEM_ACCESS_DATA_R);
6951}
6952
6953#endif /* IEM_WITH_SETJMP */
6954
6955#ifndef IN_RING3
6956/**
6957 * Commits the guest memory if bounce buffered and unmaps it; if any bounce
6958 * buffer part shows trouble, the write is postponed to ring-3 (VMCPU_FF_IEM is set).
6959 *
6960 * Allows the instruction to be completed and retired, while the IEM user will
6961 * return to ring-3 immediately afterwards and do the postponed writes there.
6962 *
6963 * @returns VBox status code (no strict statuses). Caller must check
6964 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
6965 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6966 * @param pvMem The mapping.
6967 * @param fAccess The kind of access.
6968 */
6969VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess) RT_NOEXCEPT
6970{
6971 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
6972 AssertReturn(iMemMap >= 0, iMemMap);
6973
6974 /* If it's bounce buffered, we may need to write back the buffer. */
6975 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6976 {
6977 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6978 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
6979 }
6980 /* Otherwise unlock it. */
6981 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6982 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6983
6984 /* Free the entry. */
6985 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6986 Assert(pVCpu->iem.s.cActiveMappings != 0);
6987 pVCpu->iem.s.cActiveMappings--;
6988 return VINF_SUCCESS;
6989}
6990#endif
6991
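/*
 * Sketch of the caller-side contract described above (illustration only, not
 * part of the build): after a postponed commit the instruction may retire, but
 * VMCPU_FF_IEM must be checked before iterating e.g. a string instruction.
 *
 *     VBOXSTRICTRC rcStrict = iemMemCommitAndUnmapPostponeTroubleToR3(pVCpu, pvMem, IEM_ACCESS_DATA_W);
 *     if (   rcStrict == VINF_SUCCESS
 *         && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
 *     {
 *         // safe to continue with the next iteration in this context
 *     }
 *     else
 *     {
 *         // return to ring-3 so the postponed writes can be carried out there
 *     }
 */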
6992
6993/**
6994 * Rolls back mappings, releasing page locks and such.
6995 *
6996 * The caller shall only call this after checking cActiveMappings.
6997 *
6998 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6999 */
7000void iemMemRollback(PVMCPUCC pVCpu) RT_NOEXCEPT
7001{
7002 Assert(pVCpu->iem.s.cActiveMappings > 0);
7003
7004 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
7005 while (iMemMap-- > 0)
7006 {
7007 uint32_t const fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
7008 if (fAccess != IEM_ACCESS_INVALID)
7009 {
7010 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
7011 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
7012 if (!(fAccess & (IEM_ACCESS_BOUNCE_BUFFERED | IEM_ACCESS_NOT_LOCKED)))
7013 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
7014 AssertMsg(pVCpu->iem.s.cActiveMappings > 0,
7015 ("iMemMap=%u fAccess=%#x pv=%p GCPhysFirst=%RGp GCPhysSecond=%RGp\n",
7016 iMemMap, fAccess, pVCpu->iem.s.aMemMappings[iMemMap].pv,
7017 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond));
7018 pVCpu->iem.s.cActiveMappings--;
7019 }
7020 }
7021}
7022
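/*
 * Typical call-site sketch (illustration only): on a failure path the mappings
 * are only rolled back if any are still active, per the contract above.
 *
 *     if (rcStrict != VINF_SUCCESS && pVCpu->iem.s.cActiveMappings > 0)
 *         iemMemRollback(pVCpu);
 */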
7023
7024/*
7025 * Instantiate R/W templates.
7026 */
7027#define TMPL_MEM_WITH_STACK
7028
7029#define TMPL_MEM_TYPE uint8_t
7030#define TMPL_MEM_FN_SUFF U8
7031#define TMPL_MEM_FMT_TYPE "%#04x"
7032#define TMPL_MEM_FMT_DESC "byte"
7033#include "IEMAllMemRWTmpl.cpp.h"
7034
7035#define TMPL_MEM_TYPE uint16_t
7036#define TMPL_MEM_FN_SUFF U16
7037#define TMPL_MEM_FMT_TYPE "%#06x"
7038#define TMPL_MEM_FMT_DESC "word"
7039#include "IEMAllMemRWTmpl.cpp.h"
7040
7041#define TMPL_WITH_PUSH_SREG
7042#define TMPL_MEM_TYPE uint32_t
7043#define TMPL_MEM_FN_SUFF U32
7044#define TMPL_MEM_FMT_TYPE "%#010x"
7045#define TMPL_MEM_FMT_DESC "dword"
7046#include "IEMAllMemRWTmpl.cpp.h"
7047#undef TMPL_WITH_PUSH_SREG
7048
7049#define TMPL_MEM_TYPE uint64_t
7050#define TMPL_MEM_FN_SUFF U64
7051#define TMPL_MEM_FMT_TYPE "%#018RX64"
7052#define TMPL_MEM_FMT_DESC "qword"
7053#include "IEMAllMemRWTmpl.cpp.h"
7054
7055#undef TMPL_MEM_WITH_STACK
7056
7057#define TMPL_MEM_TYPE uint64_t
7058#define TMPL_MEM_TYPE_ALIGN (sizeof(uint64_t) * 2 - 1)
7059#define TMPL_MEM_FN_SUFF U64AlignedU128
7060#define TMPL_MEM_FMT_TYPE "%#018RX64"
7061#define TMPL_MEM_FMT_DESC "qword"
7062#include "IEMAllMemRWTmpl.cpp.h"
7063
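/*
 * The template instantiations above are expected to expand into typed fetch,
 * store and (with TMPL_MEM_WITH_STACK) push/pop helpers named after
 * TMPL_MEM_FN_SUFF; e.g. the uint32_t instantiation would yield something along
 * the lines of the following (names inferred from the macros, treat them as an
 * assumption):
 *
 *     uint32_t u32Value;
 *     VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, &u32Value, X86_SREG_DS, GCPtrMem);
 */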
7064
7065/**
7066 * Fetches a data dword and zero extends it to a qword.
7067 *
7068 * @returns Strict VBox status code.
7069 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7070 * @param pu64Dst Where to return the qword.
7071 * @param iSegReg The index of the segment register to use for
7072 * this access. The base and limits are checked.
7073 * @param GCPtrMem The address of the guest memory.
7074 */
7075VBOXSTRICTRC iemMemFetchDataU32_ZX_U64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7076{
7077 /* The lazy approach for now... */
7078 uint32_t const *pu32Src;
7079 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem,
7080 IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
7081 if (rc == VINF_SUCCESS)
7082 {
7083 *pu64Dst = *pu32Src;
7084 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
7085 Log(("IEM RD dword %d|%RGv: %#010RX64\n", iSegReg, GCPtrMem, *pu64Dst));
7086 }
7087 return rc;
7088}
7089
7090
7091#ifdef SOME_UNUSED_FUNCTION
7092/**
7093 * Fetches a data dword and sign extends it to a qword.
7094 *
7095 * @returns Strict VBox status code.
7096 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7097 * @param pu64Dst Where to return the sign extended value.
7098 * @param iSegReg The index of the segment register to use for
7099 * this access. The base and limits are checked.
7100 * @param GCPtrMem The address of the guest memory.
7101 */
7102VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7103{
7104 /* The lazy approach for now... */
7105 int32_t const *pi32Src;
7106 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem,
7107 IEM_ACCESS_DATA_R, sizeof(*pi32Src) - 1);
7108 if (rc == VINF_SUCCESS)
7109 {
7110 *pu64Dst = *pi32Src;
7111 rc = iemMemCommitAndUnmap(pVCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
7112 Log(("IEM RD dword %d|%RGv: %#010x\n", iSegReg, GCPtrMem, (uint32_t)*pu64Dst));
7113 }
7114#ifdef __GNUC__ /* warning: GCC may be a royal pain */
7115 else
7116 *pu64Dst = 0;
7117#endif
7118 return rc;
7119}
7120#endif
7121
7122
7123/**
7124 * Fetches a data tword.
7125 *
7126 * @returns Strict VBox status code.
7127 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7128 * @param pr80Dst Where to return the tword.
7129 * @param iSegReg The index of the segment register to use for
7130 * this access. The base and limits are checked.
7131 * @param GCPtrMem The address of the guest memory.
7132 */
7133VBOXSTRICTRC iemMemFetchDataR80(PVMCPUCC pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7134{
7135 /* The lazy approach for now... */
7136 PCRTFLOAT80U pr80Src;
7137 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R, 7);
7138 if (rc == VINF_SUCCESS)
7139 {
7140 *pr80Dst = *pr80Src;
7141 rc = iemMemCommitAndUnmap(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
7142 Log(("IEM RD tword %d|%RGv: %.10Rhxs\n", iSegReg, GCPtrMem, pr80Dst));
7143 }
7144 return rc;
7145}
7146
7147
7148#ifdef IEM_WITH_SETJMP
7149/**
7150 * Fetches a data tword, longjmp on error.
7151 *
7152 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7153 * @param pr80Dst Where to return the tword.
7154 * @param iSegReg The index of the segment register to use for
7155 * this access. The base and limits are checked.
7156 * @param GCPtrMem The address of the guest memory.
7157 */
7158void iemMemFetchDataR80Jmp(PVMCPUCC pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7159{
7160 /* The lazy approach for now... */
7161 PCRTFLOAT80U pr80Src = (PCRTFLOAT80U)iemMemMapJmp(pVCpu, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R, 7);
7162 *pr80Dst = *pr80Src;
7163 iemMemCommitAndUnmapJmp(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
7164 Log(("IEM RD tword %d|%RGv: %.10Rhxs\n", iSegReg, GCPtrMem, pr80Dst));
7165}
7166#endif
7167
7168
7169/**
7170 * Fetches a data decimal tword.
7171 *
7172 * @returns Strict VBox status code.
7173 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7174 * @param pd80Dst Where to return the tword.
7175 * @param iSegReg The index of the segment register to use for
7176 * this access. The base and limits are checked.
7177 * @param GCPtrMem The address of the guest memory.
7178 */
7179VBOXSTRICTRC iemMemFetchDataD80(PVMCPUCC pVCpu, PRTPBCD80U pd80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7180{
7181 /* The lazy approach for now... */
7182 PCRTPBCD80U pd80Src;
7183 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pd80Src, sizeof(*pd80Src), iSegReg, GCPtrMem,
7184 IEM_ACCESS_DATA_R, 7 /** @todo FBLD alignment check */);
7185 if (rc == VINF_SUCCESS)
7186 {
7187 *pd80Dst = *pd80Src;
7188 rc = iemMemCommitAndUnmap(pVCpu, (void *)pd80Src, IEM_ACCESS_DATA_R);
7189 Log(("IEM RD tword %d|%RGv: %.10Rhxs\n", iSegReg, GCPtrMem, pd80Dst));
7190 }
7191 return rc;
7192}
7193
7194
7195#ifdef IEM_WITH_SETJMP
7196/**
7197 * Fetches a data decimal tword, longjmp on error.
7198 *
7199 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7200 * @param pd80Dst Where to return the tword.
7201 * @param iSegReg The index of the segment register to use for
7202 * this access. The base and limits are checked.
7203 * @param GCPtrMem The address of the guest memory.
7204 */
7205void iemMemFetchDataD80Jmp(PVMCPUCC pVCpu, PRTPBCD80U pd80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7206{
7207 /* The lazy approach for now... */
7208 PCRTPBCD80U pd80Src = (PCRTPBCD80U)iemMemMapJmp(pVCpu, sizeof(*pd80Src), iSegReg, GCPtrMem,
7209 IEM_ACCESS_DATA_R, 7 /** @todo FBSTP alignment check */);
7210 *pd80Dst = *pd80Src;
7211 iemMemCommitAndUnmapJmp(pVCpu, (void *)pd80Src, IEM_ACCESS_DATA_R);
7212 Log(("IEM RD tword %d|%RGv: %.10Rhxs\n", iSegReg, GCPtrMem, pd80Dst));
7213}
7214#endif
7215
7216
7217/**
7218 * Fetches a data dqword (double qword), generally SSE related.
7219 *
7220 * @returns Strict VBox status code.
7221 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7222 * @param pu128Dst Where to return the dqword.
7223 * @param iSegReg The index of the segment register to use for
7224 * this access. The base and limits are checked.
7225 * @param GCPtrMem The address of the guest memory.
7226 */
7227VBOXSTRICTRC iemMemFetchDataU128(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7228{
7229 /* The lazy approach for now... */
7230 PCRTUINT128U pu128Src;
7231 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem,
7232 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7233 if (rc == VINF_SUCCESS)
7234 {
7235 pu128Dst->au64[0] = pu128Src->au64[0];
7236 pu128Dst->au64[1] = pu128Src->au64[1];
7237 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7238 Log(("IEM RD dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
7239 }
7240 return rc;
7241}
7242
7243
7244#ifdef IEM_WITH_SETJMP
7245/**
7246 * Fetches a data dqword (double qword), generally SSE related.
7247 *
7248 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7249 * @param pu128Dst Where to return the dqword.
7250 * @param iSegReg The index of the segment register to use for
7251 * this access. The base and limits are checked.
7252 * @param GCPtrMem The address of the guest memory.
7253 */
7254void iemMemFetchDataU128Jmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7255{
7256 /* The lazy approach for now... */
7257 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem,
7258 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7259 pu128Dst->au64[0] = pu128Src->au64[0];
7260 pu128Dst->au64[1] = pu128Src->au64[1];
7261 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7262 Log(("IEM RD dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
7263}
7264#endif
7265
7266
7267/**
7268 * Fetches a data dqword (double qword) at an aligned address, generally SSE
7269 * related.
7270 *
7271 * Raises \#GP(0) if not aligned.
7272 *
7273 * @returns Strict VBox status code.
7274 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7275 * @param pu128Dst Where to return the dqword.
7276 * @param iSegReg The index of the segment register to use for
7277 * this access. The base and limits are checked.
7278 * @param GCPtrMem The address of the guest memory.
7279 */
7280VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7281{
7282 /* The lazy approach for now... */
7283 PCRTUINT128U pu128Src;
7284 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem,
7285 IEM_ACCESS_DATA_R, (sizeof(*pu128Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7286 if (rc == VINF_SUCCESS)
7287 {
7288 pu128Dst->au64[0] = pu128Src->au64[0];
7289 pu128Dst->au64[1] = pu128Src->au64[1];
7290 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7291 Log(("IEM RD dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
7292 }
7293 return rc;
7294}
7295
7296
7297#ifdef IEM_WITH_SETJMP
7298/**
7299 * Fetches a data dqword (double qword) at an aligned address, generally SSE
7300 * related, longjmp on error.
7301 *
7302 * Raises \#GP(0) if not aligned.
7303 *
7304 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7305 * @param pu128Dst Where to return the dqword.
7306 * @param iSegReg The index of the segment register to use for
7307 * this access. The base and limits are checked.
7308 * @param GCPtrMem The address of the guest memory.
7309 */
7310void iemMemFetchDataU128AlignedSseJmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg,
7311 RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7312{
7313 /* The lazy approach for now... */
7314 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
7315 (sizeof(*pu128Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7316 pu128Dst->au64[0] = pu128Src->au64[0];
7317 pu128Dst->au64[1] = pu128Src->au64[1];
7318 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7319 Log(("IEM RD dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
7320}
7321#endif
7322
7323
7324/**
7325 * Fetches a data oword (octo word), generally AVX related.
7326 *
7327 * @returns Strict VBox status code.
7328 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7329 * @param pu256Dst Where to return the oword.
7330 * @param iSegReg The index of the segment register to use for
7331 * this access. The base and limits are checked.
7332 * @param GCPtrMem The address of the guest memory.
7333 */
7334VBOXSTRICTRC iemMemFetchDataU256(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7335{
7336 /* The lazy approach for now... */
7337 PCRTUINT256U pu256Src;
7338 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem,
7339 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7340 if (rc == VINF_SUCCESS)
7341 {
7342 pu256Dst->au64[0] = pu256Src->au64[0];
7343 pu256Dst->au64[1] = pu256Src->au64[1];
7344 pu256Dst->au64[2] = pu256Src->au64[2];
7345 pu256Dst->au64[3] = pu256Src->au64[3];
7346 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
7347 Log(("IEM RD qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7348 }
7349 return rc;
7350}
7351
7352
7353#ifdef IEM_WITH_SETJMP
7354/**
7355 * Fetches a data oword (octo word), generally AVX related.
7356 *
7357 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7358 * @param pu256Dst Where to return the oword.
7359 * @param iSegReg The index of the segment register to use for
7360 * this access. The base and limits are checked.
7361 * @param GCPtrMem The address of the guest memory.
7362 */
7363void iemMemFetchDataU256Jmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7364{
7365 /* The lazy approach for now... */
7366 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem,
7367 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7368 pu256Dst->au64[0] = pu256Src->au64[0];
7369 pu256Dst->au64[1] = pu256Src->au64[1];
7370 pu256Dst->au64[2] = pu256Src->au64[2];
7371 pu256Dst->au64[3] = pu256Src->au64[3];
7372 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
7373 Log(("IEM RD qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7374}
7375#endif
7376
7377
7378/**
7379 * Fetches a data qqword (quad qword) at an aligned address, generally AVX
7380 * related.
7381 *
7382 * Raises \#GP(0) if not aligned.
7383 *
7384 * @returns Strict VBox status code.
7385 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7386 * @param pu256Dst Where to return the qqword.
7387 * @param iSegReg The index of the segment register to use for
7388 * this access. The base and limits are checked.
7389 * @param GCPtrMem The address of the guest memory.
7390 */
7391VBOXSTRICTRC iemMemFetchDataU256AlignedSse(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7392{
7393 /* The lazy approach for now... */
7394 PCRTUINT256U pu256Src;
7395 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem,
7396 IEM_ACCESS_DATA_R, (sizeof(*pu256Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7397 if (rc == VINF_SUCCESS)
7398 {
7399 pu256Dst->au64[0] = pu256Src->au64[0];
7400 pu256Dst->au64[1] = pu256Src->au64[1];
7401 pu256Dst->au64[2] = pu256Src->au64[2];
7402 pu256Dst->au64[3] = pu256Src->au64[3];
7403 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
7404 Log(("IEM RD qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7405 }
7406 return rc;
7407}
7408
7409
7410#ifdef IEM_WITH_SETJMP
7411/**
7412 * Fetches a data qqword (quad qword) at an aligned address, generally AVX
7413 * related, longjmp on error.
7414 *
7415 * Raises \#GP(0) if not aligned.
7416 *
7417 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7418 * @param pu256Dst Where to return the qqword.
7419 * @param iSegReg The index of the segment register to use for
7420 * this access. The base and limits are checked.
7421 * @param GCPtrMem The address of the guest memory.
7422 */
7423void iemMemFetchDataU256AlignedSseJmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg,
7424 RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7425{
7426 /* The lazy approach for now... */
7427 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
7428 (sizeof(*pu256Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7429 pu256Dst->au64[0] = pu256Src->au64[0];
7430 pu256Dst->au64[1] = pu256Src->au64[1];
7431 pu256Dst->au64[2] = pu256Src->au64[2];
7432 pu256Dst->au64[3] = pu256Src->au64[3];
7433 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
7434 Log(("IEM RD qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7435}
7436#endif
7437
7438
7439
7440/**
7441 * Fetches a descriptor register (lgdt, lidt).
7442 *
7443 * @returns Strict VBox status code.
7444 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7445 * @param pcbLimit Where to return the limit.
7446 * @param pGCPtrBase Where to return the base.
7447 * @param iSegReg The index of the segment register to use for
7448 * this access. The base and limits are checked.
7449 * @param GCPtrMem The address of the guest memory.
7450 * @param enmOpSize The effective operand size.
7451 */
7452VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPUCC pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
7453 RTGCPTR GCPtrMem, IEMMODE enmOpSize) RT_NOEXCEPT
7454{
7455 /*
7456 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
7457 * little special:
7458 * - The two reads are done separately.
7459 * - Operand size override works in 16-bit and 32-bit code, but not in 64-bit code.
7460 * - We suspect the 386 to actually commit the limit before the base in
7461 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
7462 * don't try to emulate this eccentric behavior, because it's not well
7463 * enough understood and rather hard to trigger.
7464 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
7465 */
7466 VBOXSTRICTRC rcStrict;
7467 if (IEM_IS_64BIT_CODE(pVCpu))
7468 {
7469 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7470 if (rcStrict == VINF_SUCCESS)
7471 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
7472 }
7473 else
7474 {
7475 uint32_t uTmp = 0; /* (silence Visual C++'s maybe-used-uninitialized warning) */
7476 if (enmOpSize == IEMMODE_32BIT)
7477 {
7478 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
7479 {
7480 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7481 if (rcStrict == VINF_SUCCESS)
7482 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7483 }
7484 else
7485 {
7486 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
7487 if (rcStrict == VINF_SUCCESS)
7488 {
7489 *pcbLimit = (uint16_t)uTmp;
7490 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7491 }
7492 }
7493 if (rcStrict == VINF_SUCCESS)
7494 *pGCPtrBase = uTmp;
7495 }
7496 else
7497 {
7498 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7499 if (rcStrict == VINF_SUCCESS)
7500 {
7501 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7502 if (rcStrict == VINF_SUCCESS)
7503 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
7504 }
7505 }
7506 }
7507 return rcStrict;
7508}
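/*
 * Illustrative sketch (not part of the build) of how an LGDT/LIDT style
 * caller might use iemMemFetchDataXdtr; GCPtrEffSrc and enmEffOpSize are
 * placeholder names and the actual commit to the guest GDTR/IDTR is elided:
 *
 *      uint16_t     cbLimit;
 *      RTGCPTR      GCPtrBase;
 *      VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(pVCpu, &cbLimit, &GCPtrBase,
 *                                                  pVCpu->iem.s.iEffSeg, GCPtrEffSrc, enmEffOpSize);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;    // propagates #GP/#SS/#PF from the two reads
 *      // ... load cbLimit and GCPtrBase into the guest GDTR or IDTR ...
 */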
7509
7510
7511/**
7512 * Stores a data dqword.
7513 *
7514 * @returns Strict VBox status code.
7515 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7516 * @param iSegReg The index of the segment register to use for
7517 * this access. The base and limits are checked.
7518 * @param GCPtrMem The address of the guest memory.
7519 * @param u128Value The value to store.
7520 */
7521VBOXSTRICTRC iemMemStoreDataU128(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT
7522{
7523 /* The lazy approach for now... */
7524 PRTUINT128U pu128Dst;
7525 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem,
7526 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7527 if (rc == VINF_SUCCESS)
7528 {
7529 pu128Dst->au64[0] = u128Value.au64[0];
7530 pu128Dst->au64[1] = u128Value.au64[1];
7531 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
7532 Log5(("IEM WR dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
7533 }
7534 return rc;
7535}
7536
7537
7538#ifdef IEM_WITH_SETJMP
7539/**
7540 * Stores a data dqword, longjmp on error.
7541 *
7542 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7543 * @param iSegReg The index of the segment register to use for
7544 * this access. The base and limits are checked.
7545 * @param GCPtrMem The address of the guest memory.
7546 * @param u128Value The value to store.
7547 */
7548void iemMemStoreDataU128Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) IEM_NOEXCEPT_MAY_LONGJMP
7549{
7550 /* The lazy approach for now... */
7551 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem,
7552 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7553 pu128Dst->au64[0] = u128Value.au64[0];
7554 pu128Dst->au64[1] = u128Value.au64[1];
7555 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
7556 Log5(("IEM WR dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
7557}
7558#endif
7559
7560
7561/**
7562 * Stores a data dqword, SSE aligned.
7563 *
7564 * @returns Strict VBox status code.
7565 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7566 * @param iSegReg The index of the segment register to use for
7567 * this access. The base and limits are checked.
7568 * @param GCPtrMem The address of the guest memory.
7569 * @param u128Value The value to store.
7570 */
7571VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT
7572{
7573 /* The lazy approach for now... */
7574 PRTUINT128U pu128Dst;
7575 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W,
7576 (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7577 if (rc == VINF_SUCCESS)
7578 {
7579 pu128Dst->au64[0] = u128Value.au64[0];
7580 pu128Dst->au64[1] = u128Value.au64[1];
7581 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
7582 Log5(("IEM WR dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
7583 }
7584 return rc;
7585}
7586
7587
7588#ifdef IEM_WITH_SETJMP
7589/**
7590 * Stores a data dqword, SSE aligned, longjmp on error.
7591 *
7593 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7594 * @param iSegReg The index of the segment register to use for
7595 * this access. The base and limits are checked.
7596 * @param GCPtrMem The address of the guest memory.
7597 * @param u128Value The value to store.
7598 */
7599void iemMemStoreDataU128AlignedSseJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem,
7600 RTUINT128U u128Value) IEM_NOEXCEPT_MAY_LONGJMP
7601{
7602 /* The lazy approach for now... */
7603 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W,
7604 (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7605 pu128Dst->au64[0] = u128Value.au64[0];
7606 pu128Dst->au64[1] = u128Value.au64[1];
7607 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
7608 Log5(("IEM WR dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
7609}
7610#endif
7611
7612
7613/**
7614 * Stores a data qqword.
7615 *
7616 * @returns Strict VBox status code.
7617 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7618 * @param iSegReg The index of the segment register to use for
7619 * this access. The base and limits are checked.
7620 * @param GCPtrMem The address of the guest memory.
7621 * @param pu256Value Pointer to the value to store.
7622 */
7623VBOXSTRICTRC iemMemStoreDataU256(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
7624{
7625 /* The lazy approach for now... */
7626 PRTUINT256U pu256Dst;
7627 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7628 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7629 if (rc == VINF_SUCCESS)
7630 {
7631 pu256Dst->au64[0] = pu256Value->au64[0];
7632 pu256Dst->au64[1] = pu256Value->au64[1];
7633 pu256Dst->au64[2] = pu256Value->au64[2];
7634 pu256Dst->au64[3] = pu256Value->au64[3];
7635 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
7636 Log5(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7637 }
7638 return rc;
7639}
7640
7641
7642#ifdef IEM_WITH_SETJMP
7643/**
7644 * Stores a data qqword, longjmp on error.
7645 *
7646 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7647 * @param iSegReg The index of the segment register to use for
7648 * this access. The base and limits are checked.
7649 * @param GCPtrMem The address of the guest memory.
7650 * @param pu256Value Pointer to the value to store.
7651 */
7652void iemMemStoreDataU256Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP
7653{
7654 /* The lazy approach for now... */
7655 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7656 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7657 pu256Dst->au64[0] = pu256Value->au64[0];
7658 pu256Dst->au64[1] = pu256Value->au64[1];
7659 pu256Dst->au64[2] = pu256Value->au64[2];
7660 pu256Dst->au64[3] = pu256Value->au64[3];
7661 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
7662 Log5(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7663}
7664#endif
7665
7666
7667/**
7668 * Stores a data qqword, AVX aligned, raising \#GP(0) if misaligned.
7669 *
7670 * @returns Strict VBox status code.
7671 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7672 * @param iSegReg The index of the segment register to use for
7673 * this access. The base and limits are checked.
7674 * @param GCPtrMem The address of the guest memory.
7675 * @param pu256Value Pointer to the value to store.
7676 */
7677VBOXSTRICTRC iemMemStoreDataU256AlignedAvx(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
7678{
7679 /* The lazy approach for now... */
7680 PRTUINT256U pu256Dst;
7681 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7682 IEM_ACCESS_DATA_W, (sizeof(*pu256Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP);
7683 if (rc == VINF_SUCCESS)
7684 {
7685 pu256Dst->au64[0] = pu256Value->au64[0];
7686 pu256Dst->au64[1] = pu256Value->au64[1];
7687 pu256Dst->au64[2] = pu256Value->au64[2];
7688 pu256Dst->au64[3] = pu256Value->au64[3];
7689 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
7690 Log5(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7691 }
7692 return rc;
7693}
7694
7695
7696#ifdef IEM_WITH_SETJMP
7697/**
7698 * Stores a data qqword, AVX aligned, longjmp on error.
7699 *
7701 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7702 * @param iSegReg The index of the segment register to use for
7703 * this access. The base and limits are checked.
7704 * @param GCPtrMem The address of the guest memory.
7705 * @param pu256Value Pointer to the value to store.
7706 */
7707void iemMemStoreDataU256AlignedAvxJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem,
7708 PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP
7709{
7710 /* The lazy approach for now... */
7711 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7712 IEM_ACCESS_DATA_W, (sizeof(*pu256Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP);
7713 pu256Dst->au64[0] = pu256Value->au64[0];
7714 pu256Dst->au64[1] = pu256Value->au64[1];
7715 pu256Dst->au64[2] = pu256Value->au64[2];
7716 pu256Dst->au64[3] = pu256Value->au64[3];
7717 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
7718 Log5(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7719}
7720#endif
7721
7722
7723/**
7724 * Stores a descriptor register (sgdt, sidt).
7725 *
7726 * @returns Strict VBox status code.
7727 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7728 * @param cbLimit The limit.
7729 * @param GCPtrBase The base address.
7730 * @param iSegReg The index of the segment register to use for
7731 * this access. The base and limits are checked.
7732 * @param GCPtrMem The address of the guest memory.
7733 */
7734VBOXSTRICTRC iemMemStoreDataXdtr(PVMCPUCC pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7735{
7736 /*
7737 * The SIDT and SGDT instructions actually store the data using two
7738 * independent writes (see bs3CpuBasic2_sidt_sgdt_One). The instructions
7739 * do not respond to opsize prefixes.
7740 */
7741 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
7742 if (rcStrict == VINF_SUCCESS)
7743 {
7744 if (IEM_IS_16BIT_CODE(pVCpu))
7745 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
7746 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
7747 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
7748 else if (IEM_IS_32BIT_CODE(pVCpu))
7749 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
7750 else
7751 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
7752 }
7753 return rcStrict;
7754}
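/*
 * Illustrative note, derived from the code above: in 16-bit code a base of
 * 0x00123456 is stored as 0xff123456 when targeting a 286 or older CPU (the
 * top byte of the dword is forced to 0xff), whereas 386+ targets store the
 * unmodified 0x00123456.
 */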
7755
7756
7757/**
7758 * Begin a special stack push (used by interrupts, exceptions and such).
7759 *
7760 * This will raise \#SS or \#PF if appropriate.
7761 *
7762 * @returns Strict VBox status code.
7763 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7764 * @param cbMem The number of bytes to push onto the stack.
7765 * @param cbAlign The alignment mask (7, 3, 1).
7766 * @param ppvMem Where to return the pointer to the stack memory.
7767 * As with the other memory functions this could be
7768 * direct access or bounce buffered access, so
7769 * don't commit any registers until the commit call
7770 * succeeds.
7771 * @param puNewRsp Where to return the new RSP value. This must be
7772 * passed unchanged to
7773 * iemMemStackPushCommitSpecial().
7774 */
7775VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
7776 void **ppvMem, uint64_t *puNewRsp) RT_NOEXCEPT
7777{
7778 Assert(cbMem < UINT8_MAX);
7779 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, (uint8_t)cbMem, puNewRsp);
7780 return iemMemMap(pVCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop,
7781 IEM_ACCESS_STACK_W, cbAlign);
7782}
7783
7784
7785/**
7786 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
7787 *
7788 * This will update the rSP.
7789 *
7790 * @returns Strict VBox status code.
7791 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7792 * @param pvMem The pointer returned by
7793 * iemMemStackPushBeginSpecial().
7794 * @param uNewRsp The new RSP value returned by
7795 * iemMemStackPushBeginSpecial().
7796 */
7797VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPUCC pVCpu, void *pvMem, uint64_t uNewRsp) RT_NOEXCEPT
7798{
7799 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem, IEM_ACCESS_STACK_W);
7800 if (rcStrict == VINF_SUCCESS)
7801 pVCpu->cpum.GstCtx.rsp = uNewRsp;
7802 return rcStrict;
7803}
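/*
 * Illustrative sketch (not part of the build) of the begin/commit pattern for
 * a special stack push, here pushing a single 64-bit value; uValue is a
 * placeholder name:
 *
 *      uint64_t    *puFrame;
 *      uint64_t     uNewRsp;
 *      VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pVCpu, 8, 7 /*cbAlign*/,
 *                                                          (void **)&puFrame, &uNewRsp);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      *puFrame = uValue;                      // fill in the mapped stack memory
 *      rcStrict = iemMemStackPushCommitSpecial(pVCpu, puFrame, uNewRsp);
 *      // RSP is only updated by the commit call, and only when it succeeds.
 */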
7804
7805
7806/**
7807 * Begin a special stack pop (used by iret, retf and such).
7808 *
7809 * This will raise \#SS or \#PF if appropriate.
7810 *
7811 * @returns Strict VBox status code.
7812 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7813 * @param cbMem The number of bytes to pop from the stack.
7814 * @param cbAlign The alignment mask (7, 3, 1).
7815 * @param ppvMem Where to return the pointer to the stack memory.
7816 * @param puNewRsp Where to return the new RSP value. This must be
7817 * assigned to CPUMCTX::rsp manually some time
7818 * after iemMemStackPopDoneSpecial() has been
7819 * called.
7820 */
7821VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
7822 void const **ppvMem, uint64_t *puNewRsp) RT_NOEXCEPT
7823{
7824 Assert(cbMem < UINT8_MAX);
7825 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, (uint8_t)cbMem, puNewRsp);
7826 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R, cbAlign);
7827}
7828
7829
7830/**
7831 * Continue a special stack pop (used by iret and retf), for the purpose of
7832 * retrieving a new stack pointer.
7833 *
7834 * This will raise \#SS or \#PF if appropriate.
7835 *
7836 * @returns Strict VBox status code.
7837 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7838 * @param off Offset from the top of the stack. This is zero
7839 * except in the retf case.
7840 * @param cbMem The number of bytes to pop from the stack.
7841 * @param ppvMem Where to return the pointer to the stack memory.
7842 * @param uCurNewRsp The current uncommitted RSP value. (No need to
7843 * return this because this function is only used
7844 * to retrieve a new value and anything we return
7845 * here would be discarded.)
7846 */
7847VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPUCC pVCpu, size_t off, size_t cbMem,
7848 void const **ppvMem, uint64_t uCurNewRsp) RT_NOEXCEPT
7849{
7850 Assert(cbMem < UINT8_MAX);
7851
7852 /* The essence of iemRegGetRspForPopEx and friends: */ /** @todo put this into an inlined function? */
7853 RTGCPTR GCPtrTop;
7854 if (IEM_IS_64BIT_CODE(pVCpu))
7855 GCPtrTop = uCurNewRsp;
7856 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
7857 GCPtrTop = (uint32_t)uCurNewRsp;
7858 else
7859 GCPtrTop = (uint16_t)uCurNewRsp;
7860
7861 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop + off, IEM_ACCESS_STACK_R,
7862 0 /* checked in iemMemStackPopBeginSpecial */);
7863}
7864
7865
7866/**
7867 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
7868 * iemMemStackPopContinueSpecial).
7869 *
7870 * The caller will manually commit the rSP.
7871 *
7872 * @returns Strict VBox status code.
7873 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7874 * @param pvMem The pointer returned by
7875 * iemMemStackPopBeginSpecial() or
7876 * iemMemStackPopContinueSpecial().
7877 */
7878VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPUCC pVCpu, void const *pvMem) RT_NOEXCEPT
7879{
7880 return iemMemCommitAndUnmap(pVCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
7881}
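/*
 * Illustrative sketch (not part of the build) of the begin/done pattern for a
 * special stack pop of a single 64-bit value; note that the caller commits
 * the new RSP manually afterwards:
 *
 *      uint64_t const *puFrame;
 *      uint64_t        uNewRsp;
 *      VBOXSTRICTRC    rcStrict = iemMemStackPopBeginSpecial(pVCpu, 8, 7 /*cbAlign*/,
 *                                                            (void const **)&puFrame, &uNewRsp);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      uint64_t const uValue = *puFrame;
 *      rcStrict = iemMemStackPopDoneSpecial(pVCpu, puFrame);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      pVCpu->cpum.GstCtx.rsp = uNewRsp;       // manual commit of the stack pointer
 */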
7882
7883
7884/**
7885 * Fetches a system table byte.
7886 *
7887 * @returns Strict VBox status code.
7888 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7889 * @param pbDst Where to return the byte.
7890 * @param iSegReg The index of the segment register to use for
7891 * this access. The base and limits are checked.
7892 * @param GCPtrMem The address of the guest memory.
7893 */
7894VBOXSTRICTRC iemMemFetchSysU8(PVMCPUCC pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7895{
7896 /* The lazy approach for now... */
7897 uint8_t const *pbSrc;
7898 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
7899 if (rc == VINF_SUCCESS)
7900 {
7901 *pbDst = *pbSrc;
7902 rc = iemMemCommitAndUnmap(pVCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
7903 }
7904 return rc;
7905}
7906
7907
7908/**
7909 * Fetches a system table word.
7910 *
7911 * @returns Strict VBox status code.
7912 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7913 * @param pu16Dst Where to return the word.
7914 * @param iSegReg The index of the segment register to use for
7915 * this access. The base and limits are checked.
7916 * @param GCPtrMem The address of the guest memory.
7917 */
7918VBOXSTRICTRC iemMemFetchSysU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7919{
7920 /* The lazy approach for now... */
7921 uint16_t const *pu16Src;
7922 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
7923 if (rc == VINF_SUCCESS)
7924 {
7925 *pu16Dst = *pu16Src;
7926 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
7927 }
7928 return rc;
7929}
7930
7931
7932/**
7933 * Fetches a system table dword.
7934 *
7935 * @returns Strict VBox status code.
7936 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7937 * @param pu32Dst Where to return the dword.
7938 * @param iSegReg The index of the segment register to use for
7939 * this access. The base and limits are checked.
7940 * @param GCPtrMem The address of the guest memory.
7941 */
7942VBOXSTRICTRC iemMemFetchSysU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7943{
7944 /* The lazy approach for now... */
7945 uint32_t const *pu32Src;
7946 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
7947 if (rc == VINF_SUCCESS)
7948 {
7949 *pu32Dst = *pu32Src;
7950 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
7951 }
7952 return rc;
7953}
7954
7955
7956/**
7957 * Fetches a system table qword.
7958 *
7959 * @returns Strict VBox status code.
7960 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7961 * @param pu64Dst Where to return the qword.
7962 * @param iSegReg The index of the segment register to use for
7963 * this access. The base and limits are checked.
7964 * @param GCPtrMem The address of the guest memory.
7965 */
7966VBOXSTRICTRC iemMemFetchSysU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7967{
7968 /* The lazy approach for now... */
7969 uint64_t const *pu64Src;
7970 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
7971 if (rc == VINF_SUCCESS)
7972 {
7973 *pu64Dst = *pu64Src;
7974 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
7975 }
7976 return rc;
7977}
7978
7979
7980/**
7981 * Fetches a descriptor table entry with caller specified error code.
7982 *
7983 * @returns Strict VBox status code.
7984 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7985 * @param pDesc Where to return the descriptor table entry.
7986 * @param uSel The selector which table entry to fetch.
7987 * @param uXcpt The exception to raise on table lookup error.
7988 * @param uErrorCode The error code associated with the exception.
7989 */
7990static VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,
7991 uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT
7992{
7993 AssertPtr(pDesc);
7994 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
7995
7996 /** @todo did the 286 require all 8 bytes to be accessible? */
7997 /*
7998 * Get the selector table base and check bounds.
7999 */
8000 RTGCPTR GCPtrBase;
8001 if (uSel & X86_SEL_LDT)
8002 {
8003 if ( !pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present
8004 || (uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.ldtr.u32Limit )
8005 {
8006 LogEx(LOG_GROUP_IEM, ("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
8007 uSel, pVCpu->cpum.GstCtx.ldtr.u32Limit, pVCpu->cpum.GstCtx.ldtr.Sel));
8008 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8009 uErrorCode, 0);
8010 }
8011
8012 Assert(pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present);
8013 GCPtrBase = pVCpu->cpum.GstCtx.ldtr.u64Base;
8014 }
8015 else
8016 {
8017 if ((uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.gdtr.cbGdt)
8018 {
8019 LogEx(LOG_GROUP_IEM, ("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pVCpu->cpum.GstCtx.gdtr.cbGdt));
8020 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8021 uErrorCode, 0);
8022 }
8023 GCPtrBase = pVCpu->cpum.GstCtx.gdtr.pGdt;
8024 }
8025
8026 /*
8027 * Read the legacy descriptor and maybe the long mode extensions if
8028 * required.
8029 */
8030 VBOXSTRICTRC rcStrict;
8031 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)
8032 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
8033 else
8034 {
8035 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);
8036 if (rcStrict == VINF_SUCCESS)
8037 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);
8038 if (rcStrict == VINF_SUCCESS)
8039 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);
8040 if (rcStrict == VINF_SUCCESS)
8041 pDesc->Legacy.au16[3] = 0;
8042 else
8043 return rcStrict;
8044 }
8045
8046 if (rcStrict == VINF_SUCCESS)
8047 {
8048 if ( !IEM_IS_LONG_MODE(pVCpu)
8049 || pDesc->Legacy.Gen.u1DescType)
8050 pDesc->Long.au64[1] = 0;
8051 else if ( (uint32_t)(uSel | X86_SEL_RPL_LDT) + 8
8052 <= (uSel & X86_SEL_LDT ? pVCpu->cpum.GstCtx.ldtr.u32Limit : pVCpu->cpum.GstCtx.gdtr.cbGdt))
8053 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
8054 else
8055 {
8056 LogEx(LOG_GROUP_IEM,("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
8057 /** @todo is this the right exception? */
8058 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
8059 }
8060 }
8061 return rcStrict;
8062}
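/*
 * Illustrative note on the bounds checks above: for a selector like 0x002b
 * the X86_SEL_LDT bit selects the LDT, the descriptor starts at offset
 * uSel & X86_SEL_MASK = 0x0028 into the table, and (uSel | X86_SEL_RPL_LDT)
 * = 0x002f is the offset of the descriptor's last byte, which must not
 * exceed the table limit.
 */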
8063
8064
8065/**
8066 * Fetches a descriptor table entry.
8067 *
8068 * @returns Strict VBox status code.
8069 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8070 * @param pDesc Where to return the descriptor table entry.
8071 * @param uSel The selector which table entry to fetch.
8072 * @param uXcpt The exception to raise on table lookup error.
8073 */
8074VBOXSTRICTRC iemMemFetchSelDesc(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt) RT_NOEXCEPT
8075{
8076 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
8077}
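/*
 * Note: the default error code passed above is the selector with the RPL
 * bits cleared; in the x86 error code format those two low bits are reused
 * as the EXT and IDT flags while the TI bit and the index are kept.
 */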
8078
8079
8080/**
8081 * Marks the selector descriptor as accessed (only non-system descriptors).
8082 *
8083 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
8084 * will therefore skip the limit checks.
8085 *
8086 * @returns Strict VBox status code.
8087 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8088 * @param uSel The selector.
8089 */
8090VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
8091{
8092 /*
8093 * Get the selector table base and calculate the entry address.
8094 */
8095 RTGCPTR GCPtr = uSel & X86_SEL_LDT
8096 ? pVCpu->cpum.GstCtx.ldtr.u64Base
8097 : pVCpu->cpum.GstCtx.gdtr.pGdt;
8098 GCPtr += uSel & X86_SEL_MASK;
8099
8100 /*
8101 * ASMAtomicBitSet will assert if the address is misaligned, so do some
8102 * ugly stuff to avoid this. This will make sure the access is atomic and
8103 * more or less removes any question about 8-bit vs 32-bit accesses.
8104 */
8105 VBOXSTRICTRC rcStrict;
8106 uint32_t volatile *pu32;
8107 if ((GCPtr & 3) == 0)
8108 {
8109 /* The normal case, map the 32 bits around the accessed bit (bit 40). */
8110 GCPtr += 2 + 2;
8111 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0);
8112 if (rcStrict != VINF_SUCCESS)
8113 return rcStrict;
8114 ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
8115 }
8116 else
8117 {
8118 /* The misaligned GDT/LDT case, map the whole thing. */
8119 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0);
8120 if (rcStrict != VINF_SUCCESS)
8121 return rcStrict;
8122 switch ((uintptr_t)pu32 & 3)
8123 {
8124 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
8125 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
8126 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
8127 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
8128 }
8129 }
8130
8131 return iemMemCommitAndUnmap(pVCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
8132}
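/*
 * Illustrative note on the bit arithmetic above: the accessed bit is bit 40
 * of the 8-byte descriptor, i.e. bit 0 of the type byte at offset 5.  In the
 * aligned case the dword at offset 4 is mapped, so the bit to set is bit 8 of
 * that dword; in the misaligned case the byte pointer is advanced so that the
 * bit index handed to ASMAtomicBitSet stays within a naturally aligned unit.
 */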
8133
8134
8135#undef LOG_GROUP
8136#define LOG_GROUP LOG_GROUP_IEM
8137
8138/** @} */
8139
8140/** @name Opcode Helpers.
8141 * @{
8142 */
8143
8144/**
8145 * Calculates the effective address of a ModR/M memory operand.
8146 *
8147 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8148 *
8149 * @return Strict VBox status code.
8150 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8151 * @param bRm The ModRM byte.
8152 * @param cbImmAndRspOffset - First byte: The size of any immediate
8153 * following the effective address opcode bytes
8154 * (only for RIP relative addressing).
8155 * - Second byte: RSP displacement (for POP [ESP]).
8156 * @param pGCPtrEff Where to return the effective address.
8157 */
8158VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset, PRTGCPTR pGCPtrEff) RT_NOEXCEPT
8159{
8160 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
8161# define SET_SS_DEF() \
8162 do \
8163 { \
8164 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
8165 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
8166 } while (0)
8167
8168 if (!IEM_IS_64BIT_CODE(pVCpu))
8169 {
8170/** @todo Check the effective address size crap! */
8171 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
8172 {
8173 uint16_t u16EffAddr;
8174
8175 /* Handle the disp16 form with no registers first. */
8176 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
8177 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
8178 else
8179 {
8180 /* Get the displacement. */
8181 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8182 {
8183 case 0: u16EffAddr = 0; break;
8184 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
8185 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
8186 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
8187 }
8188
8189 /* Add the base and index registers to the disp. */
8190 switch (bRm & X86_MODRM_RM_MASK)
8191 {
8192 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
8193 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
8194 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
8195 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
8196 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
8197 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
8198 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
8199 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
8200 }
8201 }
8202
8203 *pGCPtrEff = u16EffAddr;
8204 }
8205 else
8206 {
8207 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8208 uint32_t u32EffAddr;
8209
8210 /* Handle the disp32 form with no registers first. */
8211 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8212 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
8213 else
8214 {
8215 /* Get the register (or SIB) value. */
8216 switch ((bRm & X86_MODRM_RM_MASK))
8217 {
8218 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8219 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8220 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8221 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8222 case 4: /* SIB */
8223 {
8224 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8225
8226 /* Get the index and scale it. */
8227 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
8228 {
8229 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8230 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8231 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8232 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8233 case 4: u32EffAddr = 0; /*none */ break;
8234 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
8235 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8236 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8237 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8238 }
8239 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8240
8241 /* add base */
8242 switch (bSib & X86_SIB_BASE_MASK)
8243 {
8244 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
8245 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
8246 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
8247 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
8248 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8249 case 5:
8250 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8251 {
8252 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
8253 SET_SS_DEF();
8254 }
8255 else
8256 {
8257 uint32_t u32Disp;
8258 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8259 u32EffAddr += u32Disp;
8260 }
8261 break;
8262 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
8263 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
8264 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8265 }
8266 break;
8267 }
8268 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
8269 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8270 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8271 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8272 }
8273
8274 /* Get and add the displacement. */
8275 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8276 {
8277 case 0:
8278 break;
8279 case 1:
8280 {
8281 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8282 u32EffAddr += i8Disp;
8283 break;
8284 }
8285 case 2:
8286 {
8287 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8288 u32EffAddr += u32Disp;
8289 break;
8290 }
8291 default:
8292 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
8293 }
8294
8295 }
8296 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8297 *pGCPtrEff = u32EffAddr;
8298 }
8299 }
8300 else
8301 {
8302 uint64_t u64EffAddr;
8303
8304 /* Handle the rip+disp32 form with no registers first. */
8305 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8306 {
8307 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
8308 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
8309 }
8310 else
8311 {
8312 /* Get the register (or SIB) value. */
8313 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
8314 {
8315 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8316 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8317 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8318 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8319 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
8320 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8321 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8322 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8323 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8324 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8325 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8326 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8327 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8328 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8329 /* SIB */
8330 case 4:
8331 case 12:
8332 {
8333 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8334
8335 /* Get the index and scale it. */
8336 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
8337 {
8338 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8339 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8340 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8341 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8342 case 4: u64EffAddr = 0; /*none */ break;
8343 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
8344 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8345 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8346 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8347 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8348 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8349 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8350 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
8351 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8352 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8353 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8354 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8355 }
8356 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8357
8358 /* add base */
8359 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
8360 {
8361 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
8362 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
8363 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
8364 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
8365 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8366 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
8367 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
8368 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
8369 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
8370 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
8371 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
8372 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
8373 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
8374 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
8375 /* complicated encodings */
8376 case 5:
8377 case 13:
8378 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8379 {
8380 if (!pVCpu->iem.s.uRexB)
8381 {
8382 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
8383 SET_SS_DEF();
8384 }
8385 else
8386 u64EffAddr += pVCpu->cpum.GstCtx.r13;
8387 }
8388 else
8389 {
8390 uint32_t u32Disp;
8391 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8392 u64EffAddr += (int32_t)u32Disp;
8393 }
8394 break;
8395 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8396 }
8397 break;
8398 }
8399 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8400 }
8401
8402 /* Get and add the displacement. */
8403 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8404 {
8405 case 0:
8406 break;
8407 case 1:
8408 {
8409 int8_t i8Disp;
8410 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8411 u64EffAddr += i8Disp;
8412 break;
8413 }
8414 case 2:
8415 {
8416 uint32_t u32Disp;
8417 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8418 u64EffAddr += (int32_t)u32Disp;
8419 break;
8420 }
8421 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
8422 }
8423
8424 }
8425
8426 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
8427 *pGCPtrEff = u64EffAddr;
8428 else
8429 {
8430 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8431 *pGCPtrEff = u64EffAddr & UINT32_MAX;
8432 }
8433 }
8434
8435 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
8436 return VINF_SUCCESS;
8437}
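/*
 * Illustrative sketch (not part of the build) of calling the helper above
 * for a memory operand that is followed by a 4 byte immediate.  The low byte
 * of cbImmAndRspOffset is the immediate size (only relevant for RIP relative
 * addressing) and the second byte is the extra RSP displacement, 0 here.
 *
 *      RTGCPTR      GCPtrEff;
 *      VBOXSTRICTRC rcStrict = iemOpHlpCalcRmEffAddr(pVCpu, bRm, 4 /*cbImm*/, &GCPtrEff);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      // Worked 16-bit example: bRm=0x42 (mod=1, rm=2) with disp8=0x10 gives
 *      // GCPtrEff = BP + SI + 0x10 and defaults the segment to SS.
 */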
8438
8439
8440#ifdef IEM_WITH_SETJMP
8441/**
8442 * Calculates the effective address of a ModR/M memory operand.
8443 *
8444 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8445 *
8446 * May longjmp on internal error.
8447 *
8448 * @return The effective address.
8449 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8450 * @param bRm The ModRM byte.
8451 * @param cbImmAndRspOffset - First byte: The size of any immediate
8452 * following the effective address opcode bytes
8453 * (only for RIP relative addressing).
8454 * - Second byte: RSP displacement (for POP [ESP]).
8455 */
8456RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset) IEM_NOEXCEPT_MAY_LONGJMP
8457{
8458 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
8459# define SET_SS_DEF() \
8460 do \
8461 { \
8462 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
8463 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
8464 } while (0)
8465
8466 if (!IEM_IS_64BIT_CODE(pVCpu))
8467 {
8468/** @todo Check the effective address size crap! */
8469 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
8470 {
8471 uint16_t u16EffAddr;
8472
8473 /* Handle the disp16 form with no registers first. */
8474 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
8475 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
8476 else
8477 {
8478 /* Get the displacement. */
8479 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8480 {
8481 case 0: u16EffAddr = 0; break;
8482 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
8483 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
8484 default: AssertFailedStmt(IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_1)); /* (caller checked for these) */
8485 }
8486
8487 /* Add the base and index registers to the disp. */
8488 switch (bRm & X86_MODRM_RM_MASK)
8489 {
8490 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
8491 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
8492 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
8493 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
8494 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
8495 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
8496 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
8497 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
8498 }
8499 }
8500
8501 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
8502 return u16EffAddr;
8503 }
8504
8505 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8506 uint32_t u32EffAddr;
8507
8508 /* Handle the disp32 form with no registers first. */
8509 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8510 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
8511 else
8512 {
8513 /* Get the register (or SIB) value. */
8514 switch ((bRm & X86_MODRM_RM_MASK))
8515 {
8516 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8517 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8518 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8519 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8520 case 4: /* SIB */
8521 {
8522 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8523
8524 /* Get the index and scale it. */
8525 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
8526 {
8527 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8528 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8529 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8530 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8531 case 4: u32EffAddr = 0; /*none */ break;
8532 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
8533 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8534 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8535 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8536 }
8537 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8538
8539 /* add base */
8540 switch (bSib & X86_SIB_BASE_MASK)
8541 {
8542 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
8543 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
8544 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
8545 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
8546 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8547 case 5:
8548 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8549 {
8550 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
8551 SET_SS_DEF();
8552 }
8553 else
8554 {
8555 uint32_t u32Disp;
8556 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8557 u32EffAddr += u32Disp;
8558 }
8559 break;
8560 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
8561 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
8562 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8563 }
8564 break;
8565 }
8566 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
8567 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8568 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8569 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8570 }
8571
8572 /* Get and add the displacement. */
8573 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8574 {
8575 case 0:
8576 break;
8577 case 1:
8578 {
8579 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8580 u32EffAddr += i8Disp;
8581 break;
8582 }
8583 case 2:
8584 {
8585 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8586 u32EffAddr += u32Disp;
8587 break;
8588 }
8589 default:
8590 AssertFailedStmt(IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_2)); /* (caller checked for these) */
8591 }
8592 }
8593
8594 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8595 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
8596 return u32EffAddr;
8597 }
8598
8599 uint64_t u64EffAddr;
8600
8601 /* Handle the rip+disp32 form with no registers first. */
8602 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8603 {
8604 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
8605 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
8606 }
8607 else
8608 {
8609 /* Get the register (or SIB) value. */
8610 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
8611 {
8612 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8613 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8614 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8615 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8616 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
8617 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8618 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8619 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8620 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8621 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8622 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8623 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8624 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8625 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8626 /* SIB */
8627 case 4:
8628 case 12:
8629 {
8630 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8631
8632 /* Get the index and scale it. */
8633 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
8634 {
8635 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8636 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8637 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8638 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8639 case 4: u64EffAddr = 0; /*none */ break;
8640 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
8641 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8642 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8643 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8644 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8645 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8646 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8647 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
8648 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8649 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8650 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8651 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8652 }
8653 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8654
8655 /* add base */
8656 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
8657 {
8658 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
8659 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
8660 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
8661 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
8662 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8663 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
8664 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
8665 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
8666 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
8667 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
8668 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
8669 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
8670 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
8671 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
8672 /* complicated encodings */
8673 case 5:
8674 case 13:
8675 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8676 {
8677 if (!pVCpu->iem.s.uRexB)
8678 {
8679 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
8680 SET_SS_DEF();
8681 }
8682 else
8683 u64EffAddr += pVCpu->cpum.GstCtx.r13;
8684 }
8685 else
8686 {
8687 uint32_t u32Disp;
8688 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8689 u64EffAddr += (int32_t)u32Disp;
8690 }
8691 break;
8692 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8693 }
8694 break;
8695 }
8696 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8697 }
8698
8699 /* Get and add the displacement. */
8700 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8701 {
8702 case 0:
8703 break;
8704 case 1:
8705 {
8706 int8_t i8Disp;
8707 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8708 u64EffAddr += i8Disp;
8709 break;
8710 }
8711 case 2:
8712 {
8713 uint32_t u32Disp;
8714 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8715 u64EffAddr += (int32_t)u32Disp;
8716 break;
8717 }
8718 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
8719 }
8720
8721 }
8722
8723 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
8724 {
8725 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
8726 return u64EffAddr;
8727 }
8728 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8729 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
8730 return u64EffAddr & UINT32_MAX;
8731}
8732#endif /* IEM_WITH_SETJMP */
8733
8734
8735/**
8736 * Calculates the effective address of a ModR/M memory operand, extended version
8737 * for use in the recompilers.
8738 *
8739 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8740 *
8741 * @return Strict VBox status code.
8742 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8743 * @param bRm The ModRM byte.
8744 * @param cbImmAndRspOffset - First byte: The size of any immediate
8745 * following the effective address opcode bytes
8746 * (only for RIP relative addressing).
8747 * - Second byte: RSP displacement (for POP [ESP]).
8748 * @param pGCPtrEff Where to return the effective address.
8749 * @param puInfo Extra info: 32-bit displacement (bits 31:0) and
8750 * SIB byte (bits 39:32).
8751 */
8752VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset, PRTGCPTR pGCPtrEff, uint64_t *puInfo) RT_NOEXCEPT
8753{
8754 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
8755# define SET_SS_DEF() \
8756 do \
8757 { \
8758 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
8759 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
8760 } while (0)
8761
8762 uint64_t uInfo;
8763 if (!IEM_IS_64BIT_CODE(pVCpu))
8764 {
8765/** @todo Check the effective address size crap! */
8766 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
8767 {
8768 uint16_t u16EffAddr;
8769
8770 /* Handle the disp16 form with no registers first. */
8771 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
8772 {
8773 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
8774 uInfo = u16EffAddr;
8775 }
8776 else
8777 {
8778 /* Get the displacement. */
8779 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8780 {
8781 case 0: u16EffAddr = 0; break;
8782 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
8783 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
8784 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
8785 }
8786 uInfo = u16EffAddr;
8787
8788 /* Add the base and index registers to the disp. */
8789 switch (bRm & X86_MODRM_RM_MASK)
8790 {
8791 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
8792 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
8793 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
8794 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
8795 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
8796 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
8797 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
8798 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
8799 }
8800 }
8801
8802 *pGCPtrEff = u16EffAddr;
8803 }
8804 else
8805 {
8806 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8807 uint32_t u32EffAddr;
8808
8809 /* Handle the disp32 form with no registers first. */
8810 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8811 {
8812 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
8813 uInfo = u32EffAddr;
8814 }
8815 else
8816 {
8817 /* Get the register (or SIB) value. */
8818 uInfo = 0;
8819 switch ((bRm & X86_MODRM_RM_MASK))
8820 {
8821 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8822 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8823 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8824 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8825 case 4: /* SIB */
8826 {
8827 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8828 uInfo = (uint64_t)bSib << 32;
8829
8830 /* Get the index and scale it. */
8831 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
8832 {
8833 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8834 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8835 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8836 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8837 case 4: u32EffAddr = 0; /*none */ break;
8838 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
8839 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8840 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8841 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8842 }
8843 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8844
8845 /* add base */
8846 switch (bSib & X86_SIB_BASE_MASK)
8847 {
8848 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
8849 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
8850 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
8851 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
8852 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8853 case 5:
8854 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8855 {
8856 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
8857 SET_SS_DEF();
8858 }
8859 else
8860 {
8861 uint32_t u32Disp;
8862 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8863 u32EffAddr += u32Disp;
8864 uInfo |= u32Disp;
8865 }
8866 break;
8867 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
8868 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
8869 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8870 }
8871 break;
8872 }
8873 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
8874 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8875 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8876 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8877 }
8878
8879 /* Get and add the displacement. */
8880 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8881 {
8882 case 0:
8883 break;
8884 case 1:
8885 {
8886 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8887 u32EffAddr += i8Disp;
8888 uInfo |= (uint32_t)(int32_t)i8Disp;
8889 break;
8890 }
8891 case 2:
8892 {
8893 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8894 u32EffAddr += u32Disp;
8895 uInfo |= (uint32_t)u32Disp;
8896 break;
8897 }
8898 default:
8899 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
8900 }
8901
8902 }
8903 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8904 *pGCPtrEff = u32EffAddr;
8905 }
8906 }
8907 else
8908 {
8909 uint64_t u64EffAddr;
8910
8911 /* Handle the rip+disp32 form with no registers first. */
8912 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8913 {
8914 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
8915 uInfo = (uint32_t)u64EffAddr;
8916 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
8917 }
8918 else
8919 {
8920 /* Get the register (or SIB) value. */
8921 uInfo = 0;
8922 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
8923 {
8924 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8925 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8926 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8927 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8928 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
8929 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8930 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8931 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8932 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8933 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8934 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8935 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8936 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8937 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8938 /* SIB */
8939 case 4:
8940 case 12:
8941 {
8942 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8943 uInfo = (uint64_t)bSib << 32;
8944
8945 /* Get the index and scale it. */
8946 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
8947 {
8948 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8949 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8950 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8951 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8952 case 4: u64EffAddr = 0; /*none */ break;
8953 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
8954 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8955 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8956 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8957 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8958 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8959 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8960 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
8961 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8962 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8963 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8964 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8965 }
8966 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8967
8968 /* add base */
8969 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
8970 {
8971 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
8972 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
8973 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
8974 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
8975 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8976 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
8977 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
8978 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
8979 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
8980 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
8981 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
8982 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
8983 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
8984 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
8985 /* complicated encodings */
8986 case 5:
8987 case 13:
8988 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8989 {
8990 if (!pVCpu->iem.s.uRexB)
8991 {
8992 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
8993 SET_SS_DEF();
8994 }
8995 else
8996 u64EffAddr += pVCpu->cpum.GstCtx.r13;
8997 }
8998 else
8999 {
9000 uint32_t u32Disp;
9001 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9002 u64EffAddr += (int32_t)u32Disp;
9003 uInfo |= u32Disp;
9004 }
9005 break;
9006 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9007 }
9008 break;
9009 }
9010 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9011 }
9012
9013 /* Get and add the displacement. */
9014 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9015 {
9016 case 0:
9017 break;
9018 case 1:
9019 {
9020 int8_t i8Disp;
9021 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9022 u64EffAddr += i8Disp;
9023 uInfo |= (uint32_t)(int32_t)i8Disp;
9024 break;
9025 }
9026 case 2:
9027 {
9028 uint32_t u32Disp;
9029 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9030 u64EffAddr += (int32_t)u32Disp;
9031 uInfo |= u32Disp;
9032 break;
9033 }
9034 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
9035 }
9036
9037 }
9038
9039 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
9040 *pGCPtrEff = u64EffAddr;
9041 else
9042 {
9043 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9044 *pGCPtrEff = u64EffAddr & UINT32_MAX;
9045 }
9046 }
9047 *puInfo = uInfo;
9048
9049 Log5(("iemOpHlpCalcRmEffAddrEx: EffAddr=%#010RGv uInfo=%RX64\n", *pGCPtrEff, uInfo));
9050 return VINF_SUCCESS;
9051}
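
/*
 * Editor's note (not part of the original source): a worked 32-bit example of
 * the ModR/M + SIB decoding performed above.  With bRm = 0x44 (mod=01,
 * rm=100) a SIB byte and a disp8 follow; with bSib = 0x98 (scale=2,
 * index=EBX, base=EAX) the effective address is EAX + EBX*4 + disp8.  The
 * returned uInfo packs the SIB byte into bits 32..39 and the (sign extended)
 * displacement into the low 32 bits, as set up by the code above.
 */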
9052
9053/** @} */
9054
9055
9056#ifdef LOG_ENABLED
9057/**
9058 * Logs the current instruction.
9059 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9060 * @param fSameCtx Set if we have the same context information as the VMM,
9061 * clear if we may have already executed an instruction in
9062 * our debug context. When clear, we assume IEMCPU holds
9063 * valid CPU mode info.
9064 *
9065 * The @a fSameCtx parameter is now misleading and obsolete.
9066 * @param pszFunction The IEM function doing the execution.
9067 */
9068static void iemLogCurInstr(PVMCPUCC pVCpu, bool fSameCtx, const char *pszFunction) RT_NOEXCEPT
9069{
9070# ifdef IN_RING3
9071 if (LogIs2Enabled())
9072 {
9073 char szInstr[256];
9074 uint32_t cbInstr = 0;
9075 if (fSameCtx)
9076 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
9077 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
9078 szInstr, sizeof(szInstr), &cbInstr);
9079 else
9080 {
9081 uint32_t fFlags = 0;
9082 switch (IEM_GET_CPU_MODE(pVCpu))
9083 {
9084 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
9085 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
9086 case IEMMODE_16BIT:
9087 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) || pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
9088 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
9089 else
9090 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
9091 break;
9092 }
9093 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fFlags,
9094 szInstr, sizeof(szInstr), &cbInstr);
9095 }
9096
9097 PCX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
9098 Log2(("**** %s fExec=%x\n"
9099 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
9100 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
9101 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
9102 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
9103 " %s\n"
9104 , pszFunction, pVCpu->iem.s.fExec,
9105 pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ebx, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.esi, pVCpu->cpum.GstCtx.edi,
9106 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL, pVCpu->cpum.GstCtx.tr.Sel,
9107 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ds.Sel, pVCpu->cpum.GstCtx.es.Sel,
9108 pVCpu->cpum.GstCtx.fs.Sel, pVCpu->cpum.GstCtx.gs.Sel, pVCpu->cpum.GstCtx.eflags.u,
9109 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
9110 szInstr));
9111
9112 /* This stuff sucks atm. as it fills the log with MSRs. */
9113 //if (LogIs3Enabled())
9114 // DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
9115 }
9116 else
9117# endif
9118 LogFlow(("%s: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n", pszFunction, pVCpu->cpum.GstCtx.cs.Sel,
9119 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u));
9120 RT_NOREF_PV(pVCpu); RT_NOREF_PV(fSameCtx);
9121}
9122#endif /* LOG_ENABLED */
9123
9124
9125#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9126/**
9127 * Deals with VMCPU_FF_VMX_APIC_WRITE, VMCPU_FF_VMX_MTF, VMCPU_FF_VMX_NMI_WINDOW,
9128 * VMCPU_FF_VMX_PREEMPT_TIMER and VMCPU_FF_VMX_INT_WINDOW.
9129 *
9130 * @returns Modified rcStrict.
9131 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9132 * @param rcStrict The instruction execution status.
9133 */
9134static VBOXSTRICTRC iemHandleNestedInstructionBoundaryFFs(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict) RT_NOEXCEPT
9135{
9136 Assert(CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)));
9137 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF))
9138 {
9139 /* VMX preemption timer takes priority over NMI-window exits. */
9140 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
9141 {
9142 rcStrict = iemVmxVmexitPreemptTimer(pVCpu);
9143 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER));
9144 }
9145 /*
9146 * Check remaining intercepts.
9147 *
9148 * NMI-window and Interrupt-window VM-exits.
9149 * Interrupt shadow (block-by-STI and Mov SS) inhibits interrupts and may also block NMIs.
9150 * Event injection during VM-entry takes priority over NMI-window and interrupt-window VM-exits.
9151 *
9152 * See Intel spec. 26.7.6 "NMI-Window Exiting".
9153 * See Intel spec. 26.7.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
9154 */
9155 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW | VMCPU_FF_VMX_INT_WINDOW)
9156 && !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
9157 && !TRPMHasTrap(pVCpu))
9158 {
9159 Assert(CPUMIsGuestVmxInterceptEvents(&pVCpu->cpum.GstCtx));
9160 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW)
9161 && CPUMIsGuestVmxVirtNmiBlocking(&pVCpu->cpum.GstCtx))
9162 {
9163 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_NMI_WINDOW, 0 /* u64ExitQual */);
9164 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW));
9165 }
9166 else if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW)
9167 && CPUMIsGuestVmxVirtIntrEnabled(&pVCpu->cpum.GstCtx))
9168 {
9169 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_INT_WINDOW, 0 /* u64ExitQual */);
9170 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW));
9171 }
9172 }
9173 }
9174 /* TPR-below threshold/APIC write has the highest priority. */
9175 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
9176 {
9177 rcStrict = iemVmxApicWriteEmulation(pVCpu);
9178 Assert(!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx));
9179 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE));
9180 }
9181 /* MTF takes priority over VMX-preemption timer. */
9182 else
9183 {
9184 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* u64ExitQual */);
9185 Assert(!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx));
9186 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF));
9187 }
9188 return rcStrict;
9189}
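
/*
 * Editor's note (not part of the original source): taken together with the
 * comments above, the effective priority implemented by this function is:
 * APIC-write emulation, then MTF VM-exit, then VMX-preemption timer, then
 * NMI-window and finally interrupt-window VM-exits, with the two window
 * exits additionally gated on there being no interrupt shadow and no
 * pending TRPM event.
 */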
9190#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
9191
9192
9193/**
9194 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
9195 * IEMExecOneWithPrefetchedByPC.
9196 *
9197 * Similar code is found in IEMExecLots.
9198 *
9199 * @return Strict VBox status code.
9200 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9201 * @param fExecuteInhibit If set, execute the instruction following CLI,
9202 * POP SS and MOV SS,GR.
9203 * @param pszFunction The calling function name.
9204 */
9205DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPUCC pVCpu, bool fExecuteInhibit, const char *pszFunction)
9206{
9207 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9208 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9209 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9210 RT_NOREF_PV(pszFunction);
9211
9212#ifdef IEM_WITH_SETJMP
9213 VBOXSTRICTRC rcStrict;
9214 IEM_TRY_SETJMP(pVCpu, rcStrict)
9215 {
9216 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9217 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9218 }
9219 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
9220 {
9221 pVCpu->iem.s.cLongJumps++;
9222 }
9223 IEM_CATCH_LONGJMP_END(pVCpu);
9224#else
9225 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9226 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9227#endif
9228 if (rcStrict == VINF_SUCCESS)
9229 pVCpu->iem.s.cInstructions++;
9230 if (pVCpu->iem.s.cActiveMappings > 0)
9231 {
9232 Assert(rcStrict != VINF_SUCCESS);
9233 iemMemRollback(pVCpu);
9234 }
9235 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9236 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9237 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9238
9239//#ifdef DEBUG
9240// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
9241//#endif
9242
9243#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9244 /*
9245 * Perform any VMX nested-guest instruction boundary actions.
9246 *
9247 * If any of these causes a VM-exit, we must skip executing the next
9248 * instruction (would run into stale page tables). A VM-exit makes sure
9249 * there is no interrupt-inhibition, so that should ensure we don't go
9250 * to try execute the next instruction. Clearing fExecuteInhibit is
9251 * problematic because of the setjmp/longjmp clobbering above.
9252 */
9253 if ( !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9254 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)
9255 || rcStrict != VINF_SUCCESS)
9256 { /* likely */ }
9257 else
9258 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9259#endif
9260
9261 /* Execute the next instruction as well if a cli, pop ss or
9262 mov ss, Gr has just completed successfully. */
9263 if ( fExecuteInhibit
9264 && rcStrict == VINF_SUCCESS
9265 && CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx))
9266 {
9267 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fExec & (IEM_F_BYPASS_HANDLERS | IEM_F_X86_DISREGARD_LOCK));
9268 if (rcStrict == VINF_SUCCESS)
9269 {
9270#ifdef LOG_ENABLED
9271 iemLogCurInstr(pVCpu, false, pszFunction);
9272#endif
9273#ifdef IEM_WITH_SETJMP
9274 IEM_TRY_SETJMP_AGAIN(pVCpu, rcStrict)
9275 {
9276 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9277 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9278 }
9279 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
9280 {
9281 pVCpu->iem.s.cLongJumps++;
9282 }
9283 IEM_CATCH_LONGJMP_END(pVCpu);
9284#else
9285 IEM_OPCODE_GET_FIRST_U8(&b);
9286 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9287#endif
9288 if (rcStrict == VINF_SUCCESS)
9289 {
9290 pVCpu->iem.s.cInstructions++;
9291#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9292 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9293 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW))
9294 { /* likely */ }
9295 else
9296 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9297#endif
9298 }
9299 if (pVCpu->iem.s.cActiveMappings > 0)
9300 {
9301 Assert(rcStrict != VINF_SUCCESS);
9302 iemMemRollback(pVCpu);
9303 }
9304 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9305 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9306 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9307 }
9308 else if (pVCpu->iem.s.cActiveMappings > 0)
9309 iemMemRollback(pVCpu);
9310 /** @todo drop this after we bake this change into RIP advancing. */
9311 CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx); /* hope this is correct for all exceptional cases... */
9312 }
9313
9314 /*
9315 * Return value fiddling, statistics and sanity assertions.
9316 */
9317 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9318
9319 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
9320 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
9321 return rcStrict;
9322}
9323
9324
9325/**
9326 * Execute one instruction.
9327 *
9328 * @return Strict VBox status code.
9329 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9330 */
9331VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPUCC pVCpu)
9332{
9333    AssertCompile(sizeof(pVCpu->iem.s) <= sizeof(pVCpu->iem.padding)); /* (tstVMStruct can't do its job w/o instruction stats) */
9334#ifdef LOG_ENABLED
9335 iemLogCurInstr(pVCpu, true, "IEMExecOne");
9336#endif
9337
9338 /*
9339 * Do the decoding and emulation.
9340 */
9341 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9342 if (rcStrict == VINF_SUCCESS)
9343 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOne");
9344 else if (pVCpu->iem.s.cActiveMappings > 0)
9345 iemMemRollback(pVCpu);
9346
9347 if (rcStrict != VINF_SUCCESS)
9348 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9349 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9350 return rcStrict;
9351}
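
/*
 * Editor's sketch (not part of the original source): how a hypothetical
 * caller might drive IEMExecOne() in a small loop, stopping at the first
 * status code that needs attention.  The function name and the loop bound
 * below are illustrative assumptions only.
 */
#if 0 /* illustrative sketch, never compiled */
static VBOXSTRICTRC sketchExecSomeInstructions(PVMCPUCC pVCpu)
{
    VBOXSTRICTRC rcStrict = VINF_SUCCESS;
    /* Interpret up to 32 instructions; anything other than VINF_SUCCESS
       (I/O, MMIO, informational exits, errors) is returned to the caller. */
    for (unsigned i = 0; i < 32 && rcStrict == VINF_SUCCESS; i++)
        rcStrict = IEMExecOne(pVCpu);
    return rcStrict;
}
#endif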
9352
9353
9354VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPUCC pVCpu, uint32_t *pcbWritten)
9355{
9356 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
9357 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9358 if (rcStrict == VINF_SUCCESS)
9359 {
9360 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneEx");
9361 if (pcbWritten)
9362 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
9363 }
9364 else if (pVCpu->iem.s.cActiveMappings > 0)
9365 iemMemRollback(pVCpu);
9366
9367 return rcStrict;
9368}
9369
9370
9371VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPUCC pVCpu, uint64_t OpcodeBytesPC,
9372 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
9373{
9374 VBOXSTRICTRC rcStrict;
9375 if ( cbOpcodeBytes
9376 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
9377 {
9378 iemInitDecoder(pVCpu, 0 /*fExecOpts*/);
9379#ifdef IEM_WITH_CODE_TLB
9380 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
9381 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
9382 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
9383 pVCpu->iem.s.offCurInstrStart = 0;
9384 pVCpu->iem.s.offInstrNextByte = 0;
9385 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
9386#else
9387 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
9388 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
9389#endif
9390 rcStrict = VINF_SUCCESS;
9391 }
9392 else
9393 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9394 if (rcStrict == VINF_SUCCESS)
9395 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneWithPrefetchedByPC");
9396 else if (pVCpu->iem.s.cActiveMappings > 0)
9397 iemMemRollback(pVCpu);
9398
9399 return rcStrict;
9400}
9401
9402
9403VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPUCC pVCpu, uint32_t *pcbWritten)
9404{
9405 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
9406 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_BYPASS_HANDLERS);
9407 if (rcStrict == VINF_SUCCESS)
9408 {
9409 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassEx");
9410 if (pcbWritten)
9411 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
9412 }
9413 else if (pVCpu->iem.s.cActiveMappings > 0)
9414 iemMemRollback(pVCpu);
9415
9416 return rcStrict;
9417}
9418
9419
9420VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPUCC pVCpu, uint64_t OpcodeBytesPC,
9421 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
9422{
9423 VBOXSTRICTRC rcStrict;
9424 if ( cbOpcodeBytes
9425 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
9426 {
9427 iemInitDecoder(pVCpu, IEM_F_BYPASS_HANDLERS);
9428#ifdef IEM_WITH_CODE_TLB
9429 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
9430 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
9431 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
9432 pVCpu->iem.s.offCurInstrStart = 0;
9433 pVCpu->iem.s.offInstrNextByte = 0;
9434 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
9435#else
9436 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
9437 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
9438#endif
9439 rcStrict = VINF_SUCCESS;
9440 }
9441 else
9442 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_BYPASS_HANDLERS);
9443 if (rcStrict == VINF_SUCCESS)
9444 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPC");
9445 else if (pVCpu->iem.s.cActiveMappings > 0)
9446 iemMemRollback(pVCpu);
9447
9448 return rcStrict;
9449}
9450
9451
9452/**
9453 * For handling split cacheline lock operations when the host has split-lock
9454 * detection enabled.
9455 *
9456 * This will cause the interpreter to disregard the lock prefix and implicit
9457 * locking (xchg).
9458 *
9459 * @returns Strict VBox status code.
9460 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9461 */
9462VMMDECL(VBOXSTRICTRC) IEMExecOneIgnoreLock(PVMCPUCC pVCpu)
9463{
9464 /*
9465 * Do the decoding and emulation.
9466 */
9467 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_X86_DISREGARD_LOCK);
9468 if (rcStrict == VINF_SUCCESS)
9469 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneIgnoreLock");
9470 else if (pVCpu->iem.s.cActiveMappings > 0)
9471 iemMemRollback(pVCpu);
9472
9473 if (rcStrict != VINF_SUCCESS)
9474 LogFlow(("IEMExecOneIgnoreLock: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9475 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9476 return rcStrict;
9477}
9478
9479
9480/**
9481 * Code common to IEMExecLots and IEMExecRecompilerThreaded that attempts to
9482 * inject a pending TRPM trap.
9483 */
9484VBOXSTRICTRC iemExecInjectPendingTrap(PVMCPUCC pVCpu)
9485{
9486 Assert(TRPMHasTrap(pVCpu));
9487
9488 if ( !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
9489 && !CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx))
9490 {
9491 /** @todo Can we centralize this under CPUMCanInjectInterrupt()? */
9492#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9493 bool fIntrEnabled = CPUMGetGuestGif(&pVCpu->cpum.GstCtx);
9494 if (fIntrEnabled)
9495 {
9496 if (!CPUMIsGuestInNestedHwvirtMode(IEM_GET_CTX(pVCpu)))
9497 fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
9498 else if (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
9499 fIntrEnabled = CPUMIsGuestVmxPhysIntrEnabled(IEM_GET_CTX(pVCpu));
9500 else
9501 {
9502 Assert(CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)));
9503 fIntrEnabled = CPUMIsGuestSvmPhysIntrEnabled(pVCpu, IEM_GET_CTX(pVCpu));
9504 }
9505 }
9506#else
9507 bool fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
9508#endif
9509 if (fIntrEnabled)
9510 {
9511 uint8_t u8TrapNo;
9512 TRPMEVENT enmType;
9513 uint32_t uErrCode;
9514 RTGCPTR uCr2;
9515 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /*pu8InstLen*/, NULL /*fIcebp*/);
9516 AssertRC(rc2);
9517 Assert(enmType == TRPM_HARDWARE_INT);
9518 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /*cbInstr*/);
9519
9520 TRPMResetTrap(pVCpu);
9521
9522#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9523 /* Injecting an event may cause a VM-exit. */
9524 if ( rcStrict != VINF_SUCCESS
9525 && rcStrict != VINF_IEM_RAISED_XCPT)
9526 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
9527#else
9528 NOREF(rcStrict);
9529#endif
9530 }
9531 }
9532
9533 return VINF_SUCCESS;
9534}
9535
9536
9537VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPUCC pVCpu, uint32_t cMaxInstructions, uint32_t cPollRate, uint32_t *pcInstructions)
9538{
9539 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
9540 AssertMsg(RT_IS_POWER_OF_TWO(cPollRate + 1), ("%#x\n", cPollRate));
9541 Assert(cMaxInstructions > 0);
9542
9543 /*
9544 * See if there is an interrupt pending in TRPM, inject it if we can.
9545 */
9546 /** @todo What if we are injecting an exception and not an interrupt? Is that
9547 * possible here? For now we assert it is indeed only an interrupt. */
9548 if (!TRPMHasTrap(pVCpu))
9549 { /* likely */ }
9550 else
9551 {
9552 VBOXSTRICTRC rcStrict = iemExecInjectPendingTrap(pVCpu);
9553 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9554 { /*likely */ }
9555 else
9556 return rcStrict;
9557 }
9558
9559 /*
9560 * Initial decoder init w/ prefetch, then setup setjmp.
9561 */
9562 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9563 if (rcStrict == VINF_SUCCESS)
9564 {
9565#ifdef IEM_WITH_SETJMP
9566 pVCpu->iem.s.cActiveMappings = 0; /** @todo wtf? */
9567 IEM_TRY_SETJMP(pVCpu, rcStrict)
9568#endif
9569 {
9570 /*
9571             * The run loop.  The instruction count is limited by the caller via cMaxInstructions.
9572 */
9573 uint32_t cMaxInstructionsGccStupidity = cMaxInstructions;
9574 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
9575 for (;;)
9576 {
9577 /*
9578 * Log the state.
9579 */
9580#ifdef LOG_ENABLED
9581 iemLogCurInstr(pVCpu, true, "IEMExecLots");
9582#endif
9583
9584 /*
9585 * Do the decoding and emulation.
9586 */
9587 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9588 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9589#ifdef VBOX_STRICT
9590 CPUMAssertGuestRFlagsCookie(pVM, pVCpu);
9591#endif
9592 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9593 {
9594 Assert(pVCpu->iem.s.cActiveMappings == 0);
9595 pVCpu->iem.s.cInstructions++;
9596
9597#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9598 /* Perform any VMX nested-guest instruction boundary actions. */
9599 uint64_t fCpu = pVCpu->fLocalForcedActions;
9600 if (!(fCpu & ( VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9601 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)))
9602 { /* likely */ }
9603 else
9604 {
9605 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9606 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9607 fCpu = pVCpu->fLocalForcedActions;
9608 else
9609 {
9610 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9611 break;
9612 }
9613 }
9614#endif
9615 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
9616 {
9617#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
9618 uint64_t fCpu = pVCpu->fLocalForcedActions;
9619#endif
9620 fCpu &= VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
9621 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
9622 | VMCPU_FF_TLB_FLUSH
9623 | VMCPU_FF_UNHALT );
9624
9625 if (RT_LIKELY( ( !fCpu
9626 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
9627 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF) )
9628 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) ))
9629 {
9630 if (--cMaxInstructionsGccStupidity > 0)
9631 {
9632                            /* Poll timers every now and then according to the caller's specs. */
9633 if ( (cMaxInstructionsGccStupidity & cPollRate) != 0
9634 || !TMTimerPollBool(pVM, pVCpu))
9635 {
9636 Assert(pVCpu->iem.s.cActiveMappings == 0);
9637 iemReInitDecoder(pVCpu);
9638 continue;
9639 }
9640 }
9641 }
9642 }
9643 Assert(pVCpu->iem.s.cActiveMappings == 0);
9644 }
9645 else if (pVCpu->iem.s.cActiveMappings > 0)
9646 iemMemRollback(pVCpu);
9647 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9648 break;
9649 }
9650 }
9651#ifdef IEM_WITH_SETJMP
9652 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
9653 {
9654 if (pVCpu->iem.s.cActiveMappings > 0)
9655 iemMemRollback(pVCpu);
9656# if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9657 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9658# endif
9659 pVCpu->iem.s.cLongJumps++;
9660 }
9661 IEM_CATCH_LONGJMP_END(pVCpu);
9662#endif
9663
9664 /*
9665 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
9666 */
9667 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
9668 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
9669 }
9670 else
9671 {
9672 if (pVCpu->iem.s.cActiveMappings > 0)
9673 iemMemRollback(pVCpu);
9674
9675#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9676 /*
9677 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
9678 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
9679 */
9680 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9681#endif
9682 }
9683
9684 /*
9685 * Maybe re-enter raw-mode and log.
9686 */
9687 if (rcStrict != VINF_SUCCESS)
9688 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9689 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9690 if (pcInstructions)
9691 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
9692 return rcStrict;
9693}
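
/*
 * Editor's sketch (not part of the original source): a hypothetical call to
 * IEMExecLots().  Note that cPollRate is used as a mask, so it must be a
 * power of two minus one (see the assertion at the top of the function);
 * the concrete numbers below are illustrative assumptions.
 */
#if 0 /* illustrative sketch, never compiled */
static VBOXSTRICTRC sketchExecLots(PVMCPUCC pVCpu)
{
    uint32_t     cInstructions = 0;
    VBOXSTRICTRC rcStrict = IEMExecLots(pVCpu, 4096 /*cMaxInstructions*/,
                                        511 /*cPollRate*/, &cInstructions);
    LogFlow(("sketchExecLots: %u instructions -> %Rrc\n",
             cInstructions, VBOXSTRICTRC_VAL(rcStrict)));
    return rcStrict;
}
#endif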
9694
9695
9696/**
9697 * Interface used by EMExecuteExec, does exit statistics and limits.
9698 *
9699 * @returns Strict VBox status code.
9700 * @param pVCpu The cross context virtual CPU structure.
9701 * @param fWillExit To be defined.
9702 * @param cMinInstructions Minimum number of instructions to execute before checking for FFs.
9703 * @param cMaxInstructions Maximum number of instructions to execute.
9704 * @param cMaxInstructionsWithoutExits
9705 * The max number of instructions without exits.
9706 * @param pStats Where to return statistics.
9707 */
9708VMMDECL(VBOXSTRICTRC) IEMExecForExits(PVMCPUCC pVCpu, uint32_t fWillExit, uint32_t cMinInstructions, uint32_t cMaxInstructions,
9709 uint32_t cMaxInstructionsWithoutExits, PIEMEXECFOREXITSTATS pStats)
9710{
9711 NOREF(fWillExit); /** @todo define flexible exit crits */
9712
9713 /*
9714 * Initialize return stats.
9715 */
9716 pStats->cInstructions = 0;
9717 pStats->cExits = 0;
9718 pStats->cMaxExitDistance = 0;
9719 pStats->cReserved = 0;
9720
9721 /*
9722 * Initial decoder init w/ prefetch, then setup setjmp.
9723 */
9724 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9725 if (rcStrict == VINF_SUCCESS)
9726 {
9727#ifdef IEM_WITH_SETJMP
9728 pVCpu->iem.s.cActiveMappings = 0; /** @todo wtf?!? */
9729 IEM_TRY_SETJMP(pVCpu, rcStrict)
9730#endif
9731 {
9732#ifdef IN_RING0
9733 bool const fCheckPreemptionPending = !RTThreadPreemptIsPossible() || !RTThreadPreemptIsEnabled(NIL_RTTHREAD);
9734#endif
9735 uint32_t cInstructionSinceLastExit = 0;
9736
9737 /*
9738             * The run loop.  The instruction count is limited by the caller via cMaxInstructions and cMaxInstructionsWithoutExits.
9739 */
9740 PVM pVM = pVCpu->CTX_SUFF(pVM);
9741 for (;;)
9742 {
9743 /*
9744 * Log the state.
9745 */
9746#ifdef LOG_ENABLED
9747 iemLogCurInstr(pVCpu, true, "IEMExecForExits");
9748#endif
9749
9750 /*
9751 * Do the decoding and emulation.
9752 */
9753 uint32_t const cPotentialExits = pVCpu->iem.s.cPotentialExits;
9754
9755 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9756 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9757
9758 if ( cPotentialExits != pVCpu->iem.s.cPotentialExits
9759 && cInstructionSinceLastExit > 0 /* don't count the first */ )
9760 {
9761 pStats->cExits += 1;
9762 if (cInstructionSinceLastExit > pStats->cMaxExitDistance)
9763 pStats->cMaxExitDistance = cInstructionSinceLastExit;
9764 cInstructionSinceLastExit = 0;
9765 }
9766
9767 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9768 {
9769 Assert(pVCpu->iem.s.cActiveMappings == 0);
9770 pVCpu->iem.s.cInstructions++;
9771 pStats->cInstructions++;
9772 cInstructionSinceLastExit++;
9773
9774#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9775 /* Perform any VMX nested-guest instruction boundary actions. */
9776 uint64_t fCpu = pVCpu->fLocalForcedActions;
9777 if (!(fCpu & ( VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9778 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)))
9779 { /* likely */ }
9780 else
9781 {
9782 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9783 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9784 fCpu = pVCpu->fLocalForcedActions;
9785 else
9786 {
9787 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9788 break;
9789 }
9790 }
9791#endif
9792 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
9793 {
9794#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
9795 uint64_t fCpu = pVCpu->fLocalForcedActions;
9796#endif
9797 fCpu &= VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
9798 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
9799 | VMCPU_FF_TLB_FLUSH
9800 | VMCPU_FF_UNHALT );
9801 if (RT_LIKELY( ( ( !fCpu
9802 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
9803 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF))
9804 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) )
9805 || pStats->cInstructions < cMinInstructions))
9806 {
9807 if (pStats->cInstructions < cMaxInstructions)
9808 {
9809 if (cInstructionSinceLastExit <= cMaxInstructionsWithoutExits)
9810 {
9811#ifdef IN_RING0
9812 if ( !fCheckPreemptionPending
9813 || !RTThreadPreemptIsPending(NIL_RTTHREAD))
9814#endif
9815 {
9816 Assert(pVCpu->iem.s.cActiveMappings == 0);
9817 iemReInitDecoder(pVCpu);
9818 continue;
9819 }
9820#ifdef IN_RING0
9821 rcStrict = VINF_EM_RAW_INTERRUPT;
9822 break;
9823#endif
9824 }
9825 }
9826 }
9827 Assert(!(fCpu & VMCPU_FF_IEM));
9828 }
9829 Assert(pVCpu->iem.s.cActiveMappings == 0);
9830 }
9831 else if (pVCpu->iem.s.cActiveMappings > 0)
9832 iemMemRollback(pVCpu);
9833 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9834 break;
9835 }
9836 }
9837#ifdef IEM_WITH_SETJMP
9838 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
9839 {
9840 if (pVCpu->iem.s.cActiveMappings > 0)
9841 iemMemRollback(pVCpu);
9842 pVCpu->iem.s.cLongJumps++;
9843 }
9844 IEM_CATCH_LONGJMP_END(pVCpu);
9845#endif
9846
9847 /*
9848 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
9849 */
9850 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
9851 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
9852 }
9853 else
9854 {
9855 if (pVCpu->iem.s.cActiveMappings > 0)
9856 iemMemRollback(pVCpu);
9857
9858#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9859 /*
9860 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
9861 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
9862 */
9863 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9864#endif
9865 }
9866
9867 /*
9868 * Maybe re-enter raw-mode and log.
9869 */
9870 if (rcStrict != VINF_SUCCESS)
9871 LogFlow(("IEMExecForExits: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc; ins=%u exits=%u maxdist=%u\n",
9872 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp,
9873 pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict), pStats->cInstructions, pStats->cExits, pStats->cMaxExitDistance));
9874 return rcStrict;
9875}
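
/*
 * Editor's sketch (not part of the original source): a hypothetical call to
 * IEMExecForExits() gathering the exit statistics.  It assumes that
 * IEMEXECFOREXITSTATS is the structure behind PIEMEXECFOREXITSTATS; the
 * instruction limits used are illustrative only.
 */
#if 0 /* illustrative sketch, never compiled */
static VBOXSTRICTRC sketchExecForExits(PVMCPUCC pVCpu)
{
    IEMEXECFOREXITSTATS Stats;
    VBOXSTRICTRC rcStrict = IEMExecForExits(pVCpu, 0 /*fWillExit - not yet defined*/,
                                            16 /*cMinInstructions*/,
                                            4096 /*cMaxInstructions*/,
                                            512 /*cMaxInstructionsWithoutExits*/,
                                            &Stats);
    LogFlow(("sketchExecForExits: %u instructions, %u exits, max distance %u -> %Rrc\n",
             Stats.cInstructions, Stats.cExits, Stats.cMaxExitDistance,
             VBOXSTRICTRC_VAL(rcStrict)));
    return rcStrict;
}
#endif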
9876
9877
9878/**
9879 * Injects a trap, fault, abort, software interrupt or external interrupt.
9880 *
9881 * The parameter list matches TRPMQueryTrapAll pretty closely.
9882 *
9883 * @returns Strict VBox status code.
9884 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9885 * @param u8TrapNo The trap number.
9886 * @param enmType What type is it (trap/fault/abort), software
9887 * interrupt or hardware interrupt.
9888 * @param uErrCode The error code if applicable.
9889 * @param uCr2 The CR2 value if applicable.
9890 * @param cbInstr The instruction length (only relevant for
9891 * software interrupts).
9892 */
9893VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPUCC pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
9894 uint8_t cbInstr)
9895{
9896 iemInitDecoder(pVCpu, 0 /*fExecOpts*/); /** @todo wrong init function! */
9897#ifdef DBGFTRACE_ENABLED
9898 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
9899 u8TrapNo, enmType, uErrCode, uCr2);
9900#endif
9901
9902 uint32_t fFlags;
9903 switch (enmType)
9904 {
9905 case TRPM_HARDWARE_INT:
9906 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
9907 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
9908 uErrCode = uCr2 = 0;
9909 break;
9910
9911 case TRPM_SOFTWARE_INT:
9912 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
9913 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
9914 uErrCode = uCr2 = 0;
9915 break;
9916
9917 case TRPM_TRAP:
9918 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
9919 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
9920 if (u8TrapNo == X86_XCPT_PF)
9921 fFlags |= IEM_XCPT_FLAGS_CR2;
9922 switch (u8TrapNo)
9923 {
9924 case X86_XCPT_DF:
9925 case X86_XCPT_TS:
9926 case X86_XCPT_NP:
9927 case X86_XCPT_SS:
9928 case X86_XCPT_PF:
9929 case X86_XCPT_AC:
9930 case X86_XCPT_GP:
9931 fFlags |= IEM_XCPT_FLAGS_ERR;
9932 break;
9933 }
9934 break;
9935
9936 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9937 }
9938
9939 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
9940
9941 if (pVCpu->iem.s.cActiveMappings > 0)
9942 iemMemRollback(pVCpu);
9943
9944 return rcStrict;
9945}
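
/*
 * Editor's sketch (not part of the original source): injecting an external
 * hardware interrupt via IEMInjectTrap().  For TRPM_HARDWARE_INT the error
 * code, CR2 and instruction length are ignored (the function zeroes the
 * former two itself); the vector number is an illustrative assumption.
 */
#if 0 /* illustrative sketch, never compiled */
static VBOXSTRICTRC sketchInjectExtInt(PVMCPUCC pVCpu)
{
    return IEMInjectTrap(pVCpu, 0x41 /*u8TrapNo*/, TRPM_HARDWARE_INT,
                         0 /*uErrCode*/, 0 /*uCr2*/, 0 /*cbInstr*/);
}
#endif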
9946
9947
9948/**
9949 * Injects the active TRPM event.
9950 *
9951 * @returns Strict VBox status code.
9952 * @param pVCpu The cross context virtual CPU structure.
9953 */
9954VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPUCC pVCpu)
9955{
9956#ifndef IEM_IMPLEMENTS_TASKSWITCH
9957 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
9958#else
9959 uint8_t u8TrapNo;
9960 TRPMEVENT enmType;
9961 uint32_t uErrCode;
9962 RTGCUINTPTR uCr2;
9963 uint8_t cbInstr;
9964 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr, NULL /* fIcebp */);
9965 if (RT_FAILURE(rc))
9966 return rc;
9967
9968 /** @todo r=ramshankar: Pass ICEBP info. to IEMInjectTrap() below and handle
9969 * ICEBP \#DB injection as a special case. */
9970 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
9971#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
9972 if (rcStrict == VINF_SVM_VMEXIT)
9973 rcStrict = VINF_SUCCESS;
9974#endif
9975#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9976 if (rcStrict == VINF_VMX_VMEXIT)
9977 rcStrict = VINF_SUCCESS;
9978#endif
9979 /** @todo Are there any other codes that imply the event was successfully
9980 * delivered to the guest? See @bugref{6607}. */
9981 if ( rcStrict == VINF_SUCCESS
9982 || rcStrict == VINF_IEM_RAISED_XCPT)
9983 TRPMResetTrap(pVCpu);
9984
9985 return rcStrict;
9986#endif
9987}
9988
9989
9990VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
9991{
9992 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
9993 return VERR_NOT_IMPLEMENTED;
9994}
9995
9996
9997VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
9998{
9999 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
10000 return VERR_NOT_IMPLEMENTED;
10001}
10002
10003
10004/**
10005 * Interface for HM and EM for executing string I/O OUT (write) instructions.
10006 *
10007 * This API ASSUMES that the caller has already verified that the guest code is
10008 * allowed to access the I/O port. (The I/O port is in the DX register in the
10009 * guest state.)
10010 *
10011 * @returns Strict VBox status code.
10012 * @param pVCpu The cross context virtual CPU structure.
10013 * @param cbValue The size of the I/O port access (1, 2, or 4).
10014 * @param enmAddrMode The addressing mode.
10015 * @param fRepPrefix Indicates whether a repeat prefix is used
10016 * (doesn't matter which for this instruction).
10017 * @param cbInstr The instruction length in bytes.
10018 * @param iEffSeg The effective segment register number (X86_SREG_XXX).
10019 * @param fIoChecked Whether the access to the I/O port has been
10020 * checked or not. It's typically checked in the
10021 * HM scenario.
10022 */
10023VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
10024 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
10025{
10026 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
10027 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10028
10029 /*
10030 * State init.
10031 */
10032 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10033
10034 /*
10035 * Switch orgy for getting to the right handler.
10036 */
10037 VBOXSTRICTRC rcStrict;
10038 if (fRepPrefix)
10039 {
10040 switch (enmAddrMode)
10041 {
10042 case IEMMODE_16BIT:
10043 switch (cbValue)
10044 {
10045 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10046 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10047 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10048 default:
10049 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10050 }
10051 break;
10052
10053 case IEMMODE_32BIT:
10054 switch (cbValue)
10055 {
10056 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10057 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10058 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10059 default:
10060 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10061 }
10062 break;
10063
10064 case IEMMODE_64BIT:
10065 switch (cbValue)
10066 {
10067 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10068 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10069 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10070 default:
10071 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10072 }
10073 break;
10074
10075 default:
10076 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10077 }
10078 }
10079 else
10080 {
10081 switch (enmAddrMode)
10082 {
10083 case IEMMODE_16BIT:
10084 switch (cbValue)
10085 {
10086 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10087 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10088 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10089 default:
10090 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10091 }
10092 break;
10093
10094 case IEMMODE_32BIT:
10095 switch (cbValue)
10096 {
10097 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10098 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10099 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10100 default:
10101 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10102 }
10103 break;
10104
10105 case IEMMODE_64BIT:
10106 switch (cbValue)
10107 {
10108 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10109 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10110 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10111 default:
10112 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10113 }
10114 break;
10115
10116 default:
10117 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10118 }
10119 }
10120
10121 if (pVCpu->iem.s.cActiveMappings)
10122 iemMemRollback(pVCpu);
10123
10124 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10125}
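
/*
 * Editor's sketch (not part of the original source): how a hypothetical HM
 * exit handler might emulate a byte-sized "rep outsb" with 32-bit addressing
 * and the default DS segment, after having validated access to the port in
 * DX.  The instruction length would come from the hardware exit information;
 * everything else here is an illustrative assumption.  The string-IN
 * interface below is used the same way, minus the segment parameter.
 */
#if 0 /* illustrative sketch, never compiled */
static VBOXSTRICTRC sketchRepOutsb(PVMCPUCC pVCpu, uint8_t cbInstr)
{
    return IEMExecStringIoWrite(pVCpu, 1 /*cbValue*/, IEMMODE_32BIT,
                                true /*fRepPrefix*/, cbInstr,
                                X86_SREG_DS /*iEffSeg*/, true /*fIoChecked*/);
}
#endif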
10126
10127
10128/**
10129 * Interface for HM and EM for executing string I/O IN (read) instructions.
10130 *
10131 * This API ASSUMES that the caller has already verified that the guest code is
10132 * allowed to access the I/O port. (The I/O port is in the DX register in the
10133 * guest state.)
10134 *
10135 * @returns Strict VBox status code.
10136 * @param pVCpu The cross context virtual CPU structure.
10137 * @param cbValue The size of the I/O port access (1, 2, or 4).
10138 * @param enmAddrMode The addressing mode.
10139 * @param fRepPrefix Indicates whether a repeat prefix is used
10140 * (doesn't matter which for this instruction).
10141 * @param cbInstr The instruction length in bytes.
10142 * @param fIoChecked Whether the access to the I/O port has been
10143 * checked or not. It's typically checked in the
10144 * HM scenario.
10145 */
10146VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
10147 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
10148{
10149 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10150
10151 /*
10152 * State init.
10153 */
10154 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10155
10156 /*
10157 * Switch orgy for getting to the right handler.
10158 */
10159 VBOXSTRICTRC rcStrict;
10160 if (fRepPrefix)
10161 {
10162 switch (enmAddrMode)
10163 {
10164 case IEMMODE_16BIT:
10165 switch (cbValue)
10166 {
10167 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
10168 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
10169 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
10170 default:
10171 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10172 }
10173 break;
10174
10175 case IEMMODE_32BIT:
10176 switch (cbValue)
10177 {
10178 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
10179 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
10180 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
10181 default:
10182 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10183 }
10184 break;
10185
10186 case IEMMODE_64BIT:
10187 switch (cbValue)
10188 {
10189 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
10190 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
10191 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
10192 default:
10193 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10194 }
10195 break;
10196
10197 default:
10198 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10199 }
10200 }
10201 else
10202 {
10203 switch (enmAddrMode)
10204 {
10205 case IEMMODE_16BIT:
10206 switch (cbValue)
10207 {
10208 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
10209 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
10210 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
10211 default:
10212 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10213 }
10214 break;
10215
10216 case IEMMODE_32BIT:
10217 switch (cbValue)
10218 {
10219 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
10220 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
10221 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
10222 default:
10223 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10224 }
10225 break;
10226
10227 case IEMMODE_64BIT:
10228 switch (cbValue)
10229 {
10230 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
10231 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
10232 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
10233 default:
10234 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10235 }
10236 break;
10237
10238 default:
10239 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10240 }
10241 }
10242
10243 if ( pVCpu->iem.s.cActiveMappings == 0
10244 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
10245 { /* likely */ }
10246 else
10247 {
10248 AssertMsg(!IOM_SUCCESS(rcStrict), ("%#x\n", VBOXSTRICTRC_VAL(rcStrict)));
10249 iemMemRollback(pVCpu);
10250 }
10251 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10252}
10253
10254
10255/**
10256 * Interface for rawmode to execute an OUT (I/O write) instruction.
10257 *
10258 * @returns Strict VBox status code.
10259 * @param pVCpu The cross context virtual CPU structure.
10260 * @param cbInstr The instruction length in bytes.
10261 * @param u16Port The port to write to.
10262 * @param fImm Whether the port is specified using an immediate operand or
10263 * using the implicit DX register.
10264 * @param cbReg The register size.
10265 *
10266 * @remarks In ring-0 not all of the state needs to be synced in.
10267 */
10268VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
10269{
10270 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10271 Assert(cbReg <= 4 && cbReg != 3);
10272
10273 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10274 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_out, u16Port, cbReg,
10275 ((uint8_t)fImm << 7) | 0xf /** @todo never worked with intercepts */);
10276 Assert(!pVCpu->iem.s.cActiveMappings);
10277 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10278}
10279
10280
10281/**
10282 * Interface for rawmode to execute an IN (I/O read) instruction.
10283 *
10284 * @returns Strict VBox status code.
10285 * @param pVCpu The cross context virtual CPU structure.
10286 * @param cbInstr The instruction length in bytes.
10287 * @param u16Port The port to read.
10288 * @param fImm Whether the port is specified using an immediate operand or
10289 * using the implicit DX.
10290 * @param cbReg The register size.
10291 */
10292VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
10293{
10294 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10295 Assert(cbReg <= 4 && cbReg != 3);
10296
10297 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10298 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_in, u16Port, cbReg,
10299 ((uint8_t)fImm << 7) | 0xf /** @todo never worked with intercepts */);
10300 Assert(!pVCpu->iem.s.cActiveMappings);
10301 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10302}
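
/*
 * Editor's sketch (not part of the original source): exercising the two
 * decoded port I/O interfaces above.  The instruction encodings referenced
 * in the comments ("out dx, al" = 1 byte, "in al, imm8" = 2 bytes) are
 * standard x86; the port number 0x71 is an illustrative assumption.
 */
#if 0 /* illustrative sketch, never compiled */
static VBOXSTRICTRC sketchDecodedPortIo(PVMCPUCC pVCpu)
{
    /* "out dx, al": port taken from DX (fImm=false), byte sized. */
    VBOXSTRICTRC rcStrict = IEMExecDecodedOut(pVCpu, 1 /*cbInstr*/, pVCpu->cpum.GstCtx.dx,
                                              false /*fImm*/, 1 /*cbReg*/);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    /* "in al, 71h": port given by an immediate operand (fImm=true). */
    return IEMExecDecodedIn(pVCpu, 2 /*cbInstr*/, 0x71 /*u16Port*/, true /*fImm*/, 1 /*cbReg*/);
}
#endif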
10303
10304
10305/**
10306 * Interface for HM and EM to write to a CRx register.
10307 *
10308 * @returns Strict VBox status code.
10309 * @param pVCpu The cross context virtual CPU structure.
10310 * @param cbInstr The instruction length in bytes.
10311 * @param iCrReg The control register number (destination).
10312 * @param iGReg The general purpose register number (source).
10313 *
10314 * @remarks In ring-0 not all of the state needs to be synced in.
10315 */
10316VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
10317{
10318 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10319 Assert(iCrReg < 16);
10320 Assert(iGReg < 16);
10321
10322 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10323 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
10324 Assert(!pVCpu->iem.s.cActiveMappings);
10325 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10326}
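
/*
 * Editor's sketch (not part of the original source): emulating "mov cr3, rax"
 * (0F 22 D8, i.e. 3 bytes) with the interface above; the choice of registers
 * is an illustrative assumption.
 */
#if 0 /* illustrative sketch, never compiled */
static VBOXSTRICTRC sketchMovToCr3(PVMCPUCC pVCpu)
{
    return IEMExecDecodedMovCRxWrite(pVCpu, 3 /*cbInstr*/, 3 /*iCrReg=CR3*/,
                                     X86_GREG_xAX /*iGReg*/);
}
#endif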
10327
10328
10329/**
10330 * Interface for HM and EM to read from a CRx register.
10331 *
10332 * @returns Strict VBox status code.
10333 * @param pVCpu The cross context virtual CPU structure.
10334 * @param cbInstr The instruction length in bytes.
10335 * @param iGReg The general purpose register number (destination).
10336 * @param iCrReg The control register number (source).
10337 *
10338 * @remarks In ring-0 not all of the state needs to be synced in.
10339 */
10340VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
10341{
10342 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10343 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4
10344 | CPUMCTX_EXTRN_APIC_TPR);
10345 Assert(iCrReg < 16);
10346 Assert(iGReg < 16);
10347
10348 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10349 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
10350 Assert(!pVCpu->iem.s.cActiveMappings);
10351 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10352}
10353
10354
10355/**
10356 * Interface for HM and EM to write to a DRx register.
10357 *
10358 * @returns Strict VBox status code.
10359 * @param pVCpu The cross context virtual CPU structure.
10360 * @param cbInstr The instruction length in bytes.
10361 * @param iDrReg The debug register number (destination).
10362 * @param iGReg The general purpose register number (source).
10363 *
10364 * @remarks In ring-0 not all of the state needs to be synced in.
10365 */
10366VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovDRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iDrReg, uint8_t iGReg)
10367{
10368 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10369 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_DR7);
10370 Assert(iDrReg < 8);
10371 Assert(iGReg < 16);
10372
10373 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10374 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Dd_Rd, iDrReg, iGReg);
10375 Assert(!pVCpu->iem.s.cActiveMappings);
10376 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10377}
10378
10379
10380/**
10381 * Interface for HM and EM to read from a DRx register.
10382 *
10383 * @returns Strict VBox status code.
10384 * @param pVCpu The cross context virtual CPU structure.
10385 * @param cbInstr The instruction length in bytes.
10386 * @param iGReg The general purpose register number (destination).
10387 * @param iDrReg The debug register number (source).
10388 *
10389 * @remarks In ring-0 not all of the state needs to be synced in.
10390 */
10391VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovDRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iDrReg)
10392{
10393 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10394 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_DR7);
10395 Assert(iDrReg < 8);
10396 Assert(iGReg < 16);
10397
10398 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10399 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Dd, iGReg, iDrReg);
10400 Assert(!pVCpu->iem.s.cActiveMappings);
10401 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10402}
10403
10404
10405/**
10406 * Interface for HM and EM to clear the CR0[TS] bit.
10407 *
10408 * @returns Strict VBox status code.
10409 * @param pVCpu The cross context virtual CPU structure.
10410 * @param cbInstr The instruction length in bytes.
10411 *
10412 * @remarks In ring-0 not all of the state needs to be synced in.
10413 */
10414VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPUCC pVCpu, uint8_t cbInstr)
10415{
10416 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10417
10418 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10419 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
10420 Assert(!pVCpu->iem.s.cActiveMappings);
10421 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10422}
10423
10424
10425/**
10426 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
10427 *
10428 * @returns Strict VBox status code.
10429 * @param pVCpu The cross context virtual CPU structure.
10430 * @param cbInstr The instruction length in bytes.
10431 * @param uValue The value to load into CR0.
10432 * @param GCPtrEffDst The guest-linear address if the LMSW instruction has a
10433 * memory operand. Otherwise pass NIL_RTGCPTR.
10434 *
10435 * @remarks In ring-0 not all of the state needs to be synced in.
10436 */
10437VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t uValue, RTGCPTR GCPtrEffDst)
10438{
10439 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10440
10441 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10442 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_lmsw, uValue, GCPtrEffDst);
10443 Assert(!pVCpu->iem.s.cActiveMappings);
10444 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10445}
10446
10447
10448/**
10449 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
10450 *
10451 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
10452 *
10453 * @returns Strict VBox status code.
10454 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10455 * @param cbInstr The instruction length in bytes.
10456 * @remarks In ring-0 not all of the state needs to be synced in.
10457 * @thread EMT(pVCpu)
10458 */
10459VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPUCC pVCpu, uint8_t cbInstr)
10460{
10461 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10462
10463 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10464 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
10465 Assert(!pVCpu->iem.s.cActiveMappings);
10466 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10467}
10468
10469
10470/**
10471 * Interface for HM and EM to emulate the WBINVD instruction.
10472 *
10473 * @returns Strict VBox status code.
10474 * @param pVCpu The cross context virtual CPU structure.
10475 * @param cbInstr The instruction length in bytes.
10476 *
10477 * @remarks In ring-0 not all of the state needs to be synced in.
10478 */
10479VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWbinvd(PVMCPUCC pVCpu, uint8_t cbInstr)
10480{
10481 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10482
10483 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10484 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wbinvd);
10485 Assert(!pVCpu->iem.s.cActiveMappings);
10486 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10487}
10488
10489
10490/**
10491 * Interface for HM and EM to emulate the INVD instruction.
10492 *
10493 * @returns Strict VBox status code.
10494 * @param pVCpu The cross context virtual CPU structure.
10495 * @param cbInstr The instruction length in bytes.
10496 *
10497 * @remarks In ring-0 not all of the state needs to be synced in.
10498 */
10499VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvd(PVMCPUCC pVCpu, uint8_t cbInstr)
10500{
10501 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10502
10503 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10504 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invd);
10505 Assert(!pVCpu->iem.s.cActiveMappings);
10506 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10507}
10508
10509
10510/**
10511 * Interface for HM and EM to emulate the INVLPG instruction.
10512 *
10513 * @returns Strict VBox status code.
10514 * @retval VINF_PGM_SYNC_CR3
10515 *
10516 * @param pVCpu The cross context virtual CPU structure.
10517 * @param cbInstr The instruction length in bytes.
10518 * @param GCPtrPage The effective address of the page to invalidate.
10519 *
10520 * @remarks In ring-0 not all of the state needs to be synced in.
10521 */
10522VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpg(PVMCPUCC pVCpu, uint8_t cbInstr, RTGCPTR GCPtrPage)
10523{
10524 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10525
10526 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10527 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_invlpg, GCPtrPage);
10528 Assert(!pVCpu->iem.s.cActiveMappings);
10529 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10530}
10531
10532
10533/**
10534 * Interface for HM and EM to emulate the INVPCID instruction.
10535 *
10536 * @returns Strict VBox status code.
10537 * @retval VINF_PGM_SYNC_CR3
10538 *
10539 * @param pVCpu The cross context virtual CPU structure.
10540 * @param cbInstr The instruction length in bytes.
10541 * @param iEffSeg The effective segment register.
10542 * @param GCPtrDesc The effective address of the INVPCID descriptor.
10543 * @param uType The invalidation type.
10544 *
10545 * @remarks In ring-0 not all of the state needs to be synced in.
10546 */
10547VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvpcid(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPTR GCPtrDesc,
10548 uint64_t uType)
10549{
10550 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 4);
10551
10552 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10553 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_invpcid, iEffSeg, GCPtrDesc, uType);
10554 Assert(!pVCpu->iem.s.cActiveMappings);
10555 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10556}
10557
10558
10559/**
10560 * Interface for HM and EM to emulate the CPUID instruction.
10561 *
10562 * @returns Strict VBox status code.
10563 *
10564 * @param pVCpu The cross context virtual CPU structure.
10565 * @param cbInstr The instruction length in bytes.
10566 *
10567 * @remarks Not all of the state needs to be synced in, the usual plus RAX and RCX.
10568 */
10569VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedCpuid(PVMCPUCC pVCpu, uint8_t cbInstr)
10570{
10571 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10572 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX);
10573
10574 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10575 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_cpuid);
10576 Assert(!pVCpu->iem.s.cActiveMappings);
10577 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10578}
10579
10580
10581/**
10582 * Interface for HM and EM to emulate the RDPMC instruction.
10583 *
10584 * @returns Strict VBox status code.
10585 *
10586 * @param pVCpu The cross context virtual CPU structure.
10587 * @param cbInstr The instruction length in bytes.
10588 *
10589 * @remarks Not all of the state needs to be synced in.
10590 */
10591VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdpmc(PVMCPUCC pVCpu, uint8_t cbInstr)
10592{
10593 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10594 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
10595
10596 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10597 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdpmc);
10598 Assert(!pVCpu->iem.s.cActiveMappings);
10599 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10600}
10601
10602
10603/**
10604 * Interface for HM and EM to emulate the RDTSC instruction.
10605 *
10606 * @returns Strict VBox status code.
10607 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10608 *
10609 * @param pVCpu The cross context virtual CPU structure.
10610 * @param cbInstr The instruction length in bytes.
10611 *
10612 * @remarks Not all of the state needs to be synced in.
10613 */
10614VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtsc(PVMCPUCC pVCpu, uint8_t cbInstr)
10615{
10616 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10617 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
10618
10619 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10620 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtsc);
10621 Assert(!pVCpu->iem.s.cActiveMappings);
10622 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10623}
10624
10625
10626/**
10627 * Interface for HM and EM to emulate the RDTSCP instruction.
10628 *
10629 * @returns Strict VBox status code.
10630 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10631 *
10632 * @param pVCpu The cross context virtual CPU structure.
10633 * @param cbInstr The instruction length in bytes.
10634 *
10635 * @remarks Not all of the state needs to be synced in. Recommended
10636 * to include CPUMCTX_EXTRN_TSC_AUX to avoid an extra fetch call.
10637 */
10638VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtscp(PVMCPUCC pVCpu, uint8_t cbInstr)
10639{
10640 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10641 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_TSC_AUX);
10642
10643 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10644 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtscp);
10645 Assert(!pVCpu->iem.s.cActiveMappings);
10646 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10647}
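/*
 * Illustrative sketch (editor's addition): a ring-0 caller making sure the
 * recommended state, including CPUMCTX_EXTRN_TSC_AUX, is imported before
 * handing RDTSCP to IEM.  The import helper callerImportGuestStateExample()
 * is hypothetical; substitute whatever state-import routine the caller
 * actually uses.
 */
#if 0 /* example only, never compiled */
static VBOXSTRICTRC callerExampleExitRdtscp(PVMCPUCC pVCpu, uint8_t cbInstr)
{
    /* Hypothetical helper: import the named CPUMCTX_EXTRN_XXX bits from the hardware context. */
    int rc = callerImportGuestStateExample(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK
                                                | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_TSC_AUX);
    AssertRCReturn(rc, rc);
    return IEMExecDecodedRdtscp(pVCpu, cbInstr);
}
#endif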
10648
10649
10650/**
10651 * Interface for HM and EM to emulate the RDMSR instruction.
10652 *
10653 * @returns Strict VBox status code.
10654 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10655 *
10656 * @param pVCpu The cross context virtual CPU structure.
10657 * @param cbInstr The instruction length in bytes.
10658 *
10659 * @remarks Not all of the state needs to be synced in. Requires RCX and
10660 * (currently) all MSRs.
10661 */
10662VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
10663{
10664 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10665 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_ALL_MSRS);
10666
10667 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10668 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdmsr);
10669 Assert(!pVCpu->iem.s.cActiveMappings);
10670 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10671}
10672
10673
10674/**
10675 * Interface for HM and EM to emulate the WRMSR instruction.
10676 *
10677 * @returns Strict VBox status code.
10678 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10679 *
10680 * @param pVCpu The cross context virtual CPU structure.
10681 * @param cbInstr The instruction length in bytes.
10682 *
10683 * @remarks Not all of the state needs to be synced in. Requires RCX, RAX, RDX,
10684 * and (currently) all MSRs.
10685 */
10686VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWrmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
10687{
10688 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10689 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK
10690 | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_ALL_MSRS);
10691
10692 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10693 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wrmsr);
10694 Assert(!pVCpu->iem.s.cActiveMappings);
10695 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10696}
10697
10698
10699/**
10700 * Interface for HM and EM to emulate the MONITOR instruction.
10701 *
10702 * @returns Strict VBox status code.
10703 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10704 *
10705 * @param pVCpu The cross context virtual CPU structure.
10706 * @param cbInstr The instruction length in bytes.
10707 *
10708 * @remarks Not all of the state needs to be synced in.
10709 * @remarks ASSUMES the default segment of DS and no segment override prefixes
10710 * are used.
10711 */
10712VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMonitor(PVMCPUCC pVCpu, uint8_t cbInstr)
10713{
10714 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10715 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);
10716
10717 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10718 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_monitor, X86_SREG_DS);
10719 Assert(!pVCpu->iem.s.cActiveMappings);
10720 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10721}
10722
10723
10724/**
10725 * Interface for HM and EM to emulate the MWAIT instruction.
10726 *
10727 * @returns Strict VBox status code.
10728 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10729 *
10730 * @param pVCpu The cross context virtual CPU structure.
10731 * @param cbInstr The instruction length in bytes.
10732 *
10733 * @remarks Not all of the state needs to be synced in.
10734 */
10735VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMwait(PVMCPUCC pVCpu, uint8_t cbInstr)
10736{
10737 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10738 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX);
10739
10740 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10741 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_mwait);
10742 Assert(!pVCpu->iem.s.cActiveMappings);
10743 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10744}
10745
10746
10747/**
10748 * Interface for HM and EM to emulate the HLT instruction.
10749 *
10750 * @returns Strict VBox status code.
10751 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10752 *
10753 * @param pVCpu The cross context virtual CPU structure.
10754 * @param cbInstr The instruction length in bytes.
10755 *
10756 * @remarks Not all of the state needs to be synced in.
10757 */
10758VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedHlt(PVMCPUCC pVCpu, uint8_t cbInstr)
10759{
10760 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10761
10762 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10763 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_hlt);
10764 Assert(!pVCpu->iem.s.cActiveMappings);
10765 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10766}
10767
10768
10769/**
10770 * Checks if IEM is in the process of delivering an event (interrupt or
10771 * exception).
10772 *
10773 * @returns true if we're in the process of raising an interrupt or exception,
10774 * false otherwise.
10775 * @param pVCpu The cross context virtual CPU structure.
10776 * @param puVector Where to store the vector associated with the
10777 * currently delivered event, optional.
10778 * @param pfFlags Where to store the event delivery flags (see
10779 * IEM_XCPT_FLAGS_XXX), optional.
10780 * @param puErr Where to store the error code associated with the
10781 * event, optional.
10782 * @param puCr2 Where to store the CR2 associated with the event,
10783 * optional.
10784 * @remarks The caller should check the flags to determine if the error code and
10785 * CR2 are valid for the event.
10786 */
10787VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPUCC pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr, uint64_t *puCr2)
10788{
10789 bool const fRaisingXcpt = pVCpu->iem.s.cXcptRecursions > 0;
10790 if (fRaisingXcpt)
10791 {
10792 if (puVector)
10793 *puVector = pVCpu->iem.s.uCurXcpt;
10794 if (pfFlags)
10795 *pfFlags = pVCpu->iem.s.fCurXcpt;
10796 if (puErr)
10797 *puErr = pVCpu->iem.s.uCurXcptErr;
10798 if (puCr2)
10799 *puCr2 = pVCpu->iem.s.uCurXcptCr2;
10800 }
10801 return fRaisingXcpt;
10802}
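/*
 * Illustrative sketch (editor's addition): querying whether IEM is midway
 * through delivering an event, following the remark above that the flags
 * determine whether the error code and CR2 are valid.  The IEM_XCPT_FLAGS_ERR
 * and IEM_XCPT_FLAGS_CR2 names are assumed to be the IEM_XCPT_FLAGS_XXX bits
 * the doc comment refers to.
 */
#if 0 /* example only, never compiled */
static void callerExampleLogPendingXcpt(PVMCPUCC pVCpu)
{
    uint8_t  uVector = 0;
    uint32_t fFlags  = 0;
    uint32_t uErr    = 0;
    uint64_t uCr2    = 0;
    if (IEMGetCurrentXcpt(pVCpu, &uVector, &fFlags, &uErr, &uCr2))
    {
        /* Only trust the error code / CR2 when the corresponding flag says they are valid. */
        Log(("Delivering vector %#x fFlags=%#x%s%s\n", uVector, fFlags,
             (fFlags & IEM_XCPT_FLAGS_ERR) ? " (has error code)" : "",
             (fFlags & IEM_XCPT_FLAGS_CR2) ? " (has CR2)"        : ""));
    }
}
#endif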
10803
10804#ifdef IN_RING3
10805
10806/**
10807 * Handles the unlikely and probably fatal merge cases.
10808 *
10809 * @returns Merged status code.
10810 * @param rcStrict Current EM status code.
10811 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
10812 * with @a rcStrict.
10813 * @param iMemMap The memory mapping index. For error reporting only.
10814 * @param pVCpu The cross context virtual CPU structure of the calling
10815 * thread, for error reporting only.
10816 */
10817DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
10818 unsigned iMemMap, PVMCPUCC pVCpu)
10819{
10820 if (RT_FAILURE_NP(rcStrict))
10821 return rcStrict;
10822
10823 if (RT_FAILURE_NP(rcStrictCommit))
10824 return rcStrictCommit;
10825
10826 if (rcStrict == rcStrictCommit)
10827 return rcStrictCommit;
10828
10829 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
10830 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
10831 pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
10832 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
10833 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
10834 return VERR_IOM_FF_STATUS_IPE;
10835}
10836
10837
10838/**
10839 * Helper for IOMR3ProcessForceFlag.
10840 *
10841 * @returns Merged status code.
10842 * @param rcStrict Current EM status code.
10843 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
10844 * with @a rcStrict.
10845 * @param iMemMap The memory mapping index. For error reporting only.
10846 * @param pVCpu The cross context virtual CPU structure of the calling
10847 * thread, for error reporting only.
10848 */
10849DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPUCC pVCpu)
10850{
10851 /* Simple. */
10852 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
10853 return rcStrictCommit;
10854
10855 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
10856 return rcStrict;
10857
10858 /* EM scheduling status codes. */
10859 if (RT_LIKELY( rcStrict >= VINF_EM_FIRST
10860 && rcStrict <= VINF_EM_LAST))
10861 {
10862 if (RT_LIKELY( rcStrictCommit >= VINF_EM_FIRST
10863 && rcStrictCommit <= VINF_EM_LAST))
10864 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
10865 }
10866
10867 /* Unlikely */
10868 return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
10869}
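/*
 * Illustrative worked example (editor's addition): merge outcomes implied by
 * the code above.  The first two follow directly from the fast paths; the
 * last assumes the usual VINF_EM_XXX convention that a lower-valued
 * scheduling code has higher priority.
 */
#if 0 /* example only, never compiled */
static void iemR3MergeStatusExample(PVMCPUCC pVCpu)
{
    /* Nothing interesting from the instruction: take the commit status. */
    Assert(iemR3MergeStatus(VINF_SUCCESS,        VINF_EM_RAW_TO_R3, 0, pVCpu) == VINF_EM_RAW_TO_R3);
    /* A pending return-to-ring-3 is moot once we are already committing in ring-3. */
    Assert(iemR3MergeStatus(VINF_EM_RAW_TO_R3,   VINF_SUCCESS,      0, pVCpu) == VINF_SUCCESS);
    /* Two EM scheduling codes: the more urgent (lower valued) one wins. */
    Assert(iemR3MergeStatus(VINF_EM_RESCHEDULE,  VINF_EM_RESET,     0, pVCpu) == VINF_EM_RESET);
}
#endif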
10870
10871
10872/**
10873 * Called by force-flag handling code when VMCPU_FF_IEM is set.
10874 *
10875 * @returns Merge between @a rcStrict and what the commit operation returned.
10876 * @param pVM The cross context VM structure.
10877 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10878 * @param rcStrict The status code returned by ring-0 or raw-mode.
10879 */
10880VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
10881{
10882 /*
10883 * Reset the pending commit.
10884 */
10885 AssertMsg( (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
10886 & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
10887 ("%#x %#x %#x\n",
10888 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
10889 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
10890
10891 /*
10892 * Commit the pending bounce buffers (usually just one).
10893 */
10894 unsigned cBufs = 0;
10895 unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
10896 while (iMemMap-- > 0)
10897 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
10898 {
10899 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
10900 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
10901 Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
10902
10903 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
10904 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
10905 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
10906
10907 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
10908 {
10909 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
10910 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
10911 pbBuf,
10912 cbFirst,
10913 PGMACCESSORIGIN_IEM);
10914 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
10915 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
10916 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
10917 VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
10918 }
10919
10920 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
10921 {
10922 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
10923 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
10924 pbBuf + cbFirst,
10925 cbSecond,
10926 PGMACCESSORIGIN_IEM);
10927 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
10928 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
10929 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
10930 VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
10931 }
10932 cBufs++;
10933 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
10934 }
10935
10936 AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
10937 ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
10938 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
10939 pVCpu->iem.s.cActiveMappings = 0;
10940 return rcStrict;
10941}
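/*
 * Illustrative sketch (editor's addition): the shape of the ring-3 force-flag
 * processing that ends up in the function above.  The wrapper name and the
 * exact place the check lives are assumptions for illustration; the real
 * caller is EM's force-flag handling, as the doc comment says.
 */
#if 0 /* example only, never compiled */
static VBOXSTRICTRC emExampleProcessIemForceFlag(PVM pVM, PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
{
    /* Ring-0/raw-mode sets VMCPU_FF_IEM when a bounce-buffer write commit is left pending. */
    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
        rcStrict = IEMR3ProcessForceFlag(pVM, pVCpu, rcStrict);
    return rcStrict;
}
#endif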
10942
10943#endif /* IN_RING3 */
10944