VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@97156

Last change on this file since 97156 was 97156, checked in by vboxsync, 2 years ago

IEM: Quick fix to support PC/AT compatible math exception handling, analogous to what we do in HM. Required for DOS, OS/2, and other old guests.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 460.1 KB
1/* $Id: IEMAll.cpp 97156 2022-10-14 12:10:26Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2022 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/** @page pg_iem IEM - Interpreted Execution Manager
30 *
31 * The interpreted execution manager (IEM) is for executing short guest code
32 * sequences that are causing too many exits / virtualization traps. It will
33 * also be used to interpret single instructions, thus replacing the selective
34 * interpreters in EM and IOM.
35 *
36 * Design goals:
37 * - Relatively small footprint, although we favour speed and correctness
38 * over size.
39 * - Reasonably fast.
40 * - Correctly handle lock prefixed instructions.
41 * - Complete instruction set - eventually.
42 * - Refactorable into a recompiler, maybe.
43 * - Replace EMInterpret*.
44 *
45 * Using the existing disassembler has been considered, however this is thought
46 * to conflict with speed as the disassembler chews things a bit too much while
47 * leaving us with a somewhat complicated state to interpret afterwards.
48 *
49 *
50 * The current code is very much work in progress. You've been warned!
51 *
52 *
53 * @section sec_iem_fpu_instr FPU Instructions
54 *
55 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
56 * same or equivalent instructions on the host FPU. To make life easy, we also
57 * let the FPU prioritize the unmasked exceptions for us. This, however, only
58 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
59 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
60 * can trigger spurious FPU exceptions.
61 *
62 * The guest FPU state is not loaded into the host CPU and kept there till we
63 * leave IEM because the calling conventions have declared an all year open
64 * season on much of the FPU state. For instance an innocent looking call to
65 * memcpy might end up using a whole bunch of XMM or MM registers if the
66 * particular implementation finds it worthwhile.
67 *
68 *
69 * @section sec_iem_logging Logging
70 *
71 * The IEM code uses the \"IEM\" log group for the main logging. The different
72 * logging levels/flags are generally used for the following purposes:
73 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
74 * - Flow (LogFlow) : Basic enter/exit IEM state info.
75 * - Level 2 (Log2) : ?
76 * - Level 3 (Log3) : More detailed enter/exit IEM state info.
77 * - Level 4 (Log4) : Decoding mnemonics w/ EIP.
78 * - Level 5 (Log5) : Decoding details.
79 * - Level 6 (Log6) : Enables/disables the lockstep comparison with REM.
80 * - Level 7 (Log7) : iret++ execution logging.
81 * - Level 8 (Log8) : Memory writes.
82 * - Level 9 (Log9) : Memory reads.
83 * - Level 10 (Log10): TLBs.
84 * - Level 11 (Log11): Unmasked FPU exceptions.
85 *
86 * The SVM (AMD-V) and VMX (VT-x) code has the following assignments:
87 * - Level 1 (Log) : Errors and other major events.
88 * - Flow (LogFlow) : Misc flow stuff (cleanup?)
89 * - Level 2 (Log2) : VM exits.
90 */
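/*
 * Editor's note: a minimal, hypothetical illustration of how the logging
 * level assignments documented above are typically used.  It is not part of
 * the original file; the function name iemExampleLogUsage and its parameters
 * are made up, while the Log* macros and the IEM log group are real.
 */
#if 0 /* illustration only */
static void iemExampleLogUsage(PVMCPUCC pVCpu, RTGCPTR GCPtrPage)
{
    LogFlow(("iemExampleLogUsage: enter\n"));                                    /* Flow    : basic enter/exit state info. */
    Log(("iemExampleLogUsage: raising #GP(0)\n"));                               /* Level  1: errors, exceptions, major events. */
    Log5(("iemExampleLogUsage: decoding details for %RGv\n", GCPtrPage));        /* Level  5: decoding details. */
    Log10(("iemExampleLogUsage: invalidating TLB entry for %RGv\n", GCPtrPage)); /* Level 10: TLBs. */
    RT_NOREF(pVCpu);
}
#endif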
91
92/* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
93#ifdef _MSC_VER
94# pragma warning(disable:4505)
95#endif
96
97
98/*********************************************************************************************************************************
99* Header Files *
100*********************************************************************************************************************************/
101#define LOG_GROUP LOG_GROUP_IEM
102#define VMCPU_INCL_CPUM_GST_CTX
103#include <VBox/vmm/iem.h>
104#include <VBox/vmm/cpum.h>
105#include <VBox/vmm/apic.h>
106#include <VBox/vmm/pdm.h>
107#include <VBox/vmm/pgm.h>
108#include <VBox/vmm/iom.h>
109#include <VBox/vmm/em.h>
110#include <VBox/vmm/hm.h>
111#include <VBox/vmm/nem.h>
112#include <VBox/vmm/gim.h>
113#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
114# include <VBox/vmm/em.h>
115# include <VBox/vmm/hm_svm.h>
116#endif
117#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
118# include <VBox/vmm/hmvmxinline.h>
119#endif
120#include <VBox/vmm/tm.h>
121#include <VBox/vmm/dbgf.h>
122#include <VBox/vmm/dbgftrace.h>
123#include "IEMInternal.h"
124#include <VBox/vmm/vmcc.h>
125#include <VBox/log.h>
126#include <VBox/err.h>
127#include <VBox/param.h>
128#include <VBox/dis.h>
129#include <VBox/disopcode.h>
130#include <iprt/asm-math.h>
131#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
132# include <iprt/asm-amd64-x86.h>
133#elif defined(RT_ARCH_ARM64) || defined(RT_ARCH_ARM32)
134# include <iprt/asm-arm.h>
135#endif
136#include <iprt/assert.h>
137#include <iprt/string.h>
138#include <iprt/x86.h>
139
140#include "IEMInline.h"
141
142
143/*********************************************************************************************************************************
144* Structures and Typedefs *
145*********************************************************************************************************************************/
146/**
147 * CPU exception classes.
148 */
149typedef enum IEMXCPTCLASS
150{
151 IEMXCPTCLASS_BENIGN,
152 IEMXCPTCLASS_CONTRIBUTORY,
153 IEMXCPTCLASS_PAGE_FAULT,
154 IEMXCPTCLASS_DOUBLE_FAULT
155} IEMXCPTCLASS;
156
157
158/*********************************************************************************************************************************
159* Global Variables *
160*********************************************************************************************************************************/
161#if defined(IEM_LOG_MEMORY_WRITES)
162/** What IEM just wrote. */
163uint8_t g_abIemWrote[256];
164/** How much IEM just wrote. */
165size_t g_cbIemWrote;
166#endif
167
168
169/*********************************************************************************************************************************
170* Internal Functions *
171*********************************************************************************************************************************/
172static VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,
173 uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT;
174
175
176/**
177 * Initializes the decoder state.
178 *
179 * iemReInitDecoder is mostly a copy of this function.
180 *
181 * @param pVCpu The cross context virtual CPU structure of the
182 * calling thread.
183 * @param fBypassHandlers Whether to bypass access handlers.
184 * @param fDisregardLock Whether to disregard the LOCK prefix.
185 */
186DECLINLINE(void) iemInitDecoder(PVMCPUCC pVCpu, bool fBypassHandlers, bool fDisregardLock)
187{
188 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
189 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
190 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
191 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
192 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
193 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
194 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
195 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
196 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
197 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
198
199 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
200 IEMMODE enmMode = iemCalcCpuMode(pVCpu);
201 pVCpu->iem.s.enmCpuMode = enmMode;
202 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
203 pVCpu->iem.s.enmEffAddrMode = enmMode;
204 if (enmMode != IEMMODE_64BIT)
205 {
206 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
207 pVCpu->iem.s.enmEffOpSize = enmMode;
208 }
209 else
210 {
211 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
212 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
213 }
214 pVCpu->iem.s.fPrefixes = 0;
215 pVCpu->iem.s.uRexReg = 0;
216 pVCpu->iem.s.uRexB = 0;
217 pVCpu->iem.s.uRexIndex = 0;
218 pVCpu->iem.s.idxPrefix = 0;
219 pVCpu->iem.s.uVex3rdReg = 0;
220 pVCpu->iem.s.uVexLength = 0;
221 pVCpu->iem.s.fEvexStuff = 0;
222 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
223#ifdef IEM_WITH_CODE_TLB
224 pVCpu->iem.s.pbInstrBuf = NULL;
225 pVCpu->iem.s.offInstrNextByte = 0;
226 pVCpu->iem.s.offCurInstrStart = 0;
227# ifdef VBOX_STRICT
228 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
229 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
230 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
231# endif
232#else
233 pVCpu->iem.s.offOpcode = 0;
234 pVCpu->iem.s.cbOpcode = 0;
235#endif
236 pVCpu->iem.s.offModRm = 0;
237 pVCpu->iem.s.cActiveMappings = 0;
238 pVCpu->iem.s.iNextMapping = 0;
239 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
240 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
241 pVCpu->iem.s.fDisregardLock = fDisregardLock;
242
243#ifdef DBGFTRACE_ENABLED
244 switch (enmMode)
245 {
246 case IEMMODE_64BIT:
247 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.rip);
248 break;
249 case IEMMODE_32BIT:
250 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
251 break;
252 case IEMMODE_16BIT:
253 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
254 break;
255 }
256#endif
257}
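/*
 * Editor's note (not part of the original file): a hedged sketch of the
 * default operand-size rule applied in iemInitDecoder above.  In 64-bit mode
 * the default operand size stays 32-bit; only prefixes seen later during
 * decoding (REX.W, 0x66) change the effective size.  The helper name is
 * hypothetical.
 */
#if 0 /* illustration only */
DECLINLINE(IEMMODE) iemExampleDefaultOpSize(IEMMODE enmCpuMode)
{
    /* 16-bit and 32-bit modes default to their own size; long mode defaults to 32-bit. */
    return enmCpuMode != IEMMODE_64BIT ? enmCpuMode : IEMMODE_32BIT;
}
#endif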
258
259
260/**
261 * Reinitializes the decoder state for the 2nd+ loop of IEMExecLots.
262 *
263 * This is mostly a copy of iemInitDecoder.
264 *
265 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
266 */
267DECLINLINE(void) iemReInitDecoder(PVMCPUCC pVCpu)
268{
269 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
270 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
271 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
272 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
273 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
274 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
275 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
276 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
277 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
278
279 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu); /** @todo this should be updated during execution! */
280 IEMMODE enmMode = iemCalcCpuMode(pVCpu);
281 pVCpu->iem.s.enmCpuMode = enmMode; /** @todo this should be updated during execution! */
282 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
283 pVCpu->iem.s.enmEffAddrMode = enmMode;
284 if (enmMode != IEMMODE_64BIT)
285 {
286 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
287 pVCpu->iem.s.enmEffOpSize = enmMode;
288 }
289 else
290 {
291 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
292 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
293 }
294 pVCpu->iem.s.fPrefixes = 0;
295 pVCpu->iem.s.uRexReg = 0;
296 pVCpu->iem.s.uRexB = 0;
297 pVCpu->iem.s.uRexIndex = 0;
298 pVCpu->iem.s.idxPrefix = 0;
299 pVCpu->iem.s.uVex3rdReg = 0;
300 pVCpu->iem.s.uVexLength = 0;
301 pVCpu->iem.s.fEvexStuff = 0;
302 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
303#ifdef IEM_WITH_CODE_TLB
304 if (pVCpu->iem.s.pbInstrBuf)
305 {
306 uint64_t off = (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pVCpu->cpum.GstCtx.rip : pVCpu->cpum.GstCtx.eip + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base)
307 - pVCpu->iem.s.uInstrBufPc;
308 if (off < pVCpu->iem.s.cbInstrBufTotal)
309 {
310 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
311 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
312 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
313 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
314 else
315 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
316 }
317 else
318 {
319 pVCpu->iem.s.pbInstrBuf = NULL;
320 pVCpu->iem.s.offInstrNextByte = 0;
321 pVCpu->iem.s.offCurInstrStart = 0;
322 pVCpu->iem.s.cbInstrBuf = 0;
323 pVCpu->iem.s.cbInstrBufTotal = 0;
324 }
325 }
326 else
327 {
328 pVCpu->iem.s.offInstrNextByte = 0;
329 pVCpu->iem.s.offCurInstrStart = 0;
330 pVCpu->iem.s.cbInstrBuf = 0;
331 pVCpu->iem.s.cbInstrBufTotal = 0;
332 }
333#else
334 pVCpu->iem.s.cbOpcode = 0;
335 pVCpu->iem.s.offOpcode = 0;
336#endif
337 pVCpu->iem.s.offModRm = 0;
338 Assert(pVCpu->iem.s.cActiveMappings == 0);
339 pVCpu->iem.s.iNextMapping = 0;
340 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
341 Assert(pVCpu->iem.s.fBypassHandlers == false);
342
343#ifdef DBGFTRACE_ENABLED
344 switch (enmMode)
345 {
346 case IEMMODE_64BIT:
347 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.rip);
348 break;
349 case IEMMODE_32BIT:
350 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
351 break;
352 case IEMMODE_16BIT:
353 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
354 break;
355 }
356#endif
357}
358
359
360
361/**
362 * Prefetches opcodes the first time when starting execution.
363 *
364 * @returns Strict VBox status code.
365 * @param pVCpu The cross context virtual CPU structure of the
366 * calling thread.
367 * @param fBypassHandlers Whether to bypass access handlers.
368 * @param fDisregardLock Whether to disregard LOCK prefixes.
369 *
370 * @todo Combine fDisregardLock and fBypassHandlers into a flag parameter and
371 * store them as such.
372 */
373static VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPUCC pVCpu, bool fBypassHandlers, bool fDisregardLock) RT_NOEXCEPT
374{
375 iemInitDecoder(pVCpu, fBypassHandlers, fDisregardLock);
376
377#ifdef IEM_WITH_CODE_TLB
378 /** @todo Do ITLB lookup here. */
379
380#else /* !IEM_WITH_CODE_TLB */
381
382 /*
383 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
384 *
385 * First translate CS:rIP to a physical address.
386 */
387 uint32_t cbToTryRead;
388 RTGCPTR GCPtrPC;
389 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
390 {
391 cbToTryRead = GUEST_PAGE_SIZE;
392 GCPtrPC = pVCpu->cpum.GstCtx.rip;
393 if (IEM_IS_CANONICAL(GCPtrPC))
394 cbToTryRead = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
395 else
396 return iemRaiseGeneralProtectionFault0(pVCpu);
397 }
398 else
399 {
400 uint32_t GCPtrPC32 = pVCpu->cpum.GstCtx.eip;
401 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
402 if (GCPtrPC32 <= pVCpu->cpum.GstCtx.cs.u32Limit)
403 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrPC32 + 1;
404 else
405 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
406 if (cbToTryRead) { /* likely */ }
407 else /* overflowed */
408 {
409 Assert(GCPtrPC32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
410 cbToTryRead = UINT32_MAX;
411 }
412 GCPtrPC = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrPC32;
413 Assert(GCPtrPC <= UINT32_MAX);
414 }
415
416 PGMPTWALK Walk;
417 int rc = PGMGstGetPage(pVCpu, GCPtrPC, &Walk);
418 if (RT_SUCCESS(rc))
419 Assert(Walk.fSucceeded); /* probable. */
420 else
421 {
422 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
423#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
424 if (Walk.fFailed & PGM_WALKFAIL_EPT)
425 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
426#endif
427 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
428 }
429 if ((Walk.fEffective & X86_PTE_US) || pVCpu->iem.s.uCpl != 3) { /* likely */ }
430 else
431 {
432 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
433#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
434 if (Walk.fFailed & PGM_WALKFAIL_EPT)
435 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
436#endif
437 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
438 }
439 if (!(Walk.fEffective & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
440 else
441 {
442 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
443#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
444 if (Walk.fFailed & PGM_WALKFAIL_EPT)
445 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
446#endif
447 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
448 }
449 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
450 /** @todo Check reserved bits and such stuff. PGM is better at doing
451 * that, so do it when implementing the guest virtual address
452 * TLB... */
453
454 /*
455 * Read the bytes at this address.
456 */
457 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
458 if (cbToTryRead > cbLeftOnPage)
459 cbToTryRead = cbLeftOnPage;
460 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
461 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
462
463 if (!pVCpu->iem.s.fBypassHandlers)
464 {
465 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
466 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
467 { /* likely */ }
468 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
469 {
470 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
471 GCPtrPC, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
472 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
473 }
474 else
475 {
476 Log((RT_SUCCESS(rcStrict)
477 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
478 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
479 GCPtrPC, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
480 return rcStrict;
481 }
482 }
483 else
484 {
485 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
486 if (RT_SUCCESS(rc))
487 { /* likely */ }
488 else
489 {
490 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
491 GCPtrPC, GCPhys, rc, cbToTryRead));
492 return rc;
493 }
494 }
495 pVCpu->iem.s.cbOpcode = cbToTryRead;
496#endif /* !IEM_WITH_CODE_TLB */
497 return VINF_SUCCESS;
498}
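/*
 * Editor's note (not part of the original file): a simplified sketch of the
 * address translation performed by the non-TLB prefetch path above.  The
 * 32-bit case forms a linear address from CS.base + EIP, walks the guest page
 * tables, and combines the resulting page address with the page offset.
 * Error and permission handling is omitted; the helper name is hypothetical.
 */
#if 0 /* illustration only */
static RTGCPHYS iemExamplePcToPhys(PVMCPUCC pVCpu)
{
    RTGCPTR const GCPtrPC = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + pVCpu->cpum.GstCtx.eip;
    PGMPTWALK     Walk;
    int rc = PGMGstGetPage(pVCpu, GCPtrPC, &Walk);
    AssertRCReturn(rc, NIL_RTGCPHYS);
    return Walk.GCPhys | (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
}
#endif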
499
500
501/**
502 * Invalidates the IEM TLBs.
503 *
504 * This is called internally as well as by PGM when moving GC mappings.
505 *
506 *
507 * @param pVCpu The cross context virtual CPU structure of the calling
508 * thread.
509 */
510VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPUCC pVCpu)
511{
512#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
513 Log10(("IEMTlbInvalidateAll\n"));
514# ifdef IEM_WITH_CODE_TLB
515 pVCpu->iem.s.cbInstrBufTotal = 0;
516 pVCpu->iem.s.CodeTlb.uTlbRevision += IEMTLB_REVISION_INCR;
517 if (pVCpu->iem.s.CodeTlb.uTlbRevision != 0)
518 { /* very likely */ }
519 else
520 {
521 pVCpu->iem.s.CodeTlb.uTlbRevision = IEMTLB_REVISION_INCR;
522 unsigned i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
523 while (i-- > 0)
524 pVCpu->iem.s.CodeTlb.aEntries[i].uTag = 0;
525 }
526# endif
527
528# ifdef IEM_WITH_DATA_TLB
529 pVCpu->iem.s.DataTlb.uTlbRevision += IEMTLB_REVISION_INCR;
530 if (pVCpu->iem.s.DataTlb.uTlbRevision != 0)
531 { /* very likely */ }
532 else
533 {
534 pVCpu->iem.s.DataTlb.uTlbRevision = IEMTLB_REVISION_INCR;
535 unsigned i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
536 while (i-- > 0)
537 pVCpu->iem.s.DataTlb.aEntries[i].uTag = 0;
538 }
539# endif
540#else
541 RT_NOREF(pVCpu);
542#endif
543}
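/*
 * Editor's note (not part of the original file): a hedged sketch of why
 * bumping uTlbRevision above invalidates every entry without touching them.
 * The tag stored in each entry includes the revision it was inserted under,
 * so entries from an older revision can no longer match.  The helper name is
 * hypothetical; the IEMTLB_* macros and fields are the ones used by this file.
 */
#if 0 /* illustration only */
static bool iemExampleDataTlbEntryIsValid(PVMCPUCC pVCpu, RTGCPTR GCPtr)
{
    uint64_t const  uTagNoRev = IEMTLB_CALC_TAG_NO_REV(GCPtr);
    uintptr_t const idx       = IEMTLB_TAG_TO_INDEX(uTagNoRev);
    /* Only matches if the entry was inserted under the *current* revision. */
    return pVCpu->iem.s.DataTlb.aEntries[idx].uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision);
}
#endif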
544
545
546/**
547 * Invalidates a page in the TLBs.
548 *
549 * @param pVCpu The cross context virtual CPU structure of the calling
550 * thread.
551 * @param GCPtr The address of the page to invalidate
552 * @thread EMT(pVCpu)
553 */
554VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPUCC pVCpu, RTGCPTR GCPtr)
555{
556#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
557 Log10(("IEMTlbInvalidatePage: GCPtr=%RGv\n", GCPtr));
558 GCPtr = IEMTLB_CALC_TAG_NO_REV(GCPtr);
559 Assert(!(GCPtr >> (48 - X86_PAGE_SHIFT)));
560 uintptr_t const idx = IEMTLB_TAG_TO_INDEX(GCPtr);
561
562# ifdef IEM_WITH_CODE_TLB
563 if (pVCpu->iem.s.CodeTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.CodeTlb.uTlbRevision))
564 {
565 pVCpu->iem.s.CodeTlb.aEntries[idx].uTag = 0;
566 if (GCPtr == IEMTLB_CALC_TAG_NO_REV(pVCpu->iem.s.uInstrBufPc))
567 pVCpu->iem.s.cbInstrBufTotal = 0;
568 }
569# endif
570
571# ifdef IEM_WITH_DATA_TLB
572 if (pVCpu->iem.s.DataTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.DataTlb.uTlbRevision))
573 pVCpu->iem.s.DataTlb.aEntries[idx].uTag = 0;
574# endif
575#else
576 NOREF(pVCpu); NOREF(GCPtr);
577#endif
578}
579
580
581#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
582/**
583 * Invalidates both TLBs the slow way following a revision rollover.
584 *
585 * Worker for IEMTlbInvalidateAllPhysical,
586 * IEMTlbInvalidateAllPhysicalAllCpus, iemOpcodeFetchBytesJmp, iemMemMap,
587 * iemMemMapJmp and others.
588 *
589 * @thread EMT(pVCpu)
590 */
591static void IEMTlbInvalidateAllPhysicalSlow(PVMCPUCC pVCpu)
592{
593 Log10(("IEMTlbInvalidateAllPhysicalSlow\n"));
594 ASMAtomicWriteU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
595 ASMAtomicWriteU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
596
597 unsigned i;
598# ifdef IEM_WITH_CODE_TLB
599 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
600 while (i-- > 0)
601 {
602 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
603 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~( IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
604 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
605 }
606# endif
607# ifdef IEM_WITH_DATA_TLB
608 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
609 while (i-- > 0)
610 {
611 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
612 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~( IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
613 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
614 }
615# endif
616
617}
618#endif
619
620
621/**
622 * Invalidates the host physical aspects of the IEM TLBs.
623 *
624 * This is called internally as well as by PGM when moving GC mappings.
625 *
626 * @param pVCpu The cross context virtual CPU structure of the calling
627 * thread.
628 * @note Currently not used.
629 */
630VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPUCC pVCpu)
631{
632#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
633 /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
634 Log10(("IEMTlbInvalidateAllPhysical\n"));
635
636# ifdef IEM_WITH_CODE_TLB
637 pVCpu->iem.s.cbInstrBufTotal = 0;
638# endif
639 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
640 if (RT_LIKELY(uTlbPhysRev > IEMTLB_PHYS_REV_INCR * 2))
641 {
642 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
643 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
644 }
645 else
646 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
647#else
648 NOREF(pVCpu);
649#endif
650}
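/*
 * Editor's note (not part of the original file): a hedged sketch of how the
 * physical revision bumped above takes effect.  Each TLB entry carries the
 * physical revision it was resolved under in fFlagsAndPhysRev; once the
 * revision moves on, the check below fails and the cached pbMappingR3 and
 * PG_NO_READ/PG_NO_WRITE/UNASSIGNED bits are re-queried from PGM (see
 * iemOpcodeFetchBytesJmp below).  The helper name is hypothetical.
 */
#if 0 /* illustration only */
static bool iemExampleCodeTlbPhysInfoIsCurrent(PVMCPUCC pVCpu, PIEMTLBENTRY pTlbe)
{
    return (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev;
}
#endif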
651
652
653/**
654 * Invalidates the host physical aspects of the IEM TLBs.
655 *
656 * This is called internally as well as by PGM when moving GC mappings.
657 *
658 * @param pVM The cross context VM structure.
659 * @param idCpuCaller The ID of the calling EMT if available to the caller,
660 * otherwise NIL_VMCPUID.
661 *
662 * @remarks Caller holds the PGM lock.
663 */
664VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVMCC pVM, VMCPUID idCpuCaller)
665{
666#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
667 PVMCPUCC const pVCpuCaller = idCpuCaller >= pVM->cCpus ? VMMGetCpu(pVM) : VMMGetCpuById(pVM, idCpuCaller);
668 if (pVCpuCaller)
669 VMCPU_ASSERT_EMT(pVCpuCaller);
670 Log10(("IEMTlbInvalidateAllPhysicalAllCpus\n"));
671
672 VMCC_FOR_EACH_VMCPU(pVM)
673 {
674# ifdef IEM_WITH_CODE_TLB
675 if (pVCpuCaller == pVCpu)
676 pVCpu->iem.s.cbInstrBufTotal = 0;
677# endif
678
679 uint64_t const uTlbPhysRevPrev = ASMAtomicUoReadU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev);
680 uint64_t uTlbPhysRevNew = uTlbPhysRevPrev + IEMTLB_PHYS_REV_INCR;
681 if (RT_LIKELY(uTlbPhysRevNew > IEMTLB_PHYS_REV_INCR * 2))
682 { /* likely */}
683 else if (pVCpuCaller == pVCpu)
684 uTlbPhysRevNew = IEMTLB_PHYS_REV_INCR;
685 else
686 {
687 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
688 continue;
689 }
690 ASMAtomicCmpXchgU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev);
691 ASMAtomicCmpXchgU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev);
692 }
693 VMCC_FOR_EACH_VMCPU_END(pVM);
694
695#else
696 RT_NOREF(pVM, idCpuCaller);
697#endif
698}
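/*
 * Editor's note (not part of the original file): a hedged sketch of the
 * cross-CPU update pattern used above.  The compare-and-exchange only
 * installs the new physical revision if the target EMT has not advanced it
 * itself in the meantime, so a remote bump can never move a revision
 * backwards.  The helper name is hypothetical.
 */
#if 0 /* illustration only */
static void iemExampleBumpDataTlbPhysRev(PVMCPUCC pVCpu)
{
    uint64_t const uOld = ASMAtomicUoReadU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev);
    uint64_t const uNew = uOld + IEMTLB_PHYS_REV_INCR;
    /* Fails harmlessly if the owning EMT changed the value concurrently. */
    ASMAtomicCmpXchgU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, uNew, uOld);
}
#endif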
699
700#ifdef IEM_WITH_CODE_TLB
701
702/**
703 * Tries to fetch @a cbDst opcode bytes, raising the appropriate exception on
704 * failure and jumping (longjmp).
705 *
706 * We end up here for a number of reasons:
707 * - pbInstrBuf isn't yet initialized.
708 * - Advancing beyond the buffer boundary (e.g. cross page).
709 * - Advancing beyond the CS segment limit.
710 * - Fetching from non-mappable page (e.g. MMIO).
711 *
712 * @param pVCpu The cross context virtual CPU structure of the
713 * calling thread.
714 * @param pvDst Where to return the bytes.
715 * @param cbDst Number of bytes to read.
716 *
717 * @todo Make cbDst = 0 a way of initializing pbInstrBuf?
718 */
719void iemOpcodeFetchBytesJmp(PVMCPUCC pVCpu, size_t cbDst, void *pvDst) RT_NOEXCEPT
720{
721#ifdef IN_RING3
722 for (;;)
723 {
724 Assert(cbDst <= 8);
725 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
726
727 /*
728 * We might have a partial buffer match, deal with that first to make the
729 * rest simpler. This is the first part of the cross page/buffer case.
730 */
731 if (pVCpu->iem.s.pbInstrBuf != NULL)
732 {
733 if (offBuf < pVCpu->iem.s.cbInstrBuf)
734 {
735 Assert(offBuf + cbDst > pVCpu->iem.s.cbInstrBuf);
736 uint32_t const cbCopy = pVCpu->iem.s.cbInstrBuf - pVCpu->iem.s.offInstrNextByte;
737 memcpy(pvDst, &pVCpu->iem.s.pbInstrBuf[offBuf], cbCopy);
738
739 cbDst -= cbCopy;
740 pvDst = (uint8_t *)pvDst + cbCopy;
741 offBuf += cbCopy;
742 pVCpu->iem.s.offInstrNextByte += offBuf;
743 }
744 }
745
746 /*
747 * Check segment limit, figuring how much we're allowed to access at this point.
748 *
749 * We will fault immediately if RIP is past the segment limit / in non-canonical
750 * territory. If we do continue, there are one or more bytes to read before we
751 * end up in trouble and we need to do that first before faulting.
752 */
753 RTGCPTR GCPtrFirst;
754 uint32_t cbMaxRead;
755 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
756 {
757 GCPtrFirst = pVCpu->cpum.GstCtx.rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
758 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
759 { /* likely */ }
760 else
761 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
762 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
763 }
764 else
765 {
766 GCPtrFirst = pVCpu->cpum.GstCtx.eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
767 Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
768 if (RT_LIKELY((uint32_t)GCPtrFirst <= pVCpu->cpum.GstCtx.cs.u32Limit))
769 { /* likely */ }
770 else
771 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
772 cbMaxRead = pVCpu->cpum.GstCtx.cs.u32Limit - (uint32_t)GCPtrFirst + 1;
773 if (cbMaxRead != 0)
774 { /* likely */ }
775 else
776 {
777 /* Overflowed because address is 0 and limit is max. */
778 Assert(GCPtrFirst == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
779 cbMaxRead = X86_PAGE_SIZE;
780 }
781 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base;
782 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
783 if (cbMaxRead2 < cbMaxRead)
784 cbMaxRead = cbMaxRead2;
785 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
786 }
787
788 /*
789 * Get the TLB entry for this piece of code.
790 */
791 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.CodeTlb, GCPtrFirst);
792 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.CodeTlb, uTag);
793 if (pTlbe->uTag == uTag)
794 {
795 /* likely when executing lots of code, otherwise unlikely */
796# ifdef VBOX_WITH_STATISTICS
797 pVCpu->iem.s.CodeTlb.cTlbHits++;
798# endif
799 }
800 else
801 {
802 pVCpu->iem.s.CodeTlb.cTlbMisses++;
803 PGMPTWALK Walk;
804 int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &Walk);
805 if (RT_FAILURE(rc))
806 {
807#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
808 /** @todo Nested VMX: Need to handle EPT violation/misconfig here? */
809 Assert(!(Walk.fFailed & PGM_WALKFAIL_EPT));
810#endif
811 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
812 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, rc);
813 }
814
815 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
816 Assert(Walk.fSucceeded);
817 pTlbe->uTag = uTag;
818 pTlbe->fFlagsAndPhysRev = (~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A))
819 | (Walk.fEffective >> X86_PTE_PAE_BIT_NX);
820 pTlbe->GCPhys = Walk.GCPhys;
821 pTlbe->pbMappingR3 = NULL;
822 }
823
824 /*
825 * Check TLB page table level access flags.
826 */
827 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
828 {
829 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && pVCpu->iem.s.uCpl == 3)
830 {
831 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
832 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
833 }
834 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
835 {
836 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrFirst));
837 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
838 }
839 }
840
841 /*
842 * Look up the physical page info if necessary.
843 */
844 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
845 { /* not necessary */ }
846 else
847 {
848 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
849 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
850 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
851 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
852 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
853 { /* likely */ }
854 else
855 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
856 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
857 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_UNASSIGNED);
858 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
859 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
860 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
861 }
862
863# if defined(IN_RING3) || defined(IN_RING0) /** @todo fixme */
864 /*
865 * Try do a direct read using the pbMappingR3 pointer.
866 */
867 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
868 == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
869 {
870 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
871 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
872 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
873 {
874 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
875 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
876 }
877 else
878 {
879 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
880 Assert(cbInstr < cbMaxRead);
881 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
882 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
883 }
884 if (cbDst <= cbMaxRead)
885 {
886 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
887 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
888 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
889 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
890 return;
891 }
892 pVCpu->iem.s.pbInstrBuf = NULL;
893
894 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
895 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
896 }
897 else
898# endif
899#if 0
900 /*
901 * If there is no special read handling, we can read a bit more and
902 * put it in the prefetch buffer.
903 */
904 if ( cbDst < cbMaxRead
905 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
906 {
907 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
908 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
909 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
910 { /* likely */ }
911 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
912 {
913 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
914 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
915 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
916 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
917 }
918 else
919 {
920 Log((RT_SUCCESS(rcStrict)
921 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
922 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
923 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
924 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
925 }
926 }
927 /*
928 * Special read handling, so only read exactly what's needed.
929 * This is a highly unlikely scenario.
930 */
931 else
932#endif
933 {
934 pVCpu->iem.s.CodeTlb.cTlbSlowReadPath++;
935 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
936 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
937 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
938 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
939 { /* likely */ }
940 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
941 {
942 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
943 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), VBOXSTRICTRC_VAL(rcStrict), cbToRead));
944 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
945 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
946 }
947 else
948 {
949 Log((RT_SUCCESS(rcStrict)
950 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
951 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
952 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), VBOXSTRICTRC_VAL(rcStrict), cbToRead));
953 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
954 }
955 pVCpu->iem.s.offInstrNextByte = offBuf + cbToRead;
956 if (cbToRead == cbDst)
957 return;
958 }
959
960 /*
961 * More to read, loop.
962 */
963 cbDst -= cbMaxRead;
964 pvDst = (uint8_t *)pvDst + cbMaxRead;
965 }
966#else
967 RT_NOREF(pvDst, cbDst);
968 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_INTERNAL_ERROR);
969#endif
970}
971
972#else
973
974/**
975 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
976 * exception if it fails.
977 *
978 * @returns Strict VBox status code.
979 * @param pVCpu The cross context virtual CPU structure of the
980 * calling thread.
981 * @param cbMin The minimum number of bytes relative to offOpcode
982 * that must be read.
983 */
984VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPUCC pVCpu, size_t cbMin) RT_NOEXCEPT
985{
986 /*
987 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
988 *
989 * First translate CS:rIP to a physical address.
990 */
991 uint8_t cbLeft = pVCpu->iem.s.cbOpcode - pVCpu->iem.s.offOpcode; Assert(cbLeft < cbMin);
992 uint32_t cbToTryRead;
993 RTGCPTR GCPtrNext;
994 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
995 {
996 cbToTryRead = GUEST_PAGE_SIZE;
997 GCPtrNext = pVCpu->cpum.GstCtx.rip + pVCpu->iem.s.cbOpcode;
998 if (!IEM_IS_CANONICAL(GCPtrNext))
999 return iemRaiseGeneralProtectionFault0(pVCpu);
1000 }
1001 else
1002 {
1003 uint32_t GCPtrNext32 = pVCpu->cpum.GstCtx.eip;
1004 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1005 GCPtrNext32 += pVCpu->iem.s.cbOpcode;
1006 if (GCPtrNext32 > pVCpu->cpum.GstCtx.cs.u32Limit)
1007 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1008 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrNext32 + 1;
1009 if (!cbToTryRead) /* overflowed */
1010 {
1011 Assert(GCPtrNext32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1012 cbToTryRead = UINT32_MAX;
1013 /** @todo check out wrapping around the code segment. */
1014 }
1015 if (cbToTryRead < cbMin - cbLeft)
1016 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1017 GCPtrNext = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrNext32;
1018 }
1019
1020 /* Only read up to the end of the page, and make sure we don't read more
1021 than the opcode buffer can hold. */
1022 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1023 if (cbToTryRead > cbLeftOnPage)
1024 cbToTryRead = cbLeftOnPage;
1025 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode)
1026 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode;
1027/** @todo r=bird: Convert assertion into undefined opcode exception? */
1028 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
1029
1030 PGMPTWALK Walk;
1031 int rc = PGMGstGetPage(pVCpu, GCPtrNext, &Walk);
1032 if (RT_FAILURE(rc))
1033 {
1034 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
1035#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1036 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1037 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
1038#endif
1039 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
1040 }
1041 if (!(Walk.fEffective & X86_PTE_US) && pVCpu->iem.s.uCpl == 3)
1042 {
1043 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
1044#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1045 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1046 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
1047#endif
1048 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1049 }
1050 if ((Walk.fEffective & X86_PTE_PAE_NX) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
1051 {
1052 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
1053#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1054 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1055 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
1056#endif
1057 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1058 }
1059 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1060 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pVCpu->iem.s.cbOpcode));
1061 /** @todo Check reserved bits and such stuff. PGM is better at doing
1062 * that, so do it when implementing the guest virtual address
1063 * TLB... */
1064
1065 /*
1066 * Read the bytes at this address.
1067 *
1068 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
1069 * and since PATM should only patch the start of an instruction there
1070 * should be no need to check again here.
1071 */
1072 if (!pVCpu->iem.s.fBypassHandlers)
1073 {
1074 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode],
1075 cbToTryRead, PGMACCESSORIGIN_IEM);
1076 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1077 { /* likely */ }
1078 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1079 {
1080 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1081 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1082 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1083 }
1084 else
1085 {
1086 Log((RT_SUCCESS(rcStrict)
1087 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1088 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1089 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1090 return rcStrict;
1091 }
1092 }
1093 else
1094 {
1095 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode], GCPhys, cbToTryRead);
1096 if (RT_SUCCESS(rc))
1097 { /* likely */ }
1098 else
1099 {
1100 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
1101 return rc;
1102 }
1103 }
1104 pVCpu->iem.s.cbOpcode += cbToTryRead;
1105 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
1106
1107 return VINF_SUCCESS;
1108}
1109
1110#endif /* !IEM_WITH_CODE_TLB */
1111#ifndef IEM_WITH_SETJMP
1112
1113/**
1114 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
1115 *
1116 * @returns Strict VBox status code.
1117 * @param pVCpu The cross context virtual CPU structure of the
1118 * calling thread.
1119 * @param pb Where to return the opcode byte.
1120 */
1121VBOXSTRICTRC iemOpcodeGetNextU8Slow(PVMCPUCC pVCpu, uint8_t *pb) RT_NOEXCEPT
1122{
1123 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1124 if (rcStrict == VINF_SUCCESS)
1125 {
1126 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1127 *pb = pVCpu->iem.s.abOpcode[offOpcode];
1128 pVCpu->iem.s.offOpcode = offOpcode + 1;
1129 }
1130 else
1131 *pb = 0;
1132 return rcStrict;
1133}
1134
1135#else /* IEM_WITH_SETJMP */
1136
1137/**
1138 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
1139 *
1140 * @returns The opcode byte.
1141 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1142 */
1143uint8_t iemOpcodeGetNextU8SlowJmp(PVMCPUCC pVCpu) RT_NOEXCEPT
1144{
1145# ifdef IEM_WITH_CODE_TLB
1146 uint8_t u8;
1147 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
1148 return u8;
1149# else
1150 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1151 if (rcStrict == VINF_SUCCESS)
1152 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
1153 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1154# endif
1155}
1156
1157#endif /* IEM_WITH_SETJMP */
1158
1159#ifndef IEM_WITH_SETJMP
1160
1161/**
1162 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
1163 *
1164 * @returns Strict VBox status code.
1165 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1166 * @param pu16 Where to return the opcode word (sign-extended byte).
1167 */
1168VBOXSTRICTRC iemOpcodeGetNextS8SxU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
1169{
1170 uint8_t u8;
1171 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1172 if (rcStrict == VINF_SUCCESS)
1173 *pu16 = (int8_t)u8;
1174 return rcStrict;
1175}
1176
1177
1178/**
1179 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
1180 *
1181 * @returns Strict VBox status code.
1182 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1183 * @param pu32 Where to return the opcode dword.
1184 */
1185VBOXSTRICTRC iemOpcodeGetNextS8SxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1186{
1187 uint8_t u8;
1188 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1189 if (rcStrict == VINF_SUCCESS)
1190 *pu32 = (int8_t)u8;
1191 return rcStrict;
1192}
1193
1194
1195/**
1196 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
1197 *
1198 * @returns Strict VBox status code.
1199 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1200 * @param pu64 Where to return the opcode qword.
1201 */
1202VBOXSTRICTRC iemOpcodeGetNextS8SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1203{
1204 uint8_t u8;
1205 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1206 if (rcStrict == VINF_SUCCESS)
1207 *pu64 = (int8_t)u8;
1208 return rcStrict;
1209}
1210
1211#endif /* !IEM_WITH_SETJMP */
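/*
 * Editor's note (not part of the original file): a small illustration of the
 * sign extension performed by the S8Sx* helpers above.  Casting the opcode
 * byte to int8_t before assigning it to the wider unsigned type replicates
 * the sign bit, which is what relative byte displacements require.  The
 * helper name is hypothetical.
 */
#if 0 /* illustration only */
static void iemExampleSignExtendByte(void)
{
    uint8_t  const u8  = 0x80;                   /* -128 as a signed byte */
    uint16_t const u16 = (uint16_t)(int8_t)u8;   /* 0xff80 */
    uint32_t const u32 = (uint32_t)(int8_t)u8;   /* 0xffffff80 */
    uint64_t const u64 = (uint64_t)(int8_t)u8;   /* 0xffffffffffffff80 */
    Assert(u16 == UINT16_C(0xff80) && u32 == UINT32_C(0xffffff80) && u64 == UINT64_C(0xffffffffffffff80));
}
#endif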
1212
1213
1214#ifndef IEM_WITH_SETJMP
1215
1216/**
1217 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
1218 *
1219 * @returns Strict VBox status code.
1220 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1221 * @param pu16 Where to return the opcode word.
1222 */
1223VBOXSTRICTRC iemOpcodeGetNextU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
1224{
1225 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1226 if (rcStrict == VINF_SUCCESS)
1227 {
1228 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1229# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1230 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1231# else
1232 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1233# endif
1234 pVCpu->iem.s.offOpcode = offOpcode + 2;
1235 }
1236 else
1237 *pu16 = 0;
1238 return rcStrict;
1239}
1240
1241#else /* IEM_WITH_SETJMP */
1242
1243/**
1244 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error.
1245 *
1246 * @returns The opcode word.
1247 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1248 */
1249uint16_t iemOpcodeGetNextU16SlowJmp(PVMCPUCC pVCpu) RT_NOEXCEPT
1250{
1251# ifdef IEM_WITH_CODE_TLB
1252 uint16_t u16;
1253 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
1254 return u16;
1255# else
1256 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1257 if (rcStrict == VINF_SUCCESS)
1258 {
1259 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1260 pVCpu->iem.s.offOpcode += 2;
1261# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1262 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1263# else
1264 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1265# endif
1266 }
1267 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1268# endif
1269}
1270
1271#endif /* IEM_WITH_SETJMP */
1272
1273#ifndef IEM_WITH_SETJMP
1274
1275/**
1276 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
1277 *
1278 * @returns Strict VBox status code.
1279 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1280 * @param pu32 Where to return the opcode double word.
1281 */
1282VBOXSTRICTRC iemOpcodeGetNextU16ZxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1283{
1284 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1285 if (rcStrict == VINF_SUCCESS)
1286 {
1287 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1288 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1289 pVCpu->iem.s.offOpcode = offOpcode + 2;
1290 }
1291 else
1292 *pu32 = 0;
1293 return rcStrict;
1294}
1295
1296
1297/**
1298 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
1299 *
1300 * @returns Strict VBox status code.
1301 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1302 * @param pu64 Where to return the opcode quad word.
1303 */
1304VBOXSTRICTRC iemOpcodeGetNextU16ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1305{
1306 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1307 if (rcStrict == VINF_SUCCESS)
1308 {
1309 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1310 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1311 pVCpu->iem.s.offOpcode = offOpcode + 2;
1312 }
1313 else
1314 *pu64 = 0;
1315 return rcStrict;
1316}
1317
1318#endif /* !IEM_WITH_SETJMP */
1319
1320#ifndef IEM_WITH_SETJMP
1321
1322/**
1323 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
1324 *
1325 * @returns Strict VBox status code.
1326 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1327 * @param pu32 Where to return the opcode dword.
1328 */
1329VBOXSTRICTRC iemOpcodeGetNextU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1330{
1331 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1332 if (rcStrict == VINF_SUCCESS)
1333 {
1334 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1335# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1336 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1337# else
1338 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1339 pVCpu->iem.s.abOpcode[offOpcode + 1],
1340 pVCpu->iem.s.abOpcode[offOpcode + 2],
1341 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1342# endif
1343 pVCpu->iem.s.offOpcode = offOpcode + 4;
1344 }
1345 else
1346 *pu32 = 0;
1347 return rcStrict;
1348}
1349
1350#else /* IEM_WITH_SETJMP */
1351
1352/**
1353 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
1354 *
1355 * @returns The opcode dword.
1356 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1357 */
1358uint32_t iemOpcodeGetNextU32SlowJmp(PVMCPUCC pVCpu) RT_NOEXCEPT
1359{
1360# ifdef IEM_WITH_CODE_TLB
1361 uint32_t u32;
1362 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
1363 return u32;
1364# else
1365 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1366 if (rcStrict == VINF_SUCCESS)
1367 {
1368 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1369 pVCpu->iem.s.offOpcode = offOpcode + 4;
1370# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1371 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1372# else
1373 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1374 pVCpu->iem.s.abOpcode[offOpcode + 1],
1375 pVCpu->iem.s.abOpcode[offOpcode + 2],
1376 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1377# endif
1378 }
1379 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1380# endif
1381}
1382
1383#endif /* IEM_WITH_SETJMP */
1384
1385#ifndef IEM_WITH_SETJMP
1386
1387/**
1388 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
1389 *
1390 * @returns Strict VBox status code.
1391 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1392 * @param pu64 Where to return the opcode qword (zero-extended dword).
1393 */
1394VBOXSTRICTRC iemOpcodeGetNextU32ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1395{
1396 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1397 if (rcStrict == VINF_SUCCESS)
1398 {
1399 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1400 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1401 pVCpu->iem.s.abOpcode[offOpcode + 1],
1402 pVCpu->iem.s.abOpcode[offOpcode + 2],
1403 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1404 pVCpu->iem.s.offOpcode = offOpcode + 4;
1405 }
1406 else
1407 *pu64 = 0;
1408 return rcStrict;
1409}
1410
1411
1412/**
1413 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
1414 *
1415 * @returns Strict VBox status code.
1416 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1417 * @param pu64 Where to return the opcode qword.
1418 */
1419VBOXSTRICTRC iemOpcodeGetNextS32SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1420{
1421 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1422 if (rcStrict == VINF_SUCCESS)
1423 {
1424 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1425 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1426 pVCpu->iem.s.abOpcode[offOpcode + 1],
1427 pVCpu->iem.s.abOpcode[offOpcode + 2],
1428 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1429 pVCpu->iem.s.offOpcode = offOpcode + 4;
1430 }
1431 else
1432 *pu64 = 0;
1433 return rcStrict;
1434}
1435
1436#endif /* !IEM_WITH_SETJMP */
1437
1438#ifndef IEM_WITH_SETJMP
1439
1440/**
1441 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
1442 *
1443 * @returns Strict VBox status code.
1444 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1445 * @param pu64 Where to return the opcode qword.
1446 */
1447VBOXSTRICTRC iemOpcodeGetNextU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1448{
1449 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
1450 if (rcStrict == VINF_SUCCESS)
1451 {
1452 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1453# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1454 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1455# else
1456 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1457 pVCpu->iem.s.abOpcode[offOpcode + 1],
1458 pVCpu->iem.s.abOpcode[offOpcode + 2],
1459 pVCpu->iem.s.abOpcode[offOpcode + 3],
1460 pVCpu->iem.s.abOpcode[offOpcode + 4],
1461 pVCpu->iem.s.abOpcode[offOpcode + 5],
1462 pVCpu->iem.s.abOpcode[offOpcode + 6],
1463 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1464# endif
1465 pVCpu->iem.s.offOpcode = offOpcode + 8;
1466 }
1467 else
1468 *pu64 = 0;
1469 return rcStrict;
1470}
1471
1472#else /* IEM_WITH_SETJMP */
1473
1474/**
1475 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
1476 *
1477 * @returns The opcode qword.
1478 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1479 */
1480uint64_t iemOpcodeGetNextU64SlowJmp(PVMCPUCC pVCpu) RT_NOEXCEPT
1481{
1482# ifdef IEM_WITH_CODE_TLB
1483 uint64_t u64;
1484 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
1485 return u64;
1486# else
1487 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
1488 if (rcStrict == VINF_SUCCESS)
1489 {
1490 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1491 pVCpu->iem.s.offOpcode = offOpcode + 8;
1492# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1493 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1494# else
1495 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1496 pVCpu->iem.s.abOpcode[offOpcode + 1],
1497 pVCpu->iem.s.abOpcode[offOpcode + 2],
1498 pVCpu->iem.s.abOpcode[offOpcode + 3],
1499 pVCpu->iem.s.abOpcode[offOpcode + 4],
1500 pVCpu->iem.s.abOpcode[offOpcode + 5],
1501 pVCpu->iem.s.abOpcode[offOpcode + 6],
1502 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1503# endif
1504 }
1505 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1506# endif
1507}
1508
1509#endif /* IEM_WITH_SETJMP */
1510
1511
1512
1513/** @name Misc Worker Functions.
1514 * @{
1515 */
1516
1517/**
1518 * Gets the exception class for the specified exception vector.
1519 *
1520 * @returns The class of the specified exception.
1521 * @param uVector The exception vector.
1522 */
1523static IEMXCPTCLASS iemGetXcptClass(uint8_t uVector) RT_NOEXCEPT
1524{
1525 Assert(uVector <= X86_XCPT_LAST);
1526 switch (uVector)
1527 {
1528 case X86_XCPT_DE:
1529 case X86_XCPT_TS:
1530 case X86_XCPT_NP:
1531 case X86_XCPT_SS:
1532 case X86_XCPT_GP:
1533 case X86_XCPT_SX: /* AMD only */
1534 return IEMXCPTCLASS_CONTRIBUTORY;
1535
1536 case X86_XCPT_PF:
1537 case X86_XCPT_VE: /* Intel only */
1538 return IEMXCPTCLASS_PAGE_FAULT;
1539
1540 case X86_XCPT_DF:
1541 return IEMXCPTCLASS_DOUBLE_FAULT;
1542 }
1543 return IEMXCPTCLASS_BENIGN;
1544}
1545
1546
1547/**
1548 * Evaluates how to handle an exception caused during delivery of another event
1549 * (exception / interrupt).
1550 *
1551 * @returns How to handle the recursive exception.
1552 * @param pVCpu The cross context virtual CPU structure of the
1553 * calling thread.
1554 * @param fPrevFlags The flags of the previous event.
1555 * @param uPrevVector The vector of the previous event.
1556 * @param fCurFlags The flags of the current exception.
1557 * @param uCurVector The vector of the current exception.
1558 * @param pfXcptRaiseInfo Where to store additional information about the
1559 * exception condition. Optional.
1560 */
1561VMM_INT_DECL(IEMXCPTRAISE) IEMEvaluateRecursiveXcpt(PVMCPUCC pVCpu, uint32_t fPrevFlags, uint8_t uPrevVector, uint32_t fCurFlags,
1562 uint8_t uCurVector, PIEMXCPTRAISEINFO pfXcptRaiseInfo)
1563{
1564 /*
1565 * Only CPU exceptions can be raised while delivering other events; software interrupt
1566 * (INTn/INT3/INTO/ICEBP) generated exceptions cannot occur as the current (second) exception.
1567 */
1568 AssertReturn(fCurFlags & IEM_XCPT_FLAGS_T_CPU_XCPT, IEMXCPTRAISE_INVALID);
1569 Assert(pVCpu); RT_NOREF(pVCpu);
1570 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x\n", uPrevVector, uCurVector));
1571
1572 IEMXCPTRAISE enmRaise = IEMXCPTRAISE_CURRENT_XCPT;
1573 IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
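    /*
     * Quick reference for the combination rules implemented below (summarized from the
     * Intel SDM table on double fault conditions):
     *   - contributory after contributory                 -> #DF
     *   - page fault or contributory after a page fault   -> #DF
     *   - contributory or page fault during #DF delivery  -> triple fault / shutdown
     *   - anything involving a benign class               -> just deliver the current one
     */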
1574 if (fPrevFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
1575 {
1576 IEMXCPTCLASS enmPrevXcptClass = iemGetXcptClass(uPrevVector);
1577 if (enmPrevXcptClass != IEMXCPTCLASS_BENIGN)
1578 {
1579 IEMXCPTCLASS enmCurXcptClass = iemGetXcptClass(uCurVector);
1580 if ( enmPrevXcptClass == IEMXCPTCLASS_PAGE_FAULT
1581 && ( enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT
1582 || enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY))
1583 {
1584 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
1585 fRaiseInfo = enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT ? IEMXCPTRAISEINFO_PF_PF
1586 : IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT;
1587 Log2(("IEMEvaluateRecursiveXcpt: Vectoring page fault. uPrevVector=%#x uCurVector=%#x uCr2=%#RX64\n", uPrevVector,
1588 uCurVector, pVCpu->cpum.GstCtx.cr2));
1589 }
1590 else if ( enmPrevXcptClass == IEMXCPTCLASS_CONTRIBUTORY
1591 && enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY)
1592 {
1593 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
1594 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x -> #DF\n", uPrevVector, uCurVector));
1595 }
1596 else if ( enmPrevXcptClass == IEMXCPTCLASS_DOUBLE_FAULT
1597 && ( enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY
1598 || enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT))
1599 {
1600 enmRaise = IEMXCPTRAISE_TRIPLE_FAULT;
1601 Log2(("IEMEvaluateRecursiveXcpt: #DF handler raised a %#x exception -> triple fault\n", uCurVector));
1602 }
1603 }
1604 else
1605 {
1606 if (uPrevVector == X86_XCPT_NMI)
1607 {
1608 fRaiseInfo = IEMXCPTRAISEINFO_NMI_XCPT;
1609 if (uCurVector == X86_XCPT_PF)
1610 {
1611 fRaiseInfo |= IEMXCPTRAISEINFO_NMI_PF;
1612 Log2(("IEMEvaluateRecursiveXcpt: NMI delivery caused a page fault\n"));
1613 }
1614 }
1615 else if ( uPrevVector == X86_XCPT_AC
1616 && uCurVector == X86_XCPT_AC)
1617 {
1618 enmRaise = IEMXCPTRAISE_CPU_HANG;
1619 fRaiseInfo = IEMXCPTRAISEINFO_AC_AC;
1620 Log2(("IEMEvaluateRecursiveXcpt: Recursive #AC - Bad guest\n"));
1621 }
1622 }
1623 }
1624 else if (fPrevFlags & IEM_XCPT_FLAGS_T_EXT_INT)
1625 {
1626 fRaiseInfo = IEMXCPTRAISEINFO_EXT_INT_XCPT;
1627 if (uCurVector == X86_XCPT_PF)
1628 fRaiseInfo |= IEMXCPTRAISEINFO_EXT_INT_PF;
1629 }
1630 else
1631 {
1632 Assert(fPrevFlags & IEM_XCPT_FLAGS_T_SOFT_INT);
1633 fRaiseInfo = IEMXCPTRAISEINFO_SOFT_INT_XCPT;
1634 }
1635
1636 if (pfXcptRaiseInfo)
1637 *pfXcptRaiseInfo = fRaiseInfo;
1638 return enmRaise;
1639}
1640
1641
1642/**
1643 * Enters the CPU shutdown state initiated by a triple fault or other
1644 * unrecoverable conditions.
1645 *
1646 * @returns Strict VBox status code.
1647 * @param pVCpu The cross context virtual CPU structure of the
1648 * calling thread.
1649 */
1650static VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPUCC pVCpu) RT_NOEXCEPT
1651{
1652 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
1653 IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(pVCpu, VMX_EXIT_TRIPLE_FAULT, 0 /* u64ExitQual */);
1654
1655 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
1656 {
1657 Log2(("shutdown: Guest intercept -> #VMEXIT\n"));
1658 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
1659 }
1660
1661 RT_NOREF(pVCpu);
1662 return VINF_EM_TRIPLE_FAULT;
1663}
1664
1665
1666/**
1667 * Validates a new SS segment.
1668 *
1669 * @returns VBox strict status code.
1670 * @param pVCpu The cross context virtual CPU structure of the
1671 * calling thread.
1672 * @param NewSS The new SS selector.
1673 * @param uCpl The CPL to load the stack for.
1674 * @param pDesc Where to return the descriptor.
1675 */
1676static VBOXSTRICTRC iemMiscValidateNewSS(PVMCPUCC pVCpu, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc) RT_NOEXCEPT
1677{
1678 /* Null selectors are not allowed (we're not called for dispatching
1679 interrupts with SS=0 in long mode). */
1680 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
1681 {
1682 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
1683 return iemRaiseTaskSwitchFault0(pVCpu);
1684 }
1685
1686 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
1687 if ((NewSS & X86_SEL_RPL) != uCpl)
1688 {
1689 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl));
1690 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1691 }
1692
1693 /*
1694 * Read the descriptor.
1695 */
1696 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
1697 if (rcStrict != VINF_SUCCESS)
1698 return rcStrict;
1699
1700 /*
1701 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
1702 */
1703 if (!pDesc->Legacy.Gen.u1DescType)
1704 {
1705 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
1706 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1707 }
1708
1709 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
1710 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
1711 {
1712 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
1713 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1714 }
1715 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
1716 {
1717 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
1718 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1719 }
1720
1721 /* Is it there? */
1722 /** @todo testcase: Is this checked before the canonical / limit check below? */
1723 if (!pDesc->Legacy.Gen.u1Present)
1724 {
1725 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
1726 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
1727 }
1728
1729 return VINF_SUCCESS;
1730}
1731
1732/** @} */
1733
1734
1735/** @name Raising Exceptions.
1736 *
1737 * @{
1738 */
1739
1740
1741/**
1742 * Loads the specified stack far pointer from the TSS.
1743 *
1744 * @returns VBox strict status code.
1745 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1746 * @param uCpl The CPL to load the stack for.
1747 * @param pSelSS Where to return the new stack segment.
1748 * @param puEsp Where to return the new stack pointer.
1749 */
1750static VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPUCC pVCpu, uint8_t uCpl, PRTSEL pSelSS, uint32_t *puEsp) RT_NOEXCEPT
1751{
1752 VBOXSTRICTRC rcStrict;
1753 Assert(uCpl < 4);
1754
1755 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
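    /*
     * Stack pointer slot layout in the TSS (illustrative, per the Intel SDM):
     *   16-bit TSS:  SP0 at 0x02, SS0 at 0x04, SP1 at 0x06, ...  => off = uCpl * 4 + 2
     *   32-bit TSS: ESP0 at 0x04, SS0 at 0x08, ESP1 at 0x0c, ... => off = uCpl * 8 + 4
     * The SS:SP pair is fetched with a single 32-bit / 64-bit system read below.
     */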
1756 switch (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type)
1757 {
1758 /*
1759 * 16-bit TSS (X86TSS16).
1760 */
1761 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
1762 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
1763 {
1764 uint32_t off = uCpl * 4 + 2;
1765 if (off + 4 <= pVCpu->cpum.GstCtx.tr.u32Limit)
1766 {
1767 /** @todo check actual access pattern here. */
1768 uint32_t u32Tmp = 0; /* gcc maybe... */
1769 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
1770 if (rcStrict == VINF_SUCCESS)
1771 {
1772 *puEsp = RT_LOWORD(u32Tmp);
1773 *pSelSS = RT_HIWORD(u32Tmp);
1774 return VINF_SUCCESS;
1775 }
1776 }
1777 else
1778 {
1779 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
1780 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
1781 }
1782 break;
1783 }
1784
1785 /*
1786 * 32-bit TSS (X86TSS32).
1787 */
1788 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
1789 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
1790 {
1791 uint32_t off = uCpl * 8 + 4;
1792 if (off + 7 <= pVCpu->cpum.GstCtx.tr.u32Limit)
1793 {
1794/** @todo check actual access pattern here. */
1795 uint64_t u64Tmp;
1796 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
1797 if (rcStrict == VINF_SUCCESS)
1798 {
1799 *puEsp = u64Tmp & UINT32_MAX;
1800 *pSelSS = (RTSEL)(u64Tmp >> 32);
1801 return VINF_SUCCESS;
1802 }
1803 }
1804 else
1805 {
1806 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
1807 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
1808 }
1809 break;
1810 }
1811
1812 default:
1813 AssertFailed();
1814 rcStrict = VERR_IEM_IPE_4;
1815 break;
1816 }
1817
1818 *puEsp = 0; /* make gcc happy */
1819 *pSelSS = 0; /* make gcc happy */
1820 return rcStrict;
1821}
1822
1823
1824/**
1825 * Loads the specified stack pointer from the 64-bit TSS.
1826 *
1827 * @returns VBox strict status code.
1828 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1829 * @param uCpl The CPL to load the stack for.
1830 * @param uIst The interrupt stack table index, 0 if to use uCpl.
1831 * @param puRsp Where to return the new stack pointer.
1832 */
1833static VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPUCC pVCpu, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp) RT_NOEXCEPT
1834{
1835 Assert(uCpl < 4);
1836 Assert(uIst < 8);
1837 *puRsp = 0; /* make gcc happy */
1838
1839 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
1840 AssertReturn(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
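    /* 64-bit TSS layout reminder (per the Intel SDM / AMD APM): rsp0/1/2 live at
       offsets 0x04/0x0c/0x14 and ist1..ist7 at 0x24..0x54, hence the math below. */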
1841
1842 uint32_t off;
1843 if (uIst)
1844 off = (uIst - 1) * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, ist1);
1845 else
1846 off = uCpl * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, rsp0);
1847 if (off + sizeof(uint64_t) > pVCpu->cpum.GstCtx.tr.u32Limit)
1848 {
1849 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pVCpu->cpum.GstCtx.tr.u32Limit));
1850 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
1851 }
1852
1853 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
1854}
1855
1856
1857/**
1858 * Adjust the CPU state according to the exception being raised.
1859 *
1860 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1861 * @param u8Vector The exception that has been raised.
1862 */
1863DECLINLINE(void) iemRaiseXcptAdjustState(PVMCPUCC pVCpu, uint8_t u8Vector)
1864{
1865 switch (u8Vector)
1866 {
1867 case X86_XCPT_DB:
1868 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);
1869 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
1870 break;
1871 /** @todo Read the AMD and Intel exception reference... */
1872 }
1873}
1874
1875
1876/**
1877 * Implements exceptions and interrupts for real mode.
1878 *
1879 * @returns VBox strict status code.
1880 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1881 * @param cbInstr The number of bytes to offset rIP by in the return
1882 * address.
1883 * @param u8Vector The interrupt / exception vector number.
1884 * @param fFlags The flags.
1885 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
1886 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
1887 */
1888static VBOXSTRICTRC
1889iemRaiseXcptOrIntInRealMode(PVMCPUCC pVCpu,
1890 uint8_t cbInstr,
1891 uint8_t u8Vector,
1892 uint32_t fFlags,
1893 uint16_t uErr,
1894 uint64_t uCr2) RT_NOEXCEPT
1895{
1896 NOREF(uErr); NOREF(uCr2);
1897 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
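    /* Real-mode reminder: the "IDT" here is the interrupt vector table, an array of
       4-byte far pointers (offset:segment) at IDTR.base + vector * 4, with IDTR.limit
       normally 0x3ff. The bounds check and fetch below follow that layout. */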
1898
1899 /*
1900 * Read the IDT entry.
1901 */
1902 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
1903 {
1904 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
1905 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
1906 }
1907 RTFAR16 Idte;
1908 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(4) * u8Vector);
1909 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
1910 {
1911 Log(("iemRaiseXcptOrIntInRealMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
1912 return rcStrict;
1913 }
1914
1915 /*
1916 * Push the stack frame.
1917 */
1918 uint16_t *pu16Frame;
1919 uint64_t uNewRsp;
1920 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, 3, (void **)&pu16Frame, &uNewRsp);
1921 if (rcStrict != VINF_SUCCESS)
1922 return rcStrict;
1923
1924 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
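    /* On 8086/80186 class CPUs FLAGS bits 12 thru 15 always read as ones, so the image
       pushed on the stack has them set; mimic that for those target CPUs. */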
1925#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
1926 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
1927 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
1928 fEfl |= UINT16_C(0xf000);
1929#endif
1930 pu16Frame[2] = (uint16_t)fEfl;
1931 pu16Frame[1] = (uint16_t)pVCpu->cpum.GstCtx.cs.Sel;
1932 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
1933 rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu16Frame, uNewRsp);
1934 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
1935 return rcStrict;
1936
1937 /*
1938 * Load the vector address into cs:ip and make exception specific state
1939 * adjustments.
1940 */
1941 pVCpu->cpum.GstCtx.cs.Sel = Idte.sel;
1942 pVCpu->cpum.GstCtx.cs.ValidSel = Idte.sel;
1943 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
1944 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)Idte.sel << 4;
1945 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
1946 pVCpu->cpum.GstCtx.rip = Idte.off;
1947 fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC);
1948 IEMMISC_SET_EFL(pVCpu, fEfl);
1949
1950 /** @todo do we actually do this in real mode? */
1951 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
1952 iemRaiseXcptAdjustState(pVCpu, u8Vector);
1953
1954 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
1955}
1956
1957
1958/**
1959 * Loads a NULL data selector into a segment register when coming from V8086 mode.
1960 *
1961 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1962 * @param pSReg Pointer to the segment register.
1963 */
1964DECLINLINE(void) iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPUCC pVCpu, PCPUMSELREG pSReg)
1965{
1966 pSReg->Sel = 0;
1967 pSReg->ValidSel = 0;
1968 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
1969 {
1970 /* VT-x (Intel 3960x) doesn't change the base and limit, clears and sets the following attributes */
1971 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
1972 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
1973 }
1974 else
1975 {
1976 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
1977 /** @todo check this on AMD-V */
1978 pSReg->u64Base = 0;
1979 pSReg->u32Limit = 0;
1980 }
1981}
1982
1983
1984/**
1985 * Loads a segment selector during a task switch in V8086 mode.
1986 *
1987 * @param pSReg Pointer to the segment register.
1988 * @param uSel The selector value to load.
1989 */
1990DECLINLINE(void) iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
1991{
1992 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
1993 pSReg->Sel = uSel;
1994 pSReg->ValidSel = uSel;
1995 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
1996 pSReg->u64Base = uSel << 4;
1997 pSReg->u32Limit = 0xffff;
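    /* 0xf3 = present, DPL=3, S=1, type 3 (read/write data, accessed) - the fixed
       attribute value segments take on in virtual-8086 mode. */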
1998 pSReg->Attr.u = 0xf3;
1999}
2000
2001
2002/**
2003 * Loads a segment selector during a task switch in protected mode.
2004 *
2005 * In this task switch scenario, we would throw \#TS exceptions rather than
2006 * \#GPs.
2007 *
2008 * @returns VBox strict status code.
2009 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2010 * @param pSReg Pointer to the segment register.
2011 * @param uSel The new selector value.
2012 *
2013 * @remarks This does _not_ handle CS or SS.
2014 * @remarks This expects pVCpu->iem.s.uCpl to be up to date.
2015 */
2016static VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPUCC pVCpu, PCPUMSELREG pSReg, uint16_t uSel) RT_NOEXCEPT
2017{
2018 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
2019
2020 /* Null data selector. */
2021 if (!(uSel & X86_SEL_MASK_OFF_RPL))
2022 {
2023 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
2024 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
2025 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2026 return VINF_SUCCESS;
2027 }
2028
2029 /* Fetch the descriptor. */
2030 IEMSELDESC Desc;
2031 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
2032 if (rcStrict != VINF_SUCCESS)
2033 {
2034 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
2035 VBOXSTRICTRC_VAL(rcStrict)));
2036 return rcStrict;
2037 }
2038
2039 /* Must be a data segment or readable code segment. */
2040 if ( !Desc.Legacy.Gen.u1DescType
2041 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
2042 {
2043 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
2044 Desc.Legacy.Gen.u4Type));
2045 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2046 }
2047
2048 /* Check privileges for data segments and non-conforming code segments. */
2049 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2050 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2051 {
2052 /* The RPL and the new CPL must be less than or equal to the DPL. */
2053 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
2054 || (pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl))
2055 {
2056 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
2057 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
2058 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2059 }
2060 }
2061
2062 /* Is it there? */
2063 if (!Desc.Legacy.Gen.u1Present)
2064 {
2065 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
2066 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2067 }
2068
2069 /* The base and limit. */
2070 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
2071 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
2072
2073 /*
2074 * Ok, everything checked out fine. Now set the accessed bit before
2075 * committing the result into the registers.
2076 */
2077 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2078 {
2079 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
2080 if (rcStrict != VINF_SUCCESS)
2081 return rcStrict;
2082 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2083 }
2084
2085 /* Commit */
2086 pSReg->Sel = uSel;
2087 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
2088 pSReg->u32Limit = cbLimit;
2089 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
2090 pSReg->ValidSel = uSel;
2091 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2092 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2093 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
2094
2095 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
2096 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2097 return VINF_SUCCESS;
2098}
2099
2100
2101/**
2102 * Performs a task switch.
2103 *
2104 * If the task switch is the result of a JMP, CALL or IRET instruction, the
2105 * caller is responsible for performing the necessary checks (like DPL, TSS
2106 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
2107 * reference for JMP, CALL, IRET.
2108 *
2109 * If the task switch is due to a software interrupt or hardware exception,
2110 * the caller is responsible for validating the TSS selector and descriptor. See
2111 * Intel Instruction reference for INT n.
2112 *
2113 * @returns VBox strict status code.
2114 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2115 * @param enmTaskSwitch The cause of the task switch.
2116 * @param uNextEip The EIP effective after the task switch.
2117 * @param fFlags The flags, see IEM_XCPT_FLAGS_XXX.
2118 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2119 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2120 * @param SelTSS The TSS selector of the new task.
2121 * @param pNewDescTSS Pointer to the new TSS descriptor.
2122 */
2123VBOXSTRICTRC
2124iemTaskSwitch(PVMCPUCC pVCpu,
2125 IEMTASKSWITCH enmTaskSwitch,
2126 uint32_t uNextEip,
2127 uint32_t fFlags,
2128 uint16_t uErr,
2129 uint64_t uCr2,
2130 RTSEL SelTSS,
2131 PIEMSELDESC pNewDescTSS) RT_NOEXCEPT
2132{
2133 Assert(!IEM_IS_REAL_MODE(pVCpu));
2134 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
2135 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2136
2137 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
2138 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2139 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2140 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2141 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2142
2143 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2144 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2145
2146 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTSS,
2147 fIsNewTSS386, pVCpu->cpum.GstCtx.eip, uNextEip));
2148
2149 /* Update CR2 in case it's a page-fault. */
2150 /** @todo This should probably be done much earlier in IEM/PGM. See
2151 * @bugref{5653#c49}. */
2152 if (fFlags & IEM_XCPT_FLAGS_CR2)
2153 pVCpu->cpum.GstCtx.cr2 = uCr2;
2154
2155 /*
2156 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
2157 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
2158 */
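    /* For reference: the architectural minimums are 0x67 (104 bytes) for a 32-bit TSS and
       0x2b (44 bytes) for a 16-bit TSS, which is what the X86_SEL_TYPE_SYS_*_TSS_LIMIT_MIN
       constants are expected to be. */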
2159 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
2160 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
2161 if (uNewTSSLimit < uNewTSSLimitMin)
2162 {
2163 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
2164 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
2165 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
2166 }
2167
2168 /*
2169 * Task switches in VMX non-root mode always cause a task-switch VM-exit.
2170 * The new TSS must have been read and validated (DPL, limits etc.) before a
2171 * task-switch VM-exit commences.
2172 *
2173 * See Intel spec. 25.4.2 "Treatment of Task Switches".
2174 */
2175 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
2176 {
2177 Log(("iemTaskSwitch: Guest intercept (source=%u, sel=%#x) -> VM-exit.\n", enmTaskSwitch, SelTSS));
2178 IEM_VMX_VMEXIT_TASK_SWITCH_RET(pVCpu, enmTaskSwitch, SelTSS, uNextEip - pVCpu->cpum.GstCtx.eip);
2179 }
2180
2181 /*
2182 * The SVM nested-guest intercept for task-switch takes priority over all exceptions
2183 * after validating the incoming (new) TSS, see AMD spec. 15.14.1 "Task Switch Intercept".
2184 */
2185 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TASK_SWITCH))
2186 {
2187 uint32_t const uExitInfo1 = SelTSS;
2188 uint32_t uExitInfo2 = uErr;
2189 switch (enmTaskSwitch)
2190 {
2191 case IEMTASKSWITCH_JUMP: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_JUMP; break;
2192 case IEMTASKSWITCH_IRET: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_IRET; break;
2193 default: break;
2194 }
2195 if (fFlags & IEM_XCPT_FLAGS_ERR)
2196 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_HAS_ERROR_CODE;
2197 if (pVCpu->cpum.GstCtx.eflags.Bits.u1RF)
2198 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_EFLAGS_RF;
2199
2200 Log(("iemTaskSwitch: Guest intercept -> #VMEXIT. uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uExitInfo1, uExitInfo2));
2201 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_TASK_SWITCH, uExitInfo1, uExitInfo2);
2202 RT_NOREF2(uExitInfo1, uExitInfo2);
2203 }
2204
2205 /*
2206 * Check the current TSS limit. The last data written to the current TSS during the
2207 * task switch is the 2-byte GS selector at offset 0x5C (32-bit TSS) or the 2-byte DS selector at offset 0x28 (16-bit TSS).
2208 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2209 *
2210 * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
2211 * end up with smaller than "legal" TSS limits.
2212 */
2213 uint32_t const uCurTSSLimit = pVCpu->cpum.GstCtx.tr.u32Limit;
2214 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
2215 if (uCurTSSLimit < uCurTSSLimitMin)
2216 {
2217 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
2218 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
2219 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
2220 }
2221
2222 /*
2223 * Verify that the new TSS can be accessed and map it. Map only the required contents
2224 * and not the entire TSS.
2225 */
2226 void *pvNewTSS;
2227 uint32_t const cbNewTSS = uNewTSSLimitMin + 1;
2228 RTGCPTR const GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
2229 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
2230 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
2231 * not perform correct translation if this happens. See Intel spec. 7.2.1
2232 * "Task-State Segment". */
2233 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW, 0);
2234 if (rcStrict != VINF_SUCCESS)
2235 {
2236 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
2237 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
2238 return rcStrict;
2239 }
2240
2241 /*
2242 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
2243 */
2244 uint32_t u32EFlags = pVCpu->cpum.GstCtx.eflags.u32;
2245 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
2246 || enmTaskSwitch == IEMTASKSWITCH_IRET)
2247 {
2248 PX86DESC pDescCurTSS;
2249 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
2250 pVCpu->cpum.GstCtx.gdtr.pGdt + (pVCpu->cpum.GstCtx.tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0);
2251 if (rcStrict != VINF_SUCCESS)
2252 {
2253 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2254 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2255 return rcStrict;
2256 }
2257
2258 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2259 rcStrict = iemMemCommitAndUnmap(pVCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
2260 if (rcStrict != VINF_SUCCESS)
2261 {
2262 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2263 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2264 return rcStrict;
2265 }
2266
2267 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
2268 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
2269 {
2270 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2271 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2272 u32EFlags &= ~X86_EFL_NT;
2273 }
2274 }
2275
2276 /*
2277 * Save the CPU state into the current TSS.
2278 */
2279 RTGCPTR const GCPtrCurTSS = pVCpu->cpum.GstCtx.tr.u64Base;
2280 if (GCPtrNewTSS == GCPtrCurTSS)
2281 {
2282 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
2283 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
2284 pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.u32, pVCpu->cpum.GstCtx.eax,
2285 pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel,
2286 pVCpu->cpum.GstCtx.ldtr.Sel));
2287 }
2288 if (fIsNewTSS386)
2289 {
2290 /*
2291 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
2292 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2293 */
2294 void *pvCurTSS32;
2295 uint32_t const offCurTSS = RT_UOFFSETOF(X86TSS32, eip);
2296 uint32_t const cbCurTSS = RT_UOFFSETOF(X86TSS32, selLdt) - RT_UOFFSETOF(X86TSS32, eip);
2297 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
2298 rcStrict = iemMemMap(pVCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW, 0);
2299 if (rcStrict != VINF_SUCCESS)
2300 {
2301 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
2302 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
2303 return rcStrict;
2304 }
2305
2306 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..offCurTSS+cbCurTSS). */
2307 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
2308 pCurTSS32->eip = uNextEip;
2309 pCurTSS32->eflags = u32EFlags;
2310 pCurTSS32->eax = pVCpu->cpum.GstCtx.eax;
2311 pCurTSS32->ecx = pVCpu->cpum.GstCtx.ecx;
2312 pCurTSS32->edx = pVCpu->cpum.GstCtx.edx;
2313 pCurTSS32->ebx = pVCpu->cpum.GstCtx.ebx;
2314 pCurTSS32->esp = pVCpu->cpum.GstCtx.esp;
2315 pCurTSS32->ebp = pVCpu->cpum.GstCtx.ebp;
2316 pCurTSS32->esi = pVCpu->cpum.GstCtx.esi;
2317 pCurTSS32->edi = pVCpu->cpum.GstCtx.edi;
2318 pCurTSS32->es = pVCpu->cpum.GstCtx.es.Sel;
2319 pCurTSS32->cs = pVCpu->cpum.GstCtx.cs.Sel;
2320 pCurTSS32->ss = pVCpu->cpum.GstCtx.ss.Sel;
2321 pCurTSS32->ds = pVCpu->cpum.GstCtx.ds.Sel;
2322 pCurTSS32->fs = pVCpu->cpum.GstCtx.fs.Sel;
2323 pCurTSS32->gs = pVCpu->cpum.GstCtx.gs.Sel;
2324
2325 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
2326 if (rcStrict != VINF_SUCCESS)
2327 {
2328 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2329 VBOXSTRICTRC_VAL(rcStrict)));
2330 return rcStrict;
2331 }
2332 }
2333 else
2334 {
2335 /*
2336 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
2337 */
2338 void *pvCurTSS16;
2339 uint32_t const offCurTSS = RT_UOFFSETOF(X86TSS16, ip);
2340 uint32_t const cbCurTSS = RT_UOFFSETOF(X86TSS16, selLdt) - RT_UOFFSETOF(X86TSS16, ip);
2341 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
2342 rcStrict = iemMemMap(pVCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW, 0);
2343 if (rcStrict != VINF_SUCCESS)
2344 {
2345 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
2346 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
2347 return rcStrict;
2348 }
2349
2350 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..offCurTSS+cbCurTSS). */
2351 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
2352 pCurTSS16->ip = uNextEip;
2353 pCurTSS16->flags = u32EFlags;
2354 pCurTSS16->ax = pVCpu->cpum.GstCtx.ax;
2355 pCurTSS16->cx = pVCpu->cpum.GstCtx.cx;
2356 pCurTSS16->dx = pVCpu->cpum.GstCtx.dx;
2357 pCurTSS16->bx = pVCpu->cpum.GstCtx.bx;
2358 pCurTSS16->sp = pVCpu->cpum.GstCtx.sp;
2359 pCurTSS16->bp = pVCpu->cpum.GstCtx.bp;
2360 pCurTSS16->si = pVCpu->cpum.GstCtx.si;
2361 pCurTSS16->di = pVCpu->cpum.GstCtx.di;
2362 pCurTSS16->es = pVCpu->cpum.GstCtx.es.Sel;
2363 pCurTSS16->cs = pVCpu->cpum.GstCtx.cs.Sel;
2364 pCurTSS16->ss = pVCpu->cpum.GstCtx.ss.Sel;
2365 pCurTSS16->ds = pVCpu->cpum.GstCtx.ds.Sel;
2366
2367 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
2368 if (rcStrict != VINF_SUCCESS)
2369 {
2370 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2371 VBOXSTRICTRC_VAL(rcStrict)));
2372 return rcStrict;
2373 }
2374 }
2375
2376 /*
2377 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
2378 */
2379 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2380 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2381 {
2382 /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */
2383 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
2384 pNewTSS->selPrev = pVCpu->cpum.GstCtx.tr.Sel;
2385 }
2386
2387 /*
2388 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky,
2389 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
2390 */
2391 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
2392 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
2393 bool fNewDebugTrap;
2394 if (fIsNewTSS386)
2395 {
2396 PCX86TSS32 pNewTSS32 = (PCX86TSS32)pvNewTSS;
2397 uNewCr3 = (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
2398 uNewEip = pNewTSS32->eip;
2399 uNewEflags = pNewTSS32->eflags;
2400 uNewEax = pNewTSS32->eax;
2401 uNewEcx = pNewTSS32->ecx;
2402 uNewEdx = pNewTSS32->edx;
2403 uNewEbx = pNewTSS32->ebx;
2404 uNewEsp = pNewTSS32->esp;
2405 uNewEbp = pNewTSS32->ebp;
2406 uNewEsi = pNewTSS32->esi;
2407 uNewEdi = pNewTSS32->edi;
2408 uNewES = pNewTSS32->es;
2409 uNewCS = pNewTSS32->cs;
2410 uNewSS = pNewTSS32->ss;
2411 uNewDS = pNewTSS32->ds;
2412 uNewFS = pNewTSS32->fs;
2413 uNewGS = pNewTSS32->gs;
2414 uNewLdt = pNewTSS32->selLdt;
2415 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
2416 }
2417 else
2418 {
2419 PCX86TSS16 pNewTSS16 = (PCX86TSS16)pvNewTSS;
2420 uNewCr3 = 0;
2421 uNewEip = pNewTSS16->ip;
2422 uNewEflags = pNewTSS16->flags;
2423 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax;
2424 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx;
2425 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx;
2426 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx;
2427 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp;
2428 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp;
2429 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si;
2430 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di;
2431 uNewES = pNewTSS16->es;
2432 uNewCS = pNewTSS16->cs;
2433 uNewSS = pNewTSS16->ss;
2434 uNewDS = pNewTSS16->ds;
2435 uNewFS = 0;
2436 uNewGS = 0;
2437 uNewLdt = pNewTSS16->selLdt;
2438 fNewDebugTrap = false;
2439 }
2440
2441 if (GCPtrNewTSS == GCPtrCurTSS)
2442 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
2443 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
2444
2445 /*
2446 * We're done accessing the new TSS.
2447 */
2448 rcStrict = iemMemCommitAndUnmap(pVCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
2449 if (rcStrict != VINF_SUCCESS)
2450 {
2451 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
2452 return rcStrict;
2453 }
2454
2455 /*
2456 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
2457 */
2458 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
2459 {
2460 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
2461 pVCpu->cpum.GstCtx.gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0);
2462 if (rcStrict != VINF_SUCCESS)
2463 {
2464 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2465 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2466 return rcStrict;
2467 }
2468
2469 /* Check that the descriptor indicates the new TSS is available (not busy). */
2470 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2471 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
2472 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
2473
2474 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2475 rcStrict = iemMemCommitAndUnmap(pVCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
2476 if (rcStrict != VINF_SUCCESS)
2477 {
2478 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2479 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2480 return rcStrict;
2481 }
2482 }
2483
2484 /*
2485 * From this point on, we're technically in the new task. Exceptions raised from here on are
2486 * regarded as occurring after the task switch has completed but before the first instruction of the new task executes.
2487 */
2488 pVCpu->cpum.GstCtx.tr.Sel = SelTSS;
2489 pVCpu->cpum.GstCtx.tr.ValidSel = SelTSS;
2490 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
2491 pVCpu->cpum.GstCtx.tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
2492 pVCpu->cpum.GstCtx.tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
2493 pVCpu->cpum.GstCtx.tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy);
2494 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
2495
2496 /* Set the busy bit in TR. */
2497 pVCpu->cpum.GstCtx.tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2498
2499 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
2500 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2501 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2502 {
2503 uNewEflags |= X86_EFL_NT;
2504 }
2505
2506 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
2507 pVCpu->cpum.GstCtx.cr0 |= X86_CR0_TS;
2508 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
2509
2510 pVCpu->cpum.GstCtx.eip = uNewEip;
2511 pVCpu->cpum.GstCtx.eax = uNewEax;
2512 pVCpu->cpum.GstCtx.ecx = uNewEcx;
2513 pVCpu->cpum.GstCtx.edx = uNewEdx;
2514 pVCpu->cpum.GstCtx.ebx = uNewEbx;
2515 pVCpu->cpum.GstCtx.esp = uNewEsp;
2516 pVCpu->cpum.GstCtx.ebp = uNewEbp;
2517 pVCpu->cpum.GstCtx.esi = uNewEsi;
2518 pVCpu->cpum.GstCtx.edi = uNewEdi;
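    /* Only the implemented ("live") EFLAGS bits are taken from the TSS image, and the
       reserved always-one bit (bit 1) is forced, matching what the CPU architecturally loads. */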
2519
2520 uNewEflags &= X86_EFL_LIVE_MASK;
2521 uNewEflags |= X86_EFL_RA1_MASK;
2522 IEMMISC_SET_EFL(pVCpu, uNewEflags);
2523
2524 /*
2525 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
2526 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
2527 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
2528 */
2529 pVCpu->cpum.GstCtx.es.Sel = uNewES;
2530 pVCpu->cpum.GstCtx.es.Attr.u &= ~X86DESCATTR_P;
2531
2532 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
2533 pVCpu->cpum.GstCtx.cs.Attr.u &= ~X86DESCATTR_P;
2534
2535 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
2536 pVCpu->cpum.GstCtx.ss.Attr.u &= ~X86DESCATTR_P;
2537
2538 pVCpu->cpum.GstCtx.ds.Sel = uNewDS;
2539 pVCpu->cpum.GstCtx.ds.Attr.u &= ~X86DESCATTR_P;
2540
2541 pVCpu->cpum.GstCtx.fs.Sel = uNewFS;
2542 pVCpu->cpum.GstCtx.fs.Attr.u &= ~X86DESCATTR_P;
2543
2544 pVCpu->cpum.GstCtx.gs.Sel = uNewGS;
2545 pVCpu->cpum.GstCtx.gs.Attr.u &= ~X86DESCATTR_P;
2546 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2547
2548 pVCpu->cpum.GstCtx.ldtr.Sel = uNewLdt;
2549 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
2550 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_P;
2551 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
2552
2553 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2554 {
2555 pVCpu->cpum.GstCtx.es.Attr.u |= X86DESCATTR_UNUSABLE;
2556 pVCpu->cpum.GstCtx.cs.Attr.u |= X86DESCATTR_UNUSABLE;
2557 pVCpu->cpum.GstCtx.ss.Attr.u |= X86DESCATTR_UNUSABLE;
2558 pVCpu->cpum.GstCtx.ds.Attr.u |= X86DESCATTR_UNUSABLE;
2559 pVCpu->cpum.GstCtx.fs.Attr.u |= X86DESCATTR_UNUSABLE;
2560 pVCpu->cpum.GstCtx.gs.Attr.u |= X86DESCATTR_UNUSABLE;
2561 pVCpu->cpum.GstCtx.ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
2562 }
2563
2564 /*
2565 * Switch CR3 for the new task.
2566 */
2567 if ( fIsNewTSS386
2568 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG))
2569 {
2570 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
2571 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
2572 AssertRCSuccessReturn(rc, rc);
2573
2574 /* Inform PGM. */
2575 /** @todo Should we raise \#GP(0) here when PAE PDPEs are invalid? */
2576 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PGE));
2577 AssertRCReturn(rc, rc);
2578 /* ignore informational status codes */
2579
2580 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
2581 }
2582
2583 /*
2584 * Switch LDTR for the new task.
2585 */
2586 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
2587 iemHlpLoadNullDataSelectorProt(pVCpu, &pVCpu->cpum.GstCtx.ldtr, uNewLdt);
2588 else
2589 {
2590 Assert(!pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
2591
2592 IEMSELDESC DescNewLdt;
2593 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
2594 if (rcStrict != VINF_SUCCESS)
2595 {
2596 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
2597 uNewLdt, pVCpu->cpum.GstCtx.gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
2598 return rcStrict;
2599 }
2600 if ( !DescNewLdt.Legacy.Gen.u1Present
2601 || DescNewLdt.Legacy.Gen.u1DescType
2602 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
2603 {
2604 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
2605 uNewLdt, DescNewLdt.Legacy.u));
2606 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
2607 }
2608
2609 pVCpu->cpum.GstCtx.ldtr.ValidSel = uNewLdt;
2610 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
2611 pVCpu->cpum.GstCtx.ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
2612 pVCpu->cpum.GstCtx.ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
2613 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
2614 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2615 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
2616 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
2617 }
2618
2619 IEMSELDESC DescSS;
2620 if (IEM_IS_V86_MODE(pVCpu))
2621 {
2622 pVCpu->iem.s.uCpl = 3;
2623 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.es, uNewES);
2624 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.cs, uNewCS);
2625 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ss, uNewSS);
2626 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ds, uNewDS);
2627 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.fs, uNewFS);
2628 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.gs, uNewGS);
2629
2630 /* Quick fix: fake DescSS. */ /** @todo fix the code further down? */
2631 DescSS.Legacy.u = 0;
2632 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u32Limit;
2633 DescSS.Legacy.Gen.u4LimitHigh = pVCpu->cpum.GstCtx.ss.u32Limit >> 16;
2634 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u64Base;
2635 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 16);
2636 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 24);
2637 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
2638 DescSS.Legacy.Gen.u2Dpl = 3;
2639 }
2640 else
2641 {
2642 uint8_t const uNewCpl = (uNewCS & X86_SEL_RPL);
2643
2644 /*
2645 * Load the stack segment for the new task.
2646 */
2647 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
2648 {
2649 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
2650 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2651 }
2652
2653 /* Fetch the descriptor. */
2654 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
2655 if (rcStrict != VINF_SUCCESS)
2656 {
2657 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
2658 VBOXSTRICTRC_VAL(rcStrict)));
2659 return rcStrict;
2660 }
2661
2662 /* SS must be a data segment and writable. */
2663 if ( !DescSS.Legacy.Gen.u1DescType
2664 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
2665 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
2666 {
2667 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
2668 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
2669 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2670 }
2671
2672 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
2673 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
2674 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
2675 {
2676 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
2677 uNewCpl));
2678 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2679 }
2680
2681 /* Is it there? */
2682 if (!DescSS.Legacy.Gen.u1Present)
2683 {
2684 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
2685 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2686 }
2687
2688 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
2689 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
2690
2691 /* Set the accessed bit before committing the result into SS. */
2692 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2693 {
2694 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
2695 if (rcStrict != VINF_SUCCESS)
2696 return rcStrict;
2697 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2698 }
2699
2700 /* Commit SS. */
2701 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
2702 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSS;
2703 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
2704 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimit;
2705 pVCpu->cpum.GstCtx.ss.u64Base = u64Base;
2706 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
2707 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
2708
2709 /* CPL has changed, update IEM before loading rest of segments. */
2710 pVCpu->iem.s.uCpl = uNewCpl;
2711
2712 /*
2713 * Load the data segments for the new task.
2714 */
2715 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.es, uNewES);
2716 if (rcStrict != VINF_SUCCESS)
2717 return rcStrict;
2718 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.ds, uNewDS);
2719 if (rcStrict != VINF_SUCCESS)
2720 return rcStrict;
2721 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.fs, uNewFS);
2722 if (rcStrict != VINF_SUCCESS)
2723 return rcStrict;
2724 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.gs, uNewGS);
2725 if (rcStrict != VINF_SUCCESS)
2726 return rcStrict;
2727
2728 /*
2729 * Load the code segment for the new task.
2730 */
2731 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
2732 {
2733 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
2734 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2735 }
2736
2737 /* Fetch the descriptor. */
2738 IEMSELDESC DescCS;
2739 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
2740 if (rcStrict != VINF_SUCCESS)
2741 {
2742 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
2743 return rcStrict;
2744 }
2745
2746 /* CS must be a code segment. */
2747 if ( !DescCS.Legacy.Gen.u1DescType
2748 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
2749 {
2750 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
2751 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
2752 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2753 }
2754
2755 /* For conforming CS, DPL must be less than or equal to the RPL. */
2756 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2757 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
2758 {
2759 Log(("iemTaskSwitch: conforming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
2760 DescCS.Legacy.Gen.u2Dpl));
2761 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2762 }
2763
2764 /* For non-conforming CS, DPL must match RPL. */
2765 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2766 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
2767 {
2768 Log(("iemTaskSwitch: non-conforming CS DPL RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
2769 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
2770 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2771 }
2772
2773 /* Is it there? */
2774 if (!DescCS.Legacy.Gen.u1Present)
2775 {
2776 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
2777 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2778 }
2779
2780 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
2781 u64Base = X86DESC_BASE(&DescCS.Legacy);
2782
2783 /* Set the accessed bit before committing the result into CS. */
2784 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2785 {
2786 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
2787 if (rcStrict != VINF_SUCCESS)
2788 return rcStrict;
2789 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2790 }
2791
2792 /* Commit CS. */
2793 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
2794 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCS;
2795 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
2796 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
2797 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
2798 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
2799 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
2800 }
2801
2802 /** @todo Debug trap. */
2803 if (fIsNewTSS386 && fNewDebugTrap)
2804 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
2805
2806 /*
2807 * Construct the error code masks based on what caused this task switch.
2808 * See Intel Instruction reference for INT.
2809 */
2810 uint16_t uExt;
2811 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
2812 && ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
2813 || (fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)))
2814 {
2815 uExt = 1;
2816 }
2817 else
2818 uExt = 0;
2819
2820 /*
2821 * Push any error code on to the new stack.
2822 */
2823 if (fFlags & IEM_XCPT_FLAGS_ERR)
2824 {
2825 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
2826 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
2827 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
2828
2829 /* Check that there is sufficient space on the stack. */
2830 /** @todo Factor out segment limit checking for normal/expand down segments
2831 * into a separate function. */
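        /* Reminder: a normal segment accepts offsets [0..limit], while an expand-down
           one accepts (limit..0xffff] or (limit..0xffffffff] depending on the D/B bit;
           that is what the two branches below check. */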
2832 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
2833 {
2834 if ( pVCpu->cpum.GstCtx.esp - 1 > cbLimitSS
2835 || pVCpu->cpum.GstCtx.esp < cbStackFrame)
2836 {
2837 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
2838 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
2839 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
2840 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
2841 }
2842 }
2843 else
2844 {
2845 if ( pVCpu->cpum.GstCtx.esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
2846 || pVCpu->cpum.GstCtx.esp - cbStackFrame < cbLimitSS + UINT32_C(1))
2847 {
2848 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
2849 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
2850 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
2851 }
2852 }
2853
2854
2855 if (fIsNewTSS386)
2856 rcStrict = iemMemStackPushU32(pVCpu, uErr);
2857 else
2858 rcStrict = iemMemStackPushU16(pVCpu, uErr);
2859 if (rcStrict != VINF_SUCCESS)
2860 {
2861 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
2862 fIsNewTSS386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
2863 return rcStrict;
2864 }
2865 }
2866
2867 /* Check the new EIP against the new CS limit. */
2868 if (pVCpu->cpum.GstCtx.eip > pVCpu->cpum.GstCtx.cs.u32Limit)
2869 {
2870 Log(("iemTaskSwitch: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
2871 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.cs.u32Limit));
2872 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
2873 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
2874 }
2875
2876 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip,
2877 pVCpu->cpum.GstCtx.ss.Sel));
2878 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
2879}
2880
2881
2882/**
2883 * Implements exceptions and interrupts for protected mode.
2884 *
2885 * @returns VBox strict status code.
2886 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2887 * @param cbInstr The number of bytes to offset rIP by in the return
2888 * address.
2889 * @param u8Vector The interrupt / exception vector number.
2890 * @param fFlags The flags.
2891 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2892 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2893 */
2894static VBOXSTRICTRC
2895iemRaiseXcptOrIntInProtMode(PVMCPUCC pVCpu,
2896 uint8_t cbInstr,
2897 uint8_t u8Vector,
2898 uint32_t fFlags,
2899 uint16_t uErr,
2900 uint64_t uCr2) RT_NOEXCEPT
2901{
2902 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
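    /* Protected-mode IDT entries are 8-byte gate descriptors (task, interrupt or trap
       gates), hence the vector * 8 indexing and the limit check against byte 7 of the
       entry below. */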
2903
2904 /*
2905 * Read the IDT entry.
2906 */
2907 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
2908 {
2909 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
2910 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2911 }
2912 X86DESC Idte;
2913 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
2914 pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(8) * u8Vector);
2915 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2916 {
2917 Log(("iemRaiseXcptOrIntInProtMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
2918 return rcStrict;
2919 }
2920 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
2921 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
2922 Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
2923
2924 /*
2925 * Check the descriptor type, DPL and such.
2926 * ASSUMES this is done in the same order as described for call-gate calls.
2927 */
2928 if (Idte.Gate.u1DescType)
2929 {
2930 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
2931 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2932 }
2933 bool fTaskGate = false;
2934 uint8_t f32BitGate = true;
2935 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
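    /* Gate type cheat sheet for the switch below: interrupt gates additionally clear
       EFLAGS.IF, trap gates leave IF alone, task gates dispatch via a TSS, and the
       286 variants imply a 16-bit stack frame. */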
2936 switch (Idte.Gate.u4Type)
2937 {
2938 case X86_SEL_TYPE_SYS_UNDEFINED:
2939 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
2940 case X86_SEL_TYPE_SYS_LDT:
2941 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
2942 case X86_SEL_TYPE_SYS_286_CALL_GATE:
2943 case X86_SEL_TYPE_SYS_UNDEFINED2:
2944 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
2945 case X86_SEL_TYPE_SYS_UNDEFINED3:
2946 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
2947 case X86_SEL_TYPE_SYS_386_CALL_GATE:
2948 case X86_SEL_TYPE_SYS_UNDEFINED4:
2949 {
2950 /** @todo check what actually happens when the type is wrong...
2951 * esp. call gates. */
2952 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
2953 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2954 }
2955
2956 case X86_SEL_TYPE_SYS_286_INT_GATE:
2957 f32BitGate = false;
2958 RT_FALL_THRU();
2959 case X86_SEL_TYPE_SYS_386_INT_GATE:
2960 fEflToClear |= X86_EFL_IF;
2961 break;
2962
2963 case X86_SEL_TYPE_SYS_TASK_GATE:
2964 fTaskGate = true;
2965#ifndef IEM_IMPLEMENTS_TASKSWITCH
2966 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
2967#endif
2968 break;
2969
2970 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
2971 f32BitGate = false;
RT_FALL_THRU();
2972 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
2973 break;
2974
2975 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2976 }
2977
2978 /* Check DPL against CPL if applicable. */
2979 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
2980 {
2981 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
2982 {
2983 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
2984 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2985 }
2986 }
2987
2988 /* Is it there? */
2989 if (!Idte.Gate.u1Present)
2990 {
2991 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
2992 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2993 }
2994
2995 /* Is it a task-gate? */
2996 if (fTaskGate)
2997 {
2998 /*
2999 * Construct the error code masks based on what caused this task switch.
3000 * See Intel Instruction reference for INT.
3001 */
3002 uint16_t const uExt = ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3003 && !(fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)) ? 0 : 1;
3004 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
3005 RTSEL SelTSS = Idte.Gate.u16Sel;
3006
3007 /*
3008 * Fetch the TSS descriptor in the GDT.
3009 */
3010 IEMSELDESC DescTSS;
3011 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
3012 if (rcStrict != VINF_SUCCESS)
3013 {
3014 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
3015 VBOXSTRICTRC_VAL(rcStrict)));
3016 return rcStrict;
3017 }
3018
3019 /* The TSS descriptor must be a system segment and be available (not busy). */
3020 if ( DescTSS.Legacy.Gen.u1DescType
3021 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
3022 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
3023 {
3024 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
3025 u8Vector, SelTSS, DescTSS.Legacy.au64));
3026 return iemRaiseGeneralProtectionFault(pVCpu, (SelTSS & uSelMask) | uExt);
3027 }
3028
3029 /* The TSS must be present. */
3030 if (!DescTSS.Legacy.Gen.u1Present)
3031 {
3032 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
3033 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTSS & uSelMask) | uExt);
3034 }
3035
3036 /* Do the actual task switch. */
3037 return iemTaskSwitch(pVCpu, IEMTASKSWITCH_INT_XCPT,
3038 (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip,
3039 fFlags, uErr, uCr2, SelTSS, &DescTSS);
3040 }
3041
3042 /* A null CS is bad. */
3043 RTSEL NewCS = Idte.Gate.u16Sel;
3044 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3045 {
3046 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3047 return iemRaiseGeneralProtectionFault0(pVCpu);
3048 }
3049
3050 /* Fetch the descriptor for the new CS. */
3051 IEMSELDESC DescCS;
3052 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
3053 if (rcStrict != VINF_SUCCESS)
3054 {
3055 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3056 return rcStrict;
3057 }
3058
3059 /* Must be a code segment. */
3060 if (!DescCS.Legacy.Gen.u1DescType)
3061 {
3062 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3063 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3064 }
3065 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3066 {
3067 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3068 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3069 }
3070
3071 /* Don't allow lowering the privilege level. */
3072 /** @todo Does the lowering of privileges apply to software interrupts
3073 * only? This has bearings on the more-privileged or
3074 * same-privilege stack behavior further down. A testcase would
3075 * be nice. */
3076 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
3077 {
3078 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3079 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
3080 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3081 }
3082
3083 /* Make sure the selector is present. */
3084 if (!DescCS.Legacy.Gen.u1Present)
3085 {
3086 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3087 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
3088 }
3089
3090 /* Check the new EIP against the new CS limit. */
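     /* Note: 286 interrupt/trap gates only hold a 16-bit offset, while 386 gates
        combine the low and high offset words into a 32-bit EIP. */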
3091 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
3092 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
3093 ? Idte.Gate.u16OffsetLow
3094 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
3095 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
3096 if (uNewEip > cbLimitCS)
3097 {
3098 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
3099 u8Vector, uNewEip, cbLimitCS, NewCS));
3100 return iemRaiseGeneralProtectionFault(pVCpu, 0);
3101 }
3102 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
3103
3104 /* Calc the flag image to push. */
3105 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3106 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3107 fEfl &= ~X86_EFL_RF;
3108 else
3109 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3110
3111 /* From V8086 mode only go to CPL 0. */
3112 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3113 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
3114 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
3115 {
3116 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
3117 return iemRaiseGeneralProtectionFault(pVCpu, 0);
3118 }
3119
3120 /*
3121 * If the privilege level changes, we need to get a new stack from the TSS.
3122 * This in turns means validating the new SS and ESP...
3123 */
3124 if (uNewCpl != pVCpu->iem.s.uCpl)
3125 {
3126 RTSEL NewSS;
3127 uint32_t uNewEsp;
3128 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, uNewCpl, &NewSS, &uNewEsp);
3129 if (rcStrict != VINF_SUCCESS)
3130 return rcStrict;
3131
3132 IEMSELDESC DescSS;
3133 rcStrict = iemMiscValidateNewSS(pVCpu, NewSS, uNewCpl, &DescSS);
3134 if (rcStrict != VINF_SUCCESS)
3135 return rcStrict;
3136 /* If the new SS is 16-bit, we are only going to use SP, not ESP. */
3137 if (!DescSS.Legacy.Gen.u1DefBig)
3138 {
3139 Log(("iemRaiseXcptOrIntInProtMode: Forcing ESP=%#x to 16 bits\n", uNewEsp));
3140 uNewEsp = (uint16_t)uNewEsp;
3141 }
3142
3143 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
3144
3145 /* Check that there is sufficient space for the stack frame. */
3146 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
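     /* Frame sizes below: a CPL-change frame holds SS:ESP, EFLAGS and CS:EIP (5 entries,
        i.e. 10 or 20 bytes for a 16/32-bit gate) plus an optional error code; when
        interrupting V8086 code, GS, FS, DS and ES are pushed as well (4 more entries). */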
3147 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
3148 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
3149 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
3150
3151 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3152 {
3153 if ( uNewEsp - 1 > cbLimitSS
3154 || uNewEsp < cbStackFrame)
3155 {
3156 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
3157 u8Vector, NewSS, uNewEsp, cbStackFrame));
3158 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
3159 }
3160 }
3161 else
3162 {
3163 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT16_MAX)
3164 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
3165 {
3166 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
3167 u8Vector, NewSS, uNewEsp, cbStackFrame));
3168 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
3169 }
3170 }
3171
3172 /*
3173 * Start making changes.
3174 */
3175
3176 /* Set the new CPL so that stack accesses use it. */
3177 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
3178 pVCpu->iem.s.uCpl = uNewCpl;
3179
3180 /* Create the stack frame. */
3181 RTPTRUNION uStackFrame;
3182 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
3183 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy),
3184 IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */
3185 if (rcStrict != VINF_SUCCESS)
3186 return rcStrict;
3187 void * const pvStackFrame = uStackFrame.pv;
3188 if (f32BitGate)
3189 {
3190 if (fFlags & IEM_XCPT_FLAGS_ERR)
3191 *uStackFrame.pu32++ = uErr;
3192 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3193 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
3194 uStackFrame.pu32[2] = fEfl;
3195 uStackFrame.pu32[3] = pVCpu->cpum.GstCtx.esp;
3196 uStackFrame.pu32[4] = pVCpu->cpum.GstCtx.ss.Sel;
3197 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
3198 if (fEfl & X86_EFL_VM)
3199 {
3200 uStackFrame.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel;
3201 uStackFrame.pu32[5] = pVCpu->cpum.GstCtx.es.Sel;
3202 uStackFrame.pu32[6] = pVCpu->cpum.GstCtx.ds.Sel;
3203 uStackFrame.pu32[7] = pVCpu->cpum.GstCtx.fs.Sel;
3204 uStackFrame.pu32[8] = pVCpu->cpum.GstCtx.gs.Sel;
3205 }
3206 }
3207 else
3208 {
3209 if (fFlags & IEM_XCPT_FLAGS_ERR)
3210 *uStackFrame.pu16++ = uErr;
3211 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
3212 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
3213 uStackFrame.pu16[2] = fEfl;
3214 uStackFrame.pu16[3] = pVCpu->cpum.GstCtx.sp;
3215 uStackFrame.pu16[4] = pVCpu->cpum.GstCtx.ss.Sel;
3216 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.sp));
3217 if (fEfl & X86_EFL_VM)
3218 {
3219 uStackFrame.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
3220 uStackFrame.pu16[5] = pVCpu->cpum.GstCtx.es.Sel;
3221 uStackFrame.pu16[6] = pVCpu->cpum.GstCtx.ds.Sel;
3222 uStackFrame.pu16[7] = pVCpu->cpum.GstCtx.fs.Sel;
3223 uStackFrame.pu16[8] = pVCpu->cpum.GstCtx.gs.Sel;
3224 }
3225 }
3226 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
3227 if (rcStrict != VINF_SUCCESS)
3228 return rcStrict;
3229
3230 /* Mark the selectors 'accessed' (hope this is the correct time). */
3231 /** @todo testcase: exactly _when_ are the accessed bits set - before or
3232 * after pushing the stack frame? (Write protect the gdt + stack to
3233 * find out.) */
3234 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3235 {
3236 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3237 if (rcStrict != VINF_SUCCESS)
3238 return rcStrict;
3239 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3240 }
3241
3242 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3243 {
3244 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
3245 if (rcStrict != VINF_SUCCESS)
3246 return rcStrict;
3247 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3248 }
3249
3250 /*
3251 * Start committing the register changes (joins with the DPL=CPL branch).
3252 */
3253 pVCpu->cpum.GstCtx.ss.Sel = NewSS;
3254 pVCpu->cpum.GstCtx.ss.ValidSel = NewSS;
3255 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3256 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimitSS;
3257 pVCpu->cpum.GstCtx.ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
3258 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3259 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
3260 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
3261 * SP is loaded).
3262 * Need to check the other combinations too:
3263 * - 16-bit TSS, 32-bit handler
3264 * - 32-bit TSS, 16-bit handler */
3265 if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
3266 pVCpu->cpum.GstCtx.sp = (uint16_t)(uNewEsp - cbStackFrame);
3267 else
3268 pVCpu->cpum.GstCtx.rsp = uNewEsp - cbStackFrame;
3269
3270 if (fEfl & X86_EFL_VM)
3271 {
3272 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.gs);
3273 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.fs);
3274 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.es);
3275 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.ds);
3276 }
3277 }
3278 /*
3279 * Same privilege, no stack change and smaller stack frame.
3280 */
3281 else
3282 {
3283 uint64_t uNewRsp;
3284 RTPTRUNION uStackFrame;
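     /* A same-privilege frame only holds EFLAGS, CS and EIP (3 entries, i.e. 6 or 12
        bytes for a 16/32-bit gate) plus an optional error code. */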
3285 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
3286 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, f32BitGate ? 3 : 1, &uStackFrame.pv, &uNewRsp);
3287 if (rcStrict != VINF_SUCCESS)
3288 return rcStrict;
3289 void * const pvStackFrame = uStackFrame.pv;
3290
3291 if (f32BitGate)
3292 {
3293 if (fFlags & IEM_XCPT_FLAGS_ERR)
3294 *uStackFrame.pu32++ = uErr;
3295 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3296 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
3297 uStackFrame.pu32[2] = fEfl;
3298 }
3299 else
3300 {
3301 if (fFlags & IEM_XCPT_FLAGS_ERR)
3302 *uStackFrame.pu16++ = uErr;
3303 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3304 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
3305 uStackFrame.pu16[2] = fEfl;
3306 }
3307 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
3308 if (rcStrict != VINF_SUCCESS)
3309 return rcStrict;
3310
3311 /* Mark the CS selector as 'accessed'. */
3312 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3313 {
3314 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3315 if (rcStrict != VINF_SUCCESS)
3316 return rcStrict;
3317 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3318 }
3319
3320 /*
3321 * Start committing the register changes (joins with the other branch).
3322 */
3323 pVCpu->cpum.GstCtx.rsp = uNewRsp;
3324 }
3325
3326 /* ... register committing continues. */
3327 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3328 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3329 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3330 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCS;
3331 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3332 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3333
3334 pVCpu->cpum.GstCtx.rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
3335 fEfl &= ~fEflToClear;
3336 IEMMISC_SET_EFL(pVCpu, fEfl);
3337
3338 if (fFlags & IEM_XCPT_FLAGS_CR2)
3339 pVCpu->cpum.GstCtx.cr2 = uCr2;
3340
3341 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3342 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3343
3344 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3345}
3346
3347
3348/**
3349 * Implements exceptions and interrupts for long mode.
3350 *
3351 * @returns VBox strict status code.
3352 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3353 * @param cbInstr The number of bytes to offset rIP by in the return
3354 * address.
3355 * @param u8Vector The interrupt / exception vector number.
3356 * @param fFlags The flags.
3357 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3358 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3359 */
3360static VBOXSTRICTRC
3361iemRaiseXcptOrIntInLongMode(PVMCPUCC pVCpu,
3362 uint8_t cbInstr,
3363 uint8_t u8Vector,
3364 uint32_t fFlags,
3365 uint16_t uErr,
3366 uint64_t uCr2) RT_NOEXCEPT
3367{
3368 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3369
3370 /*
3371 * Read the IDT entry.
3372 */
3373 uint16_t offIdt = (uint16_t)u8Vector << 4;
3374 if (pVCpu->cpum.GstCtx.idtr.cbIdt < offIdt + 7)
3375 {
3376 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3377 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3378 }
3379 X86DESC64 Idte;
3380#ifdef _MSC_VER /* Shut up silly compiler warning. */
3381 Idte.au64[0] = 0;
3382 Idte.au64[1] = 0;
3383#endif
3384 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt);
3385 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3386 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt + 8);
3387 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3388 {
3389 Log(("iemRaiseXcptOrIntInLongMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3390 return rcStrict;
3391 }
3392 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
3393 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3394 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
3395
3396 /*
3397 * Check the descriptor type, DPL and such.
3398 * ASSUMES this is done in the same order as described for call-gate calls.
3399 */
3400 if (Idte.Gate.u1DescType)
3401 {
3402 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3403 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3404 }
3405 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
3406 switch (Idte.Gate.u4Type)
3407 {
3408 case AMD64_SEL_TYPE_SYS_INT_GATE:
3409 fEflToClear |= X86_EFL_IF;
3410 break;
3411 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
3412 break;
3413
3414 default:
3415 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3416 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3417 }
3418
3419 /* Check DPL against CPL if applicable. */
3420 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
3421 {
3422 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
3423 {
3424 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
3425 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3426 }
3427 }
3428
3429 /* Is it there? */
3430 if (!Idte.Gate.u1Present)
3431 {
3432 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
3433 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3434 }
3435
3436 /* A null CS is bad. */
3437 RTSEL NewCS = Idte.Gate.u16Sel;
3438 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3439 {
3440 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3441 return iemRaiseGeneralProtectionFault0(pVCpu);
3442 }
3443
3444 /* Fetch the descriptor for the new CS. */
3445 IEMSELDESC DescCS;
3446 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
3447 if (rcStrict != VINF_SUCCESS)
3448 {
3449 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3450 return rcStrict;
3451 }
3452
3453 /* Must be a 64-bit code segment. */
3454 if (!DescCS.Long.Gen.u1DescType)
3455 {
3456 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3457 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3458 }
3459 if ( !DescCS.Long.Gen.u1Long
3460 || DescCS.Long.Gen.u1DefBig
3461 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
3462 {
3463 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
3464 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
3465 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3466 }
3467
3468 /* Don't allow lowering the privilege level. For non-conforming CS
3469 selectors, the CS.DPL sets the privilege level the trap/interrupt
3470 handler runs at. For conforming CS selectors, the CPL remains
3471 unchanged, but the CS.DPL must be <= CPL. */
3472 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
3473 * when CPU in Ring-0. Result \#GP? */
3474 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
3475 {
3476 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3477 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
3478 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3479 }
3480
3481
3482 /* Make sure the selector is present. */
3483 if (!DescCS.Legacy.Gen.u1Present)
3484 {
3485 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3486 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
3487 }
3488
3489 /* Check that the new RIP is canonical. */
3490 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
3491 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
3492 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
3493 if (!IEM_IS_CANONICAL(uNewRip))
3494 {
3495 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
3496 return iemRaiseGeneralProtectionFault0(pVCpu);
3497 }
3498
3499 /*
3500 * If the privilege level changes or if the IST isn't zero, we need to get
3501 * a new stack from the TSS.
3502 */
3503 uint64_t uNewRsp;
3504 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3505 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
3506 if ( uNewCpl != pVCpu->iem.s.uCpl
3507 || Idte.Gate.u3IST != 0)
3508 {
3509 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
3510 if (rcStrict != VINF_SUCCESS)
3511 return rcStrict;
3512 }
3513 else
3514 uNewRsp = pVCpu->cpum.GstCtx.rsp;
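     /* In 64-bit mode the CPU aligns the stack pointer down to a 16-byte boundary
        before pushing the interrupt/exception frame. */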
3515 uNewRsp &= ~(uint64_t)0xf;
3516
3517 /*
3518 * Calc the flag image to push.
3519 */
3520 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3521 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3522 fEfl &= ~X86_EFL_RF;
3523 else
3524 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3525
3526 /*
3527 * Start making changes.
3528 */
3529 /* Set the new CPL so that stack accesses use it. */
3530 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
3531 pVCpu->iem.s.uCpl = uNewCpl;
3532
3533 /* Create the stack frame. */
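     /* The 64-bit frame always holds RIP, CS, RFLAGS, RSP and SS (5 qwords), with the
        error code qword below them when one is pushed. */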
3534 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
3535 RTPTRUNION uStackFrame;
3536 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
3537 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */
3538 if (rcStrict != VINF_SUCCESS)
3539 return rcStrict;
3540 void * const pvStackFrame = uStackFrame.pv;
3541
3542 if (fFlags & IEM_XCPT_FLAGS_ERR)
3543 *uStackFrame.pu64++ = uErr;
3544 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.rip + cbInstr : pVCpu->cpum.GstCtx.rip;
3545 uStackFrame.pu64[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
3546 uStackFrame.pu64[2] = fEfl;
3547 uStackFrame.pu64[3] = pVCpu->cpum.GstCtx.rsp;
3548 uStackFrame.pu64[4] = pVCpu->cpum.GstCtx.ss.Sel;
3549 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
3550 if (rcStrict != VINF_SUCCESS)
3551 return rcStrict;
3552
3553 /* Mark the CS selector as 'accessed' (hope this is the correct time). */
3554 /** @todo testcase: exactly _when_ are the accessed bits set - before or
3555 * after pushing the stack frame? (Write protect the gdt + stack to
3556 * find out.) */
3557 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3558 {
3559 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3560 if (rcStrict != VINF_SUCCESS)
3561 return rcStrict;
3562 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3563 }
3564
3565 /*
3566 * Start committing the register changes.
3567 */
3568 /** @todo research/testcase: Figure out what VT-x and AMD-V load into the
3569 * hidden registers when interrupting 32-bit or 16-bit code! */
3570 if (uNewCpl != uOldCpl)
3571 {
3572 pVCpu->cpum.GstCtx.ss.Sel = 0 | uNewCpl;
3573 pVCpu->cpum.GstCtx.ss.ValidSel = 0 | uNewCpl;
3574 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3575 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
3576 pVCpu->cpum.GstCtx.ss.u64Base = 0;
3577 pVCpu->cpum.GstCtx.ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
3578 }
3579 pVCpu->cpum.GstCtx.rsp = uNewRsp - cbStackFrame;
3580 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3581 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3582 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3583 pVCpu->cpum.GstCtx.cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
3584 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3585 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3586 pVCpu->cpum.GstCtx.rip = uNewRip;
3587
3588 fEfl &= ~fEflToClear;
3589 IEMMISC_SET_EFL(pVCpu, fEfl);
3590
3591 if (fFlags & IEM_XCPT_FLAGS_CR2)
3592 pVCpu->cpum.GstCtx.cr2 = uCr2;
3593
3594 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3595 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3596
3597 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3598}
3599
3600
3601/**
3602 * Implements exceptions and interrupts.
3603 *
3604 * All exceptions and interrupts go through this function!
3605 *
3606 * @returns VBox strict status code.
3607 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3608 * @param cbInstr The number of bytes to offset rIP by in the return
3609 * address.
3610 * @param u8Vector The interrupt / exception vector number.
3611 * @param fFlags The flags.
3612 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3613 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3614 */
3615VBOXSTRICTRC
3616iemRaiseXcptOrInt(PVMCPUCC pVCpu,
3617 uint8_t cbInstr,
3618 uint8_t u8Vector,
3619 uint32_t fFlags,
3620 uint16_t uErr,
3621 uint64_t uCr2) RT_NOEXCEPT
3622{
3623 /*
3624 * Get all the state that we might need here.
3625 */
3626 IEM_CTX_IMPORT_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3627 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3628
3629#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
3630 /*
3631 * Flush prefetch buffer
3632 */
3633 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
3634#endif
3635
3636 /*
3637 * Perform the V8086 IOPL check and upgrade the fault without nesting.
3638 */
3639 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1VM
3640 && pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL != 3
3641 && (fFlags & ( IEM_XCPT_FLAGS_T_SOFT_INT
3642 | IEM_XCPT_FLAGS_BP_INSTR
3643 | IEM_XCPT_FLAGS_ICEBP_INSTR
3644 | IEM_XCPT_FLAGS_OF_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
3645 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) )
3646 {
3647 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
3648 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
3649 u8Vector = X86_XCPT_GP;
3650 uErr = 0;
3651 }
3652#ifdef DBGFTRACE_ENABLED
3653 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
3654 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
3655 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp);
3656#endif
3657
3658 /*
3659 * Evaluate whether NMI blocking should be in effect.
3660 * Normally, NMI blocking is in effect whenever we inject an NMI.
3661 */
3662 bool fBlockNmi;
3663 if ( u8Vector == X86_XCPT_NMI
3664 && (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT))
3665 fBlockNmi = true;
3666 else
3667 fBlockNmi = false;
3668
3669#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3670 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3671 {
3672 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEvent(pVCpu, u8Vector, fFlags, uErr, uCr2, cbInstr);
3673 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
3674 return rcStrict0;
3675
3676 /* If virtual-NMI blocking is in effect for the nested-guest, guest NMIs are not blocked. */
3677 if (pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking)
3678 {
3679 Assert(CPUMIsGuestVmxPinCtlsSet(&pVCpu->cpum.GstCtx, VMX_PIN_CTLS_VIRT_NMI));
3680 fBlockNmi = false;
3681 }
3682 }
3683#endif
3684
3685#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
3686 if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
3687 {
3688 /*
3689 * If the event is being injected as part of VMRUN, it isn't subject to event
3690 * intercepts in the nested-guest. However, secondary exceptions that occur
3691 * during injection of any event -are- subject to exception intercepts.
3692 *
3693 * See AMD spec. 15.20 "Event Injection".
3694 */
3695 if (!pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents)
3696 pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents = true;
3697 else
3698 {
3699 /*
3700 * Check and handle if the event being raised is intercepted.
3701 */
3702 VBOXSTRICTRC rcStrict0 = iemHandleSvmEventIntercept(pVCpu, u8Vector, fFlags, uErr, uCr2);
3703 if (rcStrict0 != VINF_SVM_INTERCEPT_NOT_ACTIVE)
3704 return rcStrict0;
3705 }
3706 }
3707#endif
3708
3709 /*
3710 * Set NMI blocking if necessary.
3711 */
3712 if ( fBlockNmi
3713 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
3714 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
3715
3716 /*
3717 * Do recursion accounting.
3718 */
3719 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
3720 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
3721 if (pVCpu->iem.s.cXcptRecursions == 0)
3722 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
3723 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2));
3724 else
3725 {
3726 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
3727 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt,
3728 pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
3729
3730 if (pVCpu->iem.s.cXcptRecursions >= 4)
3731 {
3732#ifdef DEBUG_bird
3733 AssertFailed();
3734#endif
3735 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
3736 }
3737
3738 /*
3739 * Evaluate the sequence of recurring events.
3740 */
3741 IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevXcpt, uPrevXcpt, fFlags, u8Vector,
3742 NULL /* pXcptRaiseInfo */);
3743 if (enmRaise == IEMXCPTRAISE_CURRENT_XCPT)
3744 { /* likely */ }
3745 else if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
3746 {
3747 Log2(("iemRaiseXcptOrInt: Raising double fault. uPrevXcpt=%#x\n", uPrevXcpt));
3748 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
3749 u8Vector = X86_XCPT_DF;
3750 uErr = 0;
3751#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3752 /* VMX nested-guest #DF intercept needs to be checked here. */
3753 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3754 {
3755 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEventDoubleFault(pVCpu);
3756 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
3757 return rcStrict0;
3758 }
3759#endif
3760 /* SVM nested-guest #DF intercepts need to be checked now. See AMD spec. 15.12 "Exception Intercepts". */
3761 if (IEM_SVM_IS_XCPT_INTERCEPT_SET(pVCpu, X86_XCPT_DF))
3762 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
3763 }
3764 else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)
3765 {
3766 Log2(("iemRaiseXcptOrInt: Raising triple fault. uPrevXcpt=%#x\n", uPrevXcpt));
3767 return iemInitiateCpuShutdown(pVCpu);
3768 }
3769 else if (enmRaise == IEMXCPTRAISE_CPU_HANG)
3770 {
3771 /* If a nested-guest enters an endless CPU loop condition, we'll emulate it; otherwise guru. */
3772 Log2(("iemRaiseXcptOrInt: CPU hang condition detected\n"));
3773 if ( !CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu))
3774 && !CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
3775 return VERR_EM_GUEST_CPU_HANG;
3776 }
3777 else
3778 {
3779 AssertMsgFailed(("Unexpected condition! enmRaise=%#x uPrevXcpt=%#x fPrevXcpt=%#x, u8Vector=%#x fFlags=%#x\n",
3780 enmRaise, uPrevXcpt, fPrevXcpt, u8Vector, fFlags));
3781 return VERR_IEM_IPE_9;
3782 }
3783
3784 /*
3785 * The 'EXT' bit is set when an exception occurs during delivery of an external
3786 * event (such as an interrupt or an earlier exception)[1]. The privileged software
3787 * exception (INT1) also sets the EXT bit[2]. For exceptions generated by software
3788 * interrupts and the INTO and INT3 instructions, the 'EXT' bit is not set[3].
3789 *
3790 * [1] - Intel spec. 6.13 "Error Code"
3791 * [2] - Intel spec. 26.5.1.1 "Details of Vectored-Event Injection".
3792 * [3] - Intel Instruction reference for INT n.
3793 */
3794 if ( (fPrevXcpt & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR))
3795 && (fFlags & IEM_XCPT_FLAGS_ERR)
3796 && u8Vector != X86_XCPT_PF
3797 && u8Vector != X86_XCPT_DF)
3798 {
3799 uErr |= X86_TRAP_ERR_EXTERNAL;
3800 }
3801 }
3802
3803 pVCpu->iem.s.cXcptRecursions++;
3804 pVCpu->iem.s.uCurXcpt = u8Vector;
3805 pVCpu->iem.s.fCurXcpt = fFlags;
3806 pVCpu->iem.s.uCurXcptErr = uErr;
3807 pVCpu->iem.s.uCurXcptCr2 = uCr2;
3808
3809 /*
3810 * Extensive logging.
3811 */
3812#if defined(LOG_ENABLED) && defined(IN_RING3)
3813 if (LogIs3Enabled())
3814 {
3815 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR_MASK);
3816 PVM pVM = pVCpu->CTX_SUFF(pVM);
3817 char szRegs[4096];
3818 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
3819 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
3820 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
3821 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
3822 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
3823 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
3824 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
3825 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
3826 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
3827 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
3828 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
3829 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
3830 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
3831 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
3832 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
3833 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
3834 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
3835 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
3836 " efer=%016VR{efer}\n"
3837 " pat=%016VR{pat}\n"
3838 " sf_mask=%016VR{sf_mask}\n"
3839 "krnl_gs_base=%016VR{krnl_gs_base}\n"
3840 " lstar=%016VR{lstar}\n"
3841 " star=%016VR{star} cstar=%016VR{cstar}\n"
3842 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
3843 );
3844
3845 char szInstr[256];
3846 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
3847 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
3848 szInstr, sizeof(szInstr), NULL);
3849 Log3(("%s%s\n", szRegs, szInstr));
3850 }
3851#endif /* LOG_ENABLED */
3852
3853 /*
3854 * Stats.
3855 */
3856 if (!(fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT))
3857 STAM_REL_STATS({ pVCpu->iem.s.aStatInts[u8Vector] += 1; });
3858 else if (u8Vector <= X86_XCPT_LAST)
3859 {
3860 STAM_REL_COUNTER_INC(&pVCpu->iem.s.aStatXcpts[u8Vector]);
3861 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_XCPT, u8Vector),
3862 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base, ASMReadTSC());
3863 }
3864
3865 /*
3866 * A #PF implies an INVLPG for the CR2 value (see 4.10.1.1 in Intel SDM Vol 3)
3867 * to ensure that a stale TLB or paging cache entry will only cause one
3868 * spurious #PF.
3869 */
3870 if ( u8Vector == X86_XCPT_PF
3871 && (fFlags & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_CR2)) == (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_CR2))
3872 IEMTlbInvalidatePage(pVCpu, uCr2);
3873
3874 /*
3875 * Call the mode specific worker function.
3876 */
3877 VBOXSTRICTRC rcStrict;
3878 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
3879 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
3880 else if (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LMA)
3881 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
3882 else
3883 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
3884
3885 /* Flush the prefetch buffer. */
3886#ifdef IEM_WITH_CODE_TLB
3887 pVCpu->iem.s.pbInstrBuf = NULL;
3888#else
3889 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
3890#endif
3891
3892 /*
3893 * Unwind.
3894 */
3895 pVCpu->iem.s.cXcptRecursions--;
3896 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
3897 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
3898 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u depth=%d\n",
3899 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, pVCpu->iem.s.uCpl,
3900 pVCpu->iem.s.cXcptRecursions + 1));
3901 return rcStrict;
3902}
3903
3904#ifdef IEM_WITH_SETJMP
3905/**
3906 * See iemRaiseXcptOrInt. Will not return.
3907 */
3908DECL_NO_RETURN(void)
3909iemRaiseXcptOrIntJmp(PVMCPUCC pVCpu,
3910 uint8_t cbInstr,
3911 uint8_t u8Vector,
3912 uint32_t fFlags,
3913 uint16_t uErr,
3914 uint64_t uCr2) RT_NOEXCEPT
3915{
3916 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
3917 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
3918}
3919#endif
3920
3921
3922/** \#DE - 00. */
3923VBOXSTRICTRC iemRaiseDivideError(PVMCPUCC pVCpu) RT_NOEXCEPT
3924{
3925 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
3926}
3927
3928
3929/** \#DB - 01.
3930 * @note This automatically clears DR7.GD. */
3931VBOXSTRICTRC iemRaiseDebugException(PVMCPUCC pVCpu) RT_NOEXCEPT
3932{
3933 /** @todo set/clear RF. */
3934 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
3935 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
3936}
3937
3938
3939/** \#BR - 05. */
3940VBOXSTRICTRC iemRaiseBoundRangeExceeded(PVMCPUCC pVCpu) RT_NOEXCEPT
3941{
3942 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_BR, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
3943}
3944
3945
3946/** \#UD - 06. */
3947VBOXSTRICTRC iemRaiseUndefinedOpcode(PVMCPUCC pVCpu) RT_NOEXCEPT
3948{
3949 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
3950}
3951
3952
3953/** \#NM - 07. */
3954VBOXSTRICTRC iemRaiseDeviceNotAvailable(PVMCPUCC pVCpu) RT_NOEXCEPT
3955{
3956 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
3957}
3958
3959
3960/** \#TS(err) - 0a. */
3961VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
3962{
3963 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
3964}
3965
3966
3967/** \#TS(tr) - 0a. */
3968VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPUCC pVCpu) RT_NOEXCEPT
3969{
3970 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
3971 pVCpu->cpum.GstCtx.tr.Sel, 0);
3972}
3973
3974
3975/** \#TS(0) - 0a. */
3976VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPUCC pVCpu) RT_NOEXCEPT
3977{
3978 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
3979 0, 0);
3980}
3981
3982
3983/** \#TS(err) - 0a. */
3984VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
3985{
3986 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
3987 uSel & X86_SEL_MASK_OFF_RPL, 0);
3988}
3989
3990
3991/** \#NP(err) - 0b. */
3992VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
3993{
3994 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
3995}
3996
3997
3998/** \#NP(sel) - 0b. */
3999VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4000{
4001 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4002 uSel & ~X86_SEL_RPL, 0);
4003}
4004
4005
4006/** \#SS(seg) - 0c. */
4007VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4008{
4009 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4010 uSel & ~X86_SEL_RPL, 0);
4011}
4012
4013
4014/** \#SS(err) - 0c. */
4015VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4016{
4017 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4018}
4019
4020
4021/** \#GP(n) - 0d. */
4022VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4023{
4024 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4025}
4026
4027
4028/** \#GP(0) - 0d. */
4029VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPUCC pVCpu) RT_NOEXCEPT
4030{
4031 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4032}
4033
4034#ifdef IEM_WITH_SETJMP
4035/** \#GP(0) - 0d. */
4036DECL_NO_RETURN(void) iemRaiseGeneralProtectionFault0Jmp(PVMCPUCC pVCpu) RT_NOEXCEPT
4037{
4038 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4039}
4040#endif
4041
4042
4043/** \#GP(sel) - 0d. */
4044VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
4045{
4046 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4047 Sel & ~X86_SEL_RPL, 0);
4048}
4049
4050
4051/** \#GP(0) - 0d. */
4052VBOXSTRICTRC iemRaiseNotCanonical(PVMCPUCC pVCpu) RT_NOEXCEPT
4053{
4054 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4055}
4056
4057
4058/** \#GP(sel) - 0d. */
4059VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4060{
4061 NOREF(iSegReg); NOREF(fAccess);
4062 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4063 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4064}
4065
4066#ifdef IEM_WITH_SETJMP
4067/** \#GP(sel) - 0d, longjmp. */
4068DECL_NO_RETURN(void) iemRaiseSelectorBoundsJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4069{
4070 NOREF(iSegReg); NOREF(fAccess);
4071 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4072 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4073}
4074#endif
4075
4076/** \#GP(sel) - 0d. */
4077VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
4078{
4079 NOREF(Sel);
4080 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4081}
4082
4083#ifdef IEM_WITH_SETJMP
4084/** \#GP(sel) - 0d, longjmp. */
4085DECL_NO_RETURN(void) iemRaiseSelectorBoundsBySelectorJmp(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
4086{
4087 NOREF(Sel);
4088 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4089}
4090#endif
4091
4092
4093/** \#GP(sel) - 0d. */
4094VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4095{
4096 NOREF(iSegReg); NOREF(fAccess);
4097 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4098}
4099
4100#ifdef IEM_WITH_SETJMP
4101/** \#GP(sel) - 0d, longjmp. */
4102DECL_NO_RETURN(void) iemRaiseSelectorInvalidAccessJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4103{
4104 NOREF(iSegReg); NOREF(fAccess);
4105 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4106}
4107#endif
4108
4109
4110/** \#PF(n) - 0e. */
4111VBOXSTRICTRC iemRaisePageFault(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc) RT_NOEXCEPT
4112{
4113 uint16_t uErr;
4114 switch (rc)
4115 {
4116 case VERR_PAGE_NOT_PRESENT:
4117 case VERR_PAGE_TABLE_NOT_PRESENT:
4118 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
4119 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
4120 uErr = 0;
4121 break;
4122
4123 default:
4124 AssertMsgFailed(("%Rrc\n", rc));
4125 RT_FALL_THRU();
4126 case VERR_ACCESS_DENIED:
4127 uErr = X86_TRAP_PF_P;
4128 break;
4129
4130 /** @todo reserved */
4131 }
4132
4133 if (pVCpu->iem.s.uCpl == 3)
4134 uErr |= X86_TRAP_PF_US;
4135
4136 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
4137 && ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
4138 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) ) )
4139 uErr |= X86_TRAP_PF_ID;
4140
4141#if 0 /* This is so much non-sense, really. Why was it done like that? */
4142 /* Note! RW access callers reporting a WRITE protection fault, will clear
4143 the READ flag before calling. So, read-modify-write accesses (RW)
4144 can safely be reported as READ faults. */
4145 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
4146 uErr |= X86_TRAP_PF_RW;
4147#else
4148 if (fAccess & IEM_ACCESS_TYPE_WRITE)
4149 {
4150 /// @todo r=bird: bs3-cpu-basic-2 wants X86_TRAP_PF_RW for xchg and cmpxchg
4151 /// (regardless of outcome of the comparison in the latter case).
4152 //if (!(fAccess & IEM_ACCESS_TYPE_READ))
4153 uErr |= X86_TRAP_PF_RW;
4154 }
4155#endif
4156
4157 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
4158 uErr, GCPtrWhere);
4159}
4160
4161#ifdef IEM_WITH_SETJMP
4162/** \#PF(n) - 0e, longjmp. */
4163DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc) RT_NOEXCEPT
4164{
4165 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, fAccess, rc)));
4166}
4167#endif
4168
4169
4170/** \#MF(0) - 10. */
4171VBOXSTRICTRC iemRaiseMathFault(PVMCPUCC pVCpu)
4172{
4173 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_NE)
4174 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4175 else
4176 {
4177 /* Convert a #MF into a FERR -> IRQ 13. See @bugref{6117}. */
4178 PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13 /* u8Irq */, 1 /* u8Level */, 0 /* uTagSrc */);
4179 iemRegUpdateRipAndClearRF(pVCpu);
4180 return VINF_SUCCESS;
4181 }
4182}
4183
4184
4185/** \#AC(0) - 11. */
4186VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPUCC pVCpu)
4187{
4188 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4189}
4190
4191#ifdef IEM_WITH_SETJMP
4192/** \#AC(0) - 11, longjmp. */
4193DECL_NO_RETURN(void) iemRaiseAlignmentCheckExceptionJmp(PVMCPUCC pVCpu) RT_NOEXCEPT
4194{
4195 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(iemRaiseAlignmentCheckException(pVCpu)));
4196}
4197#endif
4198
4199
4200/** \#XF(0)/\#XM(0) - 19. */
4201VBOXSTRICTRC iemRaiseSimdFpException(PVMCPUCC pVCpu) RT_NOEXCEPT
4202{
4203 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_XF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4204}
4205
4206
4207/** Accessed via IEMOP_RAISE_DIVIDE_ERROR. */
4208IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
4209{
4210 NOREF(cbInstr);
4211 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4212}
4213
4214
4215/** Accessed via IEMOP_RAISE_INVALID_LOCK_PREFIX. */
4216IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
4217{
4218 NOREF(cbInstr);
4219 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4220}
4221
4222
4223/** Accessed via IEMOP_RAISE_INVALID_OPCODE. */
4224IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
4225{
4226 NOREF(cbInstr);
4227 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4228}
4229
4230
4231/** @} */
4232
4233/** @name Common opcode decoders.
4234 * @{
4235 */
4236//#include <iprt/mem.h>
4237
4238/**
4239 * Used to add extra details about a stub case.
4240 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4241 */
4242void iemOpStubMsg2(PVMCPUCC pVCpu) RT_NOEXCEPT
4243{
4244#if defined(LOG_ENABLED) && defined(IN_RING3)
4245 PVM pVM = pVCpu->CTX_SUFF(pVM);
4246 char szRegs[4096];
4247 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
4248 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
4249 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
4250 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
4251 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
4252 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
4253 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
4254 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
4255 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
4256 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
4257 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
4258 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4259 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4260 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4261 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4262 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4263 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4264 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4265 " efer=%016VR{efer}\n"
4266 " pat=%016VR{pat}\n"
4267 " sf_mask=%016VR{sf_mask}\n"
4268 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4269 " lstar=%016VR{lstar}\n"
4270 " star=%016VR{star} cstar=%016VR{cstar}\n"
4271 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4272 );
4273
4274 char szInstr[256];
4275 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4276 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4277 szInstr, sizeof(szInstr), NULL);
4278
4279 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
4280#else
4281 RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip);
4282#endif
4283}
4284
4285/** @} */
4286
4287
4288
4289/** @name Register Access.
4290 * @{
4291 */
4292
4293/**
4294 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
4295 *
4296 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4297 * segment limit.
4298 *
4299 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4300 * @param offNextInstr The offset of the next instruction.
4301 */
4302VBOXSTRICTRC iemRegRipRelativeJumpS8(PVMCPUCC pVCpu, int8_t offNextInstr) RT_NOEXCEPT
4303{
4304 switch (pVCpu->iem.s.enmEffOpSize)
4305 {
4306 case IEMMODE_16BIT:
4307 {
4308 uint16_t uNewIp = pVCpu->cpum.GstCtx.ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
4309 if ( uNewIp > pVCpu->cpum.GstCtx.cs.u32Limit
4310 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
4311 return iemRaiseGeneralProtectionFault0(pVCpu);
4312 pVCpu->cpum.GstCtx.rip = uNewIp;
4313 break;
4314 }
4315
4316 case IEMMODE_32BIT:
4317 {
4318 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
4319 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
4320
4321 uint32_t uNewEip = pVCpu->cpum.GstCtx.eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
4322 if (uNewEip > pVCpu->cpum.GstCtx.cs.u32Limit)
4323 return iemRaiseGeneralProtectionFault0(pVCpu);
4324 pVCpu->cpum.GstCtx.rip = uNewEip;
4325 break;
4326 }
4327
4328 case IEMMODE_64BIT:
4329 {
4330 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
4331
4332 uint64_t uNewRip = pVCpu->cpum.GstCtx.rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
4333 if (!IEM_IS_CANONICAL(uNewRip))
4334 return iemRaiseGeneralProtectionFault0(pVCpu);
4335 pVCpu->cpum.GstCtx.rip = uNewRip;
4336 break;
4337 }
4338
4339 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4340 }
4341
4342 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
4343
4344#ifndef IEM_WITH_CODE_TLB
4345 /* Flush the prefetch buffer. */
4346 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4347#endif
4348
4349 return VINF_SUCCESS;
4350}
4351
4352
4353/**
4354 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
4355 *
4356 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4357 * segment limit.
4358 *
4359 * @returns Strict VBox status code.
4360 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4361 * @param offNextInstr The offset of the next instruction.
4362 */
4363VBOXSTRICTRC iemRegRipRelativeJumpS16(PVMCPUCC pVCpu, int16_t offNextInstr) RT_NOEXCEPT
4364{
4365 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
4366
4367 uint16_t uNewIp = pVCpu->cpum.GstCtx.ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
4368 if ( uNewIp > pVCpu->cpum.GstCtx.cs.u32Limit
4369 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
4370 return iemRaiseGeneralProtectionFault0(pVCpu);
4371 /** @todo Test 16-bit jump in 64-bit mode. possible? */
4372 pVCpu->cpum.GstCtx.rip = uNewIp;
4373 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
4374
4375#ifndef IEM_WITH_CODE_TLB
4376 /* Flush the prefetch buffer. */
4377 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4378#endif
4379
4380 return VINF_SUCCESS;
4381}
4382
4383
4384/**
4385 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
4386 *
4387 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4388 * segment limit.
4389 *
4390 * @returns Strict VBox status code.
4391 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4392 * @param offNextInstr The offset of the next instruction.
4393 */
4394VBOXSTRICTRC iemRegRipRelativeJumpS32(PVMCPUCC pVCpu, int32_t offNextInstr) RT_NOEXCEPT
4395{
4396 Assert(pVCpu->iem.s.enmEffOpSize != IEMMODE_16BIT);
4397
4398 if (pVCpu->iem.s.enmEffOpSize == IEMMODE_32BIT)
4399 {
4400 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX); Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
4401
4402 uint32_t uNewEip = pVCpu->cpum.GstCtx.eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
4403 if (uNewEip > pVCpu->cpum.GstCtx.cs.u32Limit)
4404 return iemRaiseGeneralProtectionFault0(pVCpu);
4405 pVCpu->cpum.GstCtx.rip = uNewEip;
4406 }
4407 else
4408 {
4409 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
4410
4411 uint64_t uNewRip = pVCpu->cpum.GstCtx.rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
4412 if (!IEM_IS_CANONICAL(uNewRip))
4413 return iemRaiseGeneralProtectionFault0(pVCpu);
4414 pVCpu->cpum.GstCtx.rip = uNewRip;
4415 }
4416 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
4417
4418#ifndef IEM_WITH_CODE_TLB
4419 /* Flush the prefetch buffer. */
4420 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4421#endif
4422
4423 return VINF_SUCCESS;
4424}
4425
4426
4427/**
4428 * Performs a near jump to the specified address.
4429 *
4430 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4431 * segment limit.
4432 *
4433 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4434 * @param uNewRip The new RIP value.
4435 */
4436VBOXSTRICTRC iemRegRipJump(PVMCPUCC pVCpu, uint64_t uNewRip) RT_NOEXCEPT
4437{
4438 switch (pVCpu->iem.s.enmEffOpSize)
4439 {
4440 case IEMMODE_16BIT:
4441 {
4442 Assert(uNewRip <= UINT16_MAX);
4443 if ( uNewRip > pVCpu->cpum.GstCtx.cs.u32Limit
4444 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
4445 return iemRaiseGeneralProtectionFault0(pVCpu);
4446 /** @todo Test 16-bit jump in 64-bit mode. */
4447 pVCpu->cpum.GstCtx.rip = uNewRip;
4448 break;
4449 }
4450
4451 case IEMMODE_32BIT:
4452 {
4453 Assert(uNewRip <= UINT32_MAX);
4454 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
4455 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
4456
4457 if (uNewRip > pVCpu->cpum.GstCtx.cs.u32Limit)
4458 return iemRaiseGeneralProtectionFault0(pVCpu);
4459 pVCpu->cpum.GstCtx.rip = uNewRip;
4460 break;
4461 }
4462
4463 case IEMMODE_64BIT:
4464 {
4465 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
4466
4467 if (!IEM_IS_CANONICAL(uNewRip))
4468 return iemRaiseGeneralProtectionFault0(pVCpu);
4469 pVCpu->cpum.GstCtx.rip = uNewRip;
4470 break;
4471 }
4472
4473 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4474 }
4475
4476 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
4477
4478#ifndef IEM_WITH_CODE_TLB
4479 /* Flush the prefetch buffer. */
4480 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4481#endif
4482
4483 return VINF_SUCCESS;
4484}
4485
4486/** @} */
4487
4488
4489/** @name FPU access and helpers.
4490 *
4491 * @{
4492 */
4493
4494/**
4495 * Updates the x87.DS and FPUDP registers.
4496 *
4497 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4498 * @param pFpuCtx The FPU context.
4499 * @param iEffSeg The effective segment register.
4500 * @param GCPtrEff The effective address relative to @a iEffSeg.
4501 */
4502DECLINLINE(void) iemFpuUpdateDP(PVMCPUCC pVCpu, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
4503{
4504 RTSEL sel;
4505 switch (iEffSeg)
4506 {
4507 case X86_SREG_DS: sel = pVCpu->cpum.GstCtx.ds.Sel; break;
4508 case X86_SREG_SS: sel = pVCpu->cpum.GstCtx.ss.Sel; break;
4509 case X86_SREG_CS: sel = pVCpu->cpum.GstCtx.cs.Sel; break;
4510 case X86_SREG_ES: sel = pVCpu->cpum.GstCtx.es.Sel; break;
4511 case X86_SREG_FS: sel = pVCpu->cpum.GstCtx.fs.Sel; break;
4512 case X86_SREG_GS: sel = pVCpu->cpum.GstCtx.gs.Sel; break;
4513 default:
4514 AssertMsgFailed(("%d\n", iEffSeg));
4515 sel = pVCpu->cpum.GstCtx.ds.Sel;
4516 }
    /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
4518 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
4519 {
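        /* In real and V86 mode the data pointer is recorded as a linear address (selector * 16 + offset) and the DS selector field is left zero. */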
4520 pFpuCtx->DS = 0;
4521 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
4522 }
4523 else if (!IEM_IS_LONG_MODE(pVCpu))
4524 {
4525 pFpuCtx->DS = sel;
4526 pFpuCtx->FPUDP = GCPtrEff;
4527 }
4528 else
4529 *(uint64_t *)&pFpuCtx->FPUDP = GCPtrEff;
4530}
4531
4532
4533/**
4534 * Rotates the stack registers in the push direction.
4535 *
4536 * @param pFpuCtx The FPU context.
4537 * @remarks This is a complete waste of time, but fxsave stores the registers in
4538 * stack order.
4539 */
4540DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
4541{
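    /* Rotates every register one slot up (old aRegs[i] ends up in aRegs[i + 1], old aRegs[7] in aRegs[0]) so aRegs[] stays in ST() order
       after TOP has been decremented.  Callers place the value being pushed in aRegs[7] beforehand so it lands in the new ST(0). */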
4542 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
4543 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
4544 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
4545 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
4546 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
4547 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
4548 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
4549 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
4550 pFpuCtx->aRegs[0].r80 = r80Tmp;
4551}
4552
4553
4554/**
4555 * Rotates the stack registers in the pop direction.
4556 *
4557 * @param pFpuCtx The FPU context.
4558 * @remarks This is a complete waste of time, but fxsave stores the registers in
4559 * stack order.
4560 */
4561DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
4562{
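    /* Inverse of the push rotation: the old aRegs[0] (the register being popped) ends up in aRegs[7] and everything else shifts
       down one slot, keeping aRegs[] in ST() order after TOP has been incremented. */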
4563 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
4564 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
4565 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
4566 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
4567 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
4568 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
4569 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
4570 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
4571 pFpuCtx->aRegs[7].r80 = r80Tmp;
4572}
4573
4574
4575/**
4576 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
4577 * exception prevents it.
4578 *
4579 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4580 * @param pResult The FPU operation result to push.
4581 * @param pFpuCtx The FPU context.
4582 */
4583static void iemFpuMaybePushResult(PVMCPU pVCpu, PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx) RT_NOEXCEPT
4584{
4585 /* Update FSW and bail if there are pending exceptions afterwards. */
4586 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
4587 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
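    /* The unmasked-exception test below relies on IE/ZE/DE in FSW occupying the same bit positions as IM/ZM/DM in FCW. */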
4588 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4589 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4590 {
        if ((fFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4592 Log11(("iemFpuMaybePushResult: %04x:%08RX64: FSW %#x -> %#x\n",
4593 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fFsw));
4594 pFpuCtx->FSW = fFsw;
4595 return;
4596 }
4597
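    /* A push decrements TOP; adding 7 and masking with the 3-bit field mask is the same as subtracting 1 modulo 8. */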
4598 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
4599 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
4600 {
4601 /* All is fine, push the actual value. */
4602 pFpuCtx->FTW |= RT_BIT(iNewTop);
4603 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
4604 }
4605 else if (pFpuCtx->FCW & X86_FCW_IM)
4606 {
4607 /* Masked stack overflow, push QNaN. */
4608 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
4609 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
4610 }
4611 else
4612 {
4613 /* Raise stack overflow, don't push anything. */
4614 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
4615 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
4616 Log11(("iemFpuMaybePushResult: %04x:%08RX64: stack overflow (FSW=%#x)\n",
4617 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
4618 return;
4619 }
4620
4621 fFsw &= ~X86_FSW_TOP_MASK;
4622 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
4623 pFpuCtx->FSW = fFsw;
4624
4625 iemFpuRotateStackPush(pFpuCtx);
4626 RT_NOREF(pVCpu);
4627}
4628
4629
4630/**
4631 * Stores a result in a FPU register and updates the FSW and FTW.
4632 *
4633 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4634 * @param pFpuCtx The FPU context.
4635 * @param pResult The result to store.
4636 * @param iStReg Which FPU register to store it in.
4637 */
4638static void iemFpuStoreResultOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg) RT_NOEXCEPT
4639{
4640 Assert(iStReg < 8);
4641 uint16_t fNewFsw = pFpuCtx->FSW;
4642 uint16_t const iReg = (X86_FSW_TOP_GET(fNewFsw) + iStReg) & X86_FSW_TOP_SMASK;
4643 fNewFsw &= ~X86_FSW_C_MASK;
4644 fNewFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
4645 if ((fNewFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4646 Log11(("iemFpuStoreResultOnly: %04x:%08RX64: FSW %#x -> %#x\n",
4647 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fNewFsw));
4648 pFpuCtx->FSW = fNewFsw;
4649 pFpuCtx->FTW |= RT_BIT(iReg);
4650 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
4651 RT_NOREF(pVCpu);
4652}
4653
4654
4655/**
4656 * Only updates the FPU status word (FSW) with the result of the current
4657 * instruction.
4658 *
4659 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4660 * @param pFpuCtx The FPU context.
4661 * @param u16FSW The FSW output of the current instruction.
4662 */
4663static void iemFpuUpdateFSWOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint16_t u16FSW) RT_NOEXCEPT
4664{
4665 uint16_t fNewFsw = pFpuCtx->FSW;
4666 fNewFsw &= ~X86_FSW_C_MASK;
4667 fNewFsw |= u16FSW & ~X86_FSW_TOP_MASK;
4668 if ((fNewFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
        Log11(("iemFpuUpdateFSWOnly: %04x:%08RX64: FSW %#x -> %#x\n",
4670 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fNewFsw));
4671 pFpuCtx->FSW = fNewFsw;
4672 RT_NOREF(pVCpu);
4673}
4674
4675
4676/**
4677 * Pops one item off the FPU stack if no pending exception prevents it.
4678 *
4679 * @param pFpuCtx The FPU context.
4680 */
4681static void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx) RT_NOEXCEPT
4682{
4683 /* Check pending exceptions. */
4684 uint16_t uFSW = pFpuCtx->FSW;
4685 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4686 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4687 return;
4688
    /* Pop: TOP = (TOP + 1) & 7.  (Adding 9 and masking below is the same as adding 1 modulo 8.) */
4690 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
4691 uFSW &= ~X86_FSW_TOP_MASK;
4692 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
4693 pFpuCtx->FSW = uFSW;
4694
4695 /* Mark the previous ST0 as empty. */
4696 iOldTop >>= X86_FSW_TOP_SHIFT;
4697 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
4698
4699 /* Rotate the registers. */
4700 iemFpuRotateStackPop(pFpuCtx);
4701}
4702
4703
4704/**
4705 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
4706 *
4707 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4708 * @param pResult The FPU operation result to push.
4709 */
4710void iemFpuPushResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult) RT_NOEXCEPT
4711{
4712 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4713 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4714 iemFpuMaybePushResult(pVCpu, pResult, pFpuCtx);
4715}
4716
4717
4718/**
4719 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
4720 * and sets FPUDP and FPUDS.
4721 *
4722 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4723 * @param pResult The FPU operation result to push.
4724 * @param iEffSeg The effective segment register.
4725 * @param GCPtrEff The effective address relative to @a iEffSeg.
4726 */
4727void iemFpuPushResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
4728{
4729 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4730 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
4731 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4732 iemFpuMaybePushResult(pVCpu, pResult, pFpuCtx);
4733}
4734
4735
4736/**
4737 * Replace ST0 with the first value and push the second onto the FPU stack,
4738 * unless a pending exception prevents it.
4739 *
4740 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4741 * @param pResult The FPU operation result to store and push.
4742 */
4743void iemFpuPushResultTwo(PVMCPUCC pVCpu, PIEMFPURESULTTWO pResult) RT_NOEXCEPT
4744{
4745 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4746 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4747
4748 /* Update FSW and bail if there are pending exceptions afterwards. */
4749 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
4750 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
4751 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4752 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4753 {
4754 if ((fFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4755 Log11(("iemFpuPushResultTwo: %04x:%08RX64: FSW %#x -> %#x\n",
4756 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fFsw));
4757 pFpuCtx->FSW = fFsw;
4758 return;
4759 }
4760
4761 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
4762 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
4763 {
4764 /* All is fine, push the actual value. */
4765 pFpuCtx->FTW |= RT_BIT(iNewTop);
4766 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
4767 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
4768 }
4769 else if (pFpuCtx->FCW & X86_FCW_IM)
4770 {
4771 /* Masked stack overflow, push QNaN. */
4772 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
4773 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
4774 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
4775 }
4776 else
4777 {
4778 /* Raise stack overflow, don't push anything. */
4779 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
4780 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
4781 Log11(("iemFpuPushResultTwo: %04x:%08RX64: stack overflow (FSW=%#x)\n",
4782 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
4783 return;
4784 }
4785
4786 fFsw &= ~X86_FSW_TOP_MASK;
4787 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
4788 pFpuCtx->FSW = fFsw;
4789
4790 iemFpuRotateStackPush(pFpuCtx);
4791}
4792
4793
4794/**
4795 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
4796 * FOP.
4797 *
4798 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4799 * @param pResult The result to store.
4800 * @param iStReg Which FPU register to store it in.
4801 */
4802void iemFpuStoreResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg) RT_NOEXCEPT
4803{
4804 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4805 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4806 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
4807}
4808
4809
4810/**
4811 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
4812 * FOP, and then pops the stack.
4813 *
4814 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4815 * @param pResult The result to store.
4816 * @param iStReg Which FPU register to store it in.
4817 */
4818void iemFpuStoreResultThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg) RT_NOEXCEPT
4819{
4820 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4821 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4822 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
4823 iemFpuMaybePopOne(pFpuCtx);
4824}
4825
4826
4827/**
4828 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
4829 * FPUDP, and FPUDS.
4830 *
4831 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4832 * @param pResult The result to store.
4833 * @param iStReg Which FPU register to store it in.
4834 * @param iEffSeg The effective memory operand selector register.
4835 * @param GCPtrEff The effective memory operand offset.
4836 */
4837void iemFpuStoreResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
4838 uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
4839{
4840 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4841 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
4842 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4843 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
4844}
4845
4846
4847/**
4848 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
4849 * FPUDP, and FPUDS, and then pops the stack.
4850 *
4851 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4852 * @param pResult The result to store.
4853 * @param iStReg Which FPU register to store it in.
4854 * @param iEffSeg The effective memory operand selector register.
4855 * @param GCPtrEff The effective memory operand offset.
4856 */
4857void iemFpuStoreResultWithMemOpThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult,
4858 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
4859{
4860 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4861 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
4862 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4863 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
4864 iemFpuMaybePopOne(pFpuCtx);
4865}
4866
4867
4868/**
4869 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
4870 *
4871 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4872 */
4873void iemFpuUpdateOpcodeAndIp(PVMCPUCC pVCpu) RT_NOEXCEPT
4874{
4875 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4876 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4877}
4878
4879
4880/**
4881 * Updates the FSW, FOP, FPUIP, and FPUCS.
4882 *
4883 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4884 * @param u16FSW The FSW from the current instruction.
4885 */
4886void iemFpuUpdateFSW(PVMCPUCC pVCpu, uint16_t u16FSW) RT_NOEXCEPT
4887{
4888 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4889 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4890 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
4891}
4892
4893
4894/**
4895 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
4896 *
4897 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4898 * @param u16FSW The FSW from the current instruction.
4899 */
4900void iemFpuUpdateFSWThenPop(PVMCPUCC pVCpu, uint16_t u16FSW) RT_NOEXCEPT
4901{
4902 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4903 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4904 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
4905 iemFpuMaybePopOne(pFpuCtx);
4906}
4907
4908
4909/**
4910 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
4911 *
4912 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4913 * @param u16FSW The FSW from the current instruction.
4914 * @param iEffSeg The effective memory operand selector register.
4915 * @param GCPtrEff The effective memory operand offset.
4916 */
4917void iemFpuUpdateFSWWithMemOp(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
4918{
4919 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4920 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
4921 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4922 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
4923}
4924
4925
4926/**
4927 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
4928 *
4929 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4930 * @param u16FSW The FSW from the current instruction.
4931 */
4932void iemFpuUpdateFSWThenPopPop(PVMCPUCC pVCpu, uint16_t u16FSW) RT_NOEXCEPT
4933{
4934 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4935 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4936 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
4937 iemFpuMaybePopOne(pFpuCtx);
4938 iemFpuMaybePopOne(pFpuCtx);
4939}
4940
4941
4942/**
4943 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
4944 *
4945 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4946 * @param u16FSW The FSW from the current instruction.
4947 * @param iEffSeg The effective memory operand selector register.
4948 * @param GCPtrEff The effective memory operand offset.
4949 */
4950void iemFpuUpdateFSWWithMemOpThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
4951{
4952 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4953 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
4954 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4955 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
4956 iemFpuMaybePopOne(pFpuCtx);
4957}
4958
4959
4960/**
4961 * Worker routine for raising an FPU stack underflow exception.
4962 *
4963 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4964 * @param pFpuCtx The FPU context.
4965 * @param iStReg The stack register being accessed.
4966 */
4967static void iemFpuStackUnderflowOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint8_t iStReg)
4968{
4969 Assert(iStReg < 8 || iStReg == UINT8_MAX);
4970 if (pFpuCtx->FCW & X86_FCW_IM)
4971 {
4972 /* Masked underflow. */
4973 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
4974 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
4975 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
4976 if (iStReg != UINT8_MAX)
4977 {
4978 pFpuCtx->FTW |= RT_BIT(iReg);
4979 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
4980 }
4981 }
4982 else
4983 {
4984 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
4985 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
4986 Log11(("iemFpuStackUnderflowOnly: %04x:%08RX64: underflow (FSW=%#x)\n",
4987 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
4988 }
4989 RT_NOREF(pVCpu);
4990}
4991
4992
4993/**
4994 * Raises a FPU stack underflow exception.
4995 *
4996 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4997 * @param iStReg The destination register that should be loaded
4998 * with QNaN if \#IS is not masked. Specify
4999 * UINT8_MAX if none (like for fcom).
5000 */
5001void iemFpuStackUnderflow(PVMCPUCC pVCpu, uint8_t iStReg) RT_NOEXCEPT
5002{
5003 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5004 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5005 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5006}
5007
5008
5009void iemFpuStackUnderflowWithMemOp(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
5010{
5011 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5012 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5013 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5014 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5015}
5016
5017
5018void iemFpuStackUnderflowThenPop(PVMCPUCC pVCpu, uint8_t iStReg) RT_NOEXCEPT
5019{
5020 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5021 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5022 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5023 iemFpuMaybePopOne(pFpuCtx);
5024}
5025
5026
5027void iemFpuStackUnderflowWithMemOpThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
5028{
5029 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5030 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5031 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5032 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5033 iemFpuMaybePopOne(pFpuCtx);
5034}
5035
5036
5037void iemFpuStackUnderflowThenPopPop(PVMCPUCC pVCpu) RT_NOEXCEPT
5038{
5039 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5040 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5041 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, UINT8_MAX);
5042 iemFpuMaybePopOne(pFpuCtx);
5043 iemFpuMaybePopOne(pFpuCtx);
5044}
5045
5046
5047void iemFpuStackPushUnderflow(PVMCPUCC pVCpu) RT_NOEXCEPT
5048{
5049 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5050 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5051
5052 if (pFpuCtx->FCW & X86_FCW_IM)
5053 {
        /* Masked stack underflow - Push QNaN. */
5055 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5056 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5057 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5058 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5059 pFpuCtx->FTW |= RT_BIT(iNewTop);
5060 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5061 iemFpuRotateStackPush(pFpuCtx);
5062 }
5063 else
5064 {
5065 /* Exception pending - don't change TOP or the register stack. */
5066 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5067 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5068 Log11(("iemFpuStackPushUnderflow: %04x:%08RX64: underflow (FSW=%#x)\n",
5069 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5070 }
5071}
5072
5073
5074void iemFpuStackPushUnderflowTwo(PVMCPUCC pVCpu) RT_NOEXCEPT
5075{
5076 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5077 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5078
5079 if (pFpuCtx->FCW & X86_FCW_IM)
5080 {
        /* Masked stack underflow - Push QNaN. */
5082 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5083 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5084 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5085 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5086 pFpuCtx->FTW |= RT_BIT(iNewTop);
5087 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
5088 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5089 iemFpuRotateStackPush(pFpuCtx);
5090 }
5091 else
5092 {
5093 /* Exception pending - don't change TOP or the register stack. */
5094 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5095 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5096 Log11(("iemFpuStackPushUnderflowTwo: %04x:%08RX64: underflow (FSW=%#x)\n",
5097 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5098 }
5099}
5100
5101
5102/**
5103 * Worker routine for raising an FPU stack overflow exception on a push.
5104 *
5105 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5106 * @param pFpuCtx The FPU context.
5107 */
5108static void iemFpuStackPushOverflowOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx) RT_NOEXCEPT
5109{
5110 if (pFpuCtx->FCW & X86_FCW_IM)
5111 {
5112 /* Masked overflow. */
5113 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5114 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5115 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
5116 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5117 pFpuCtx->FTW |= RT_BIT(iNewTop);
5118 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5119 iemFpuRotateStackPush(pFpuCtx);
5120 }
5121 else
5122 {
5123 /* Exception pending - don't change TOP or the register stack. */
5124 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5125 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5126 Log11(("iemFpuStackPushOverflowOnly: %04x:%08RX64: overflow (FSW=%#x)\n",
5127 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5128 }
5129 RT_NOREF(pVCpu);
5130}
5131
5132
5133/**
5134 * Raises a FPU stack overflow exception on a push.
5135 *
5136 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5137 */
5138void iemFpuStackPushOverflow(PVMCPUCC pVCpu) RT_NOEXCEPT
5139{
5140 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5141 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5142 iemFpuStackPushOverflowOnly(pVCpu, pFpuCtx);
5143}
5144
5145
5146/**
5147 * Raises a FPU stack overflow exception on a push with a memory operand.
5148 *
5149 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5150 * @param iEffSeg The effective memory operand selector register.
5151 * @param GCPtrEff The effective memory operand offset.
5152 */
5153void iemFpuStackPushOverflowWithMemOp(PVMCPUCC pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
5154{
5155 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5156 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5157 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5158 iemFpuStackPushOverflowOnly(pVCpu, pFpuCtx);
5159}
5160
5161/** @} */
5162
5163
5164/** @name SSE+AVX SIMD access and helpers.
5165 *
5166 * @{
5167 */
5168/**
5169 * Stores a result in a SIMD XMM register, updates the MXCSR.
5170 *
5171 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5172 * @param pResult The result to store.
5173 * @param iXmmReg Which SIMD XMM register to store the result in.
5174 */
5175void iemSseStoreResult(PVMCPUCC pVCpu, PCIEMSSERESULT pResult, uint8_t iXmmReg) RT_NOEXCEPT
5176{
5177 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5178 pFpuCtx->MXCSR |= pResult->MXCSR & X86_MXCSR_XCPT_FLAGS;
5179
5180 /* The result is only updated if there is no unmasked exception pending. */
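    /* Shifting the exception mask bits down by X86_MXCSR_XCPT_MASK_SHIFT lines them up with the exception flag bits, so the
       expression below yields the set of pending, unmasked exceptions. */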
5181 if (( ~((pFpuCtx->MXCSR & X86_MXCSR_XCPT_MASK) >> X86_MXCSR_XCPT_MASK_SHIFT)
5182 & (pFpuCtx->MXCSR & X86_MXCSR_XCPT_FLAGS)) == 0)
5183 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXmmReg] = pResult->uResult;
5184}
5185
5186
5187/**
5188 * Updates the MXCSR.
5189 *
5190 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5191 * @param fMxcsr The new MXCSR value.
5192 */
5193void iemSseUpdateMxcsr(PVMCPUCC pVCpu, uint32_t fMxcsr) RT_NOEXCEPT
5194{
5195 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5196 pFpuCtx->MXCSR |= fMxcsr & X86_MXCSR_XCPT_FLAGS;
5197}
5198/** @} */
5199
5200
5201/** @name Memory access.
5202 *
5203 * @{
5204 */
5205
5206
5207/**
5208 * Updates the IEMCPU::cbWritten counter if applicable.
5209 *
5210 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5211 * @param fAccess The access being accounted for.
5212 * @param cbMem The access size.
5213 */
5214DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPUCC pVCpu, uint32_t fAccess, size_t cbMem)
5215{
5216 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
5217 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
5218 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
5219}
5220
5221
5222/**
5223 * Applies the segment limit, base and attributes.
5224 *
5225 * This may raise a \#GP or \#SS.
5226 *
5227 * @returns VBox strict status code.
5228 *
5229 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5230 * @param fAccess The kind of access which is being performed.
5231 * @param iSegReg The index of the segment register to apply.
5232 * This is UINT8_MAX if none (for IDT, GDT, LDT,
5233 * TSS, ++).
5234 * @param cbMem The access size.
5235 * @param pGCPtrMem Pointer to the guest memory address to apply
5236 * segmentation to. Input and output parameter.
5237 */
5238VBOXSTRICTRC iemMemApplySegment(PVMCPUCC pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem) RT_NOEXCEPT
5239{
5240 if (iSegReg == UINT8_MAX)
5241 return VINF_SUCCESS;
5242
5243 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
5244 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
5245 switch (pVCpu->iem.s.enmCpuMode)
5246 {
5247 case IEMMODE_16BIT:
5248 case IEMMODE_32BIT:
5249 {
5250 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
5251 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
5252
5253 if ( pSel->Attr.n.u1Present
5254 && !pSel->Attr.n.u1Unusable)
5255 {
5256 Assert(pSel->Attr.n.u1DescType);
5257 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
5258 {
5259 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5260 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
5261 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
5262
5263 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
5264 {
5265 /** @todo CPL check. */
5266 }
5267
5268 /*
5269 * There are two kinds of data selectors, normal and expand down.
5270 */
5271 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
5272 {
5273 if ( GCPtrFirst32 > pSel->u32Limit
5274 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
5275 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5276 }
5277 else
5278 {
5279 /*
5280 * The upper boundary is defined by the B bit, not the G bit!
5281 */
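                    /* For expand-down segments the valid offset range is (limit, B ? 0xffffffff : 0xffff]; anything at or below the limit is out of bounds. */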
5282 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
5283 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
5284 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5285 }
5286 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
5287 }
5288 else
5289 {
5290 /*
                 * Code selectors can usually be used to read through; writing is
                 * only permitted in real and V8086 mode.
5293 */
5294 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5295 || ( (fAccess & IEM_ACCESS_TYPE_READ)
5296 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
5297 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
5298 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
5299
5300 if ( GCPtrFirst32 > pSel->u32Limit
5301 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
5302 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5303
5304 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
5305 {
5306 /** @todo CPL check. */
5307 }
5308
5309 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
5310 }
5311 }
5312 else
5313 return iemRaiseGeneralProtectionFault0(pVCpu);
5314 return VINF_SUCCESS;
5315 }
5316
5317 case IEMMODE_64BIT:
5318 {
5319 RTGCPTR GCPtrMem = *pGCPtrMem;
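            /* Only FS and GS have a non-zero segment base in 64-bit mode; the other segment bases are treated as zero. */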
5320 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
5321 *pGCPtrMem = GCPtrMem + pSel->u64Base;
5322
5323 Assert(cbMem >= 1);
5324 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
5325 return VINF_SUCCESS;
5326 /** @todo We should probably raise \#SS(0) here if segment is SS; see AMD spec.
5327 * 4.12.2 "Data Limit Checks in 64-bit Mode". */
5328 return iemRaiseGeneralProtectionFault0(pVCpu);
5329 }
5330
5331 default:
5332 AssertFailedReturn(VERR_IEM_IPE_7);
5333 }
5334}
5335
5336
5337/**
 * Translates a virtual address to a physical address and checks if we
 * can access the page as specified.
 *
 * @returns VBox strict status code.
5341 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5342 * @param GCPtrMem The virtual address.
5343 * @param fAccess The intended access.
5344 * @param pGCPhysMem Where to return the physical address.
5345 */
5346VBOXSTRICTRC iemMemPageTranslateAndCheckAccess(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint32_t fAccess, PRTGCPHYS pGCPhysMem) RT_NOEXCEPT
5347{
5348 /** @todo Need a different PGM interface here. We're currently using
5349 * generic / REM interfaces. this won't cut it for R0. */
5350 /** @todo If/when PGM handles paged real-mode, we can remove the hack in
5351 * iemSvmWorldSwitch/iemVmxWorldSwitch to work around raising a page-fault
5352 * here. */
5353 PGMPTWALK Walk;
5354 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
5355 if (RT_FAILURE(rc))
5356 {
5357 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
5358 /** @todo Check unassigned memory in unpaged mode. */
5359 /** @todo Reserved bits in page tables. Requires new PGM interface. */
5360#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5361 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5362 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
5363#endif
5364 *pGCPhysMem = NIL_RTGCPHYS;
5365 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, rc);
5366 }
5367
5368 /* If the page is writable and does not have the no-exec bit set, all
5369 access is allowed. Otherwise we'll have to check more carefully... */
5370 if ((Walk.fEffective & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
5371 {
5372 /* Write to read only memory? */
5373 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5374 && !(Walk.fEffective & X86_PTE_RW)
5375 && ( ( pVCpu->iem.s.uCpl == 3
5376 && !(fAccess & IEM_ACCESS_WHAT_SYS))
5377 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
5378 {
5379 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
5380 *pGCPhysMem = NIL_RTGCPHYS;
5381#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5382 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5383 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5384#endif
5385 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
5386 }
5387
5388 /* Kernel memory accessed by userland? */
5389 if ( !(Walk.fEffective & X86_PTE_US)
5390 && pVCpu->iem.s.uCpl == 3
5391 && !(fAccess & IEM_ACCESS_WHAT_SYS))
5392 {
5393 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
5394 *pGCPhysMem = NIL_RTGCPHYS;
5395#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5396 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5397 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5398#endif
5399 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
5400 }
5401
5402 /* Executing non-executable memory? */
5403 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
5404 && (Walk.fEffective & X86_PTE_PAE_NX)
5405 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) )
5406 {
5407 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
5408 *pGCPhysMem = NIL_RTGCPHYS;
5409#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5410 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5411 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5412#endif
5413 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
5414 VERR_ACCESS_DENIED);
5415 }
5416 }
5417
5418 /*
5419 * Set the dirty / access flags.
     * ASSUMES this is set when the address is translated rather than on commit...
5421 */
5422 /** @todo testcase: check when A and D bits are actually set by the CPU. */
5423 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
5424 if ((Walk.fEffective & fAccessedDirty) != fAccessedDirty)
5425 {
5426 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
5427 AssertRC(rc2);
5428 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
5429 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
5430 }
5431
5432 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
5433 *pGCPhysMem = GCPhys;
5434 return VINF_SUCCESS;
5435}
5436
5437
5438/**
5439 * Looks up a memory mapping entry.
5440 *
5441 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
5442 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5443 * @param pvMem The memory address.
 * @param   fAccess             The access type and purpose to match (IEM_ACCESS_TYPE_XXX and IEM_ACCESS_WHAT_XXX).
5445 */
5446DECLINLINE(int) iemMapLookup(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess)
5447{
5448 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
5449 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
5450 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
5451 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5452 return 0;
5453 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
5454 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5455 return 1;
5456 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
5457 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5458 return 2;
5459 return VERR_NOT_FOUND;
5460}
5461
5462
5463/**
5464 * Finds a free memmap entry when using iNextMapping doesn't work.
5465 *
5466 * @returns Memory mapping index, 1024 on failure.
5467 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5468 */
5469static unsigned iemMemMapFindFree(PVMCPUCC pVCpu)
5470{
5471 /*
5472 * The easy case.
5473 */
5474 if (pVCpu->iem.s.cActiveMappings == 0)
5475 {
5476 pVCpu->iem.s.iNextMapping = 1;
5477 return 0;
5478 }
5479
5480 /* There should be enough mappings for all instructions. */
5481 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
5482
5483 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
5484 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
5485 return i;
5486
5487 AssertFailedReturn(1024);
5488}
5489
5490
5491/**
5492 * Commits a bounce buffer that needs writing back and unmaps it.
5493 *
5494 * @returns Strict VBox status code.
5495 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5496 * @param iMemMap The index of the buffer to commit.
 * @param   fPostponeFail       Whether we can postpone write failures to ring-3.
5498 * Always false in ring-3, obviously.
5499 */
5500static VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPUCC pVCpu, unsigned iMemMap, bool fPostponeFail)
5501{
5502 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
5503 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
5504#ifdef IN_RING3
5505 Assert(!fPostponeFail);
5506 RT_NOREF_PV(fPostponeFail);
5507#endif
5508
5509 /*
5510 * Do the writing.
5511 */
5512 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
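    /* Writes that targeted unassigned memory (fUnassigned) have nothing to commit and are simply dropped. */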
5513 if (!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned)
5514 {
5515 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
5516 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
5517 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
5518 if (!pVCpu->iem.s.fBypassHandlers)
5519 {
5520 /*
5521 * Carefully and efficiently dealing with access handler return
             * codes makes this a little bloated.
5523 */
5524 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
5525 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
5526 pbBuf,
5527 cbFirst,
5528 PGMACCESSORIGIN_IEM);
5529 if (rcStrict == VINF_SUCCESS)
5530 {
5531 if (cbSecond)
5532 {
5533 rcStrict = PGMPhysWrite(pVM,
5534 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5535 pbBuf + cbFirst,
5536 cbSecond,
5537 PGMACCESSORIGIN_IEM);
5538 if (rcStrict == VINF_SUCCESS)
5539 { /* nothing */ }
5540 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5541 {
5542 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
5543 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5544 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5545 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5546 }
5547#ifndef IN_RING3
5548 else if (fPostponeFail)
5549 {
5550 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5551 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5552 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5553 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
5554 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5555 return iemSetPassUpStatus(pVCpu, rcStrict);
5556 }
5557#endif
5558 else
5559 {
5560 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5561 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5562 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5563 return rcStrict;
5564 }
5565 }
5566 }
5567 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5568 {
5569 if (!cbSecond)
5570 {
5571 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
5572 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
5573 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5574 }
5575 else
5576 {
5577 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
5578 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5579 pbBuf + cbFirst,
5580 cbSecond,
5581 PGMACCESSORIGIN_IEM);
5582 if (rcStrict2 == VINF_SUCCESS)
5583 {
5584 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
5585 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5586 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5587 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5588 }
5589 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
5590 {
5591 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
5592 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5593 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
5594 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
5595 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5596 }
5597#ifndef IN_RING3
5598 else if (fPostponeFail)
5599 {
5600 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5601 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5602 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5603 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
5604 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5605 return iemSetPassUpStatus(pVCpu, rcStrict);
5606 }
5607#endif
5608 else
5609 {
5610 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5611 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5612 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
5613 return rcStrict2;
5614 }
5615 }
5616 }
5617#ifndef IN_RING3
5618 else if (fPostponeFail)
5619 {
5620 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5621 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5622 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5623 if (!cbSecond)
5624 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
5625 else
5626 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
5627 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5628 return iemSetPassUpStatus(pVCpu, rcStrict);
5629 }
5630#endif
5631 else
5632 {
5633 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
5634 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5635 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5636 return rcStrict;
5637 }
5638 }
5639 else
5640 {
5641 /*
5642 * No access handlers, much simpler.
5643 */
5644 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
5645 if (RT_SUCCESS(rc))
5646 {
5647 if (cbSecond)
5648 {
5649 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
5650 if (RT_SUCCESS(rc))
5651 { /* likely */ }
5652 else
5653 {
5654 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5655 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5656 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
5657 return rc;
5658 }
5659 }
5660 }
5661 else
5662 {
5663 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
5664 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
5665 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5666 return rc;
5667 }
5668 }
5669 }
5670
5671#if defined(IEM_LOG_MEMORY_WRITES)
5672 Log(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
5673 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
5674 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
5675 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5676 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
5677 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
5678
5679 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
5680 g_cbIemWrote = cbWrote;
5681 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
5682#endif
5683
5684 /*
5685 * Free the mapping entry.
5686 */
5687 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
5688 Assert(pVCpu->iem.s.cActiveMappings != 0);
5689 pVCpu->iem.s.cActiveMappings--;
5690 return VINF_SUCCESS;
5691}
5692
5693
5694/**
5695 * iemMemMap worker that deals with a request crossing pages.
5696 */
5697static VBOXSTRICTRC
5698iemMemBounceBufferMapCrossPage(PVMCPUCC pVCpu, int iMemMap, void **ppvMem, size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
5699{
5700 /*
5701 * Do the address translations.
5702 */
5703 RTGCPHYS GCPhysFirst;
5704 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, fAccess, &GCPhysFirst);
5705 if (rcStrict != VINF_SUCCESS)
5706 return rcStrict;
5707
5708 RTGCPHYS GCPhysSecond;
5709 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK,
5710 fAccess, &GCPhysSecond);
5711 if (rcStrict != VINF_SUCCESS)
5712 return rcStrict;
5713 GCPhysSecond &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
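    /* The second translation above was done for the first byte of the following page, so GCPhysSecond should already be page aligned; the masking is just for safety. */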
5714
5715 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
5716
5717 /*
5718 * Read in the current memory content if it's a read, execute or partial
5719 * write access.
5720 */
5721 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
5722 uint32_t const cbFirstPage = GUEST_PAGE_SIZE - (GCPhysFirst & GUEST_PAGE_OFFSET_MASK);
5723 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
5724
5725 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
5726 {
5727 if (!pVCpu->iem.s.fBypassHandlers)
5728 {
5729 /*
5730 * Must carefully deal with access handler status codes here,
5731 * makes the code a bit bloated.
5732 */
5733 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
5734 if (rcStrict == VINF_SUCCESS)
5735 {
5736 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
5737 if (rcStrict == VINF_SUCCESS)
5738 { /*likely */ }
5739 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5740 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5741 else
5742 {
                    Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict=%Rrc (!!)\n",
                         GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5745 return rcStrict;
5746 }
5747 }
5748 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5749 {
5750 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
5751 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
5752 {
5753 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
5754 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5755 }
5756 else
5757 {
                    Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
                         GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
5760 return rcStrict2;
5761 }
5762 }
5763 else
5764 {
                Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
5766 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
5767 return rcStrict;
5768 }
5769 }
5770 else
5771 {
5772 /*
             * No informational status codes here, much more straightforward.
5774 */
5775 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
5776 if (RT_SUCCESS(rc))
5777 {
5778 Assert(rc == VINF_SUCCESS);
5779 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
5780 if (RT_SUCCESS(rc))
5781 Assert(rc == VINF_SUCCESS);
5782 else
5783 {
                    Log(("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
5785 return rc;
5786 }
5787 }
5788 else
5789 {
                Log(("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
5791 return rc;
5792 }
5793 }
5794 }
5795#ifdef VBOX_STRICT
5796 else
5797 memset(pbBuf, 0xcc, cbMem);
5798 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
5799 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
5800#endif
5801 AssertCompileMemberAlignment(VMCPU, iem.s.aBounceBuffers, 64);
5802
5803 /*
5804 * Commit the bounce buffer entry.
5805 */
5806 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
5807 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
5808 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
5809 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
5810 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
5811 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
5812 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
5813 pVCpu->iem.s.iNextMapping = iMemMap + 1;
5814 pVCpu->iem.s.cActiveMappings++;
5815
5816 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
5817 *ppvMem = pbBuf;
5818 return VINF_SUCCESS;
5819}
5820
5821
5822/**
 * iemMemMap worker that deals with iemMemPageMap failures.
5824 */
5825static VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPUCC pVCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
5826 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
5827{
5828 /*
5829 * Filter out conditions we can handle and the ones which shouldn't happen.
5830 */
5831 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
5832 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
5833 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
5834 {
5835 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
5836 return rcMap;
5837 }
5838 pVCpu->iem.s.cPotentialExits++;
5839
5840 /*
5841 * Read in the current memory content if it's a read, execute or partial
5842 * write access.
5843 */
5844 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
5845 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
5846 {
5847 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
5848 memset(pbBuf, 0xff, cbMem);
5849 else
5850 {
5851 int rc;
5852 if (!pVCpu->iem.s.fBypassHandlers)
5853 {
5854 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
5855 if (rcStrict == VINF_SUCCESS)
5856 { /* nothing */ }
5857 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5858 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5859 else
5860 {
5861 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
5862 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
5863 return rcStrict;
5864 }
5865 }
5866 else
5867 {
5868 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
5869 if (RT_SUCCESS(rc))
5870 { /* likely */ }
5871 else
5872 {
                    Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n",
5874 GCPhysFirst, rc));
5875 return rc;
5876 }
5877 }
5878 }
5879 }
#ifdef VBOX_STRICT
    else
        memset(pbBuf, 0xcc, cbMem);
    if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
        memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
#endif
5888
5889 /*
5890 * Commit the bounce buffer entry.
5891 */
5892 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
5893 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
5894 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
5895 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
5896 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
5897 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
5898 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
5899 pVCpu->iem.s.iNextMapping = iMemMap + 1;
5900 pVCpu->iem.s.cActiveMappings++;
5901
5902 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
5903 *ppvMem = pbBuf;
5904 return VINF_SUCCESS;
5905}
5906
5907
5908
5909/**
5910 * Maps the specified guest memory for the given kind of access.
5911 *
5912 * This may be using bounce buffering of the memory if it's crossing a page
5913 * boundary or if there is an access handler installed for any of it. Because
5914 * of lock prefix guarantees, we're in for some extra clutter when this
5915 * happens.
5916 *
5917 * This may raise a \#GP, \#SS, \#PF or \#AC.
5918 *
5919 * @returns VBox strict status code.
5920 *
5921 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5922 * @param ppvMem Where to return the pointer to the mapped memory.
5923 * @param cbMem The number of bytes to map. This is usually 1, 2, 4, 6,
5924 * 8, 12, 16, 32 or 512. When used by string operations
5925 * it can be up to a page.
5926 * @param iSegReg The index of the segment register to use for this
5927 * access. The base and limits are checked. Use UINT8_MAX
5928 * to indicate that no segmentation is required (for IDT,
5929 * GDT and LDT accesses).
5930 * @param GCPtrMem The address of the guest memory.
5931 * @param fAccess How the memory is being accessed. The
5932 * IEM_ACCESS_TYPE_XXX bit is used to figure out how to map
5933 * the memory, while the IEM_ACCESS_WHAT_XXX bit is used
5934 * when raising exceptions.
5935 * @param uAlignCtl Alignment control:
5936 * - Bits 15:0 is the alignment mask.
5937 * - Bits 31:16 for flags like IEM_MEMMAP_F_ALIGN_GP,
5938 * IEM_MEMMAP_F_ALIGN_SSE, and
5939 * IEM_MEMMAP_F_ALIGN_GP_OR_AC.
5940 * Pass zero to skip alignment.
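 *
 * @par Example
 *      A minimal sketch of how a 4-byte data read could be mapped and unmapped
 *      (illustrative only, not lifted from an actual caller):
 *      @code
 *          uint32_t    *pu32Src;
 *          VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_DS,
 *                                            GCPtrMem, IEM_ACCESS_DATA_R, 0 /+*uAlignCtl*+/);
 *          if (rcStrict == VINF_SUCCESS)
 *          {
 *              uint32_t const uValue = *pu32Src;   // use the value...
 *              rcStrict = iemMemCommitAndUnmap(pVCpu, pu32Src, IEM_ACCESS_DATA_R);
 *          }
 *      @endcode
 *      (The '+' characters above only keep the nested comment intact; drop them in real code.)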
5941 */
5942VBOXSTRICTRC iemMemMap(PVMCPUCC pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem,
5943 uint32_t fAccess, uint32_t uAlignCtl) RT_NOEXCEPT
5944{
5945 /*
5946 * Check the input and figure out which mapping entry to use.
5947 */
5948 Assert(cbMem <= sizeof(pVCpu->iem.s.aBounceBuffers[0]));
5949 Assert( cbMem <= 64 || cbMem == 512 || cbMem == 256 || cbMem == 108 || cbMem == 104 || cbMem == 102 || cbMem == 94
5950 || (iSegReg == UINT8_MAX && uAlignCtl == 0 && fAccess == IEM_ACCESS_DATA_R /* for the CPUID logging interface */) );
    Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
5952 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
5953
5954 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
5955 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
5956 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
5957 {
5958 iMemMap = iemMemMapFindFree(pVCpu);
5959 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
5960 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
5961 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
5962 pVCpu->iem.s.aMemMappings[2].fAccess),
5963 VERR_IEM_IPE_9);
5964 }
5965
5966 /*
5967 * Map the memory, checking that we can actually access it. If something
5968 * slightly complicated happens, fall back on bounce buffering.
5969 */
5970 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
5971 if (rcStrict == VINF_SUCCESS)
5972 { /* likely */ }
5973 else
5974 return rcStrict;
5975
5976 if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE) /* Crossing a page boundary? */
5977 { /* likely */ }
5978 else
5979 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
5980
5981 /*
5982 * Alignment check.
5983 */
5984 if ( (GCPtrMem & (uAlignCtl & UINT16_MAX)) == 0 )
5985 { /* likelyish */ }
5986 else
5987 {
5988 /* Misaligned access. */
5989 if ((fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
5990 {
5991 if ( !(uAlignCtl & IEM_MEMMAP_F_ALIGN_GP)
5992 || ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_SSE)
5993 && (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) )
5994 {
5995 AssertCompile(X86_CR0_AM == X86_EFL_AC);
5996
5997 if (iemMemAreAlignmentChecksEnabled(pVCpu))
5998 return iemRaiseAlignmentCheckException(pVCpu);
5999 }
6000 else if ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_GP_OR_AC)
6001 && iemMemAreAlignmentChecksEnabled(pVCpu)
6002/** @todo may only apply to 2, 4 or 8 byte misalignments depending on the CPU
6003 * implementation. See FXSAVE/FRSTOR/XSAVE/XRSTOR/++. */
6004 )
6005 return iemRaiseAlignmentCheckException(pVCpu);
6006 else
6007 return iemRaiseGeneralProtectionFault0(pVCpu);
6008 }
6009 }
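    /* To summarize the decision tree above: without IEM_MEMMAP_F_ALIGN_GP a
       misaligned non-system access only yields #AC, and only when alignment
       checks are enabled (CR0.AM, EFLAGS.AC and CPL 3); with
       IEM_MEMMAP_F_ALIGN_GP it yields #GP(0), unless IEM_MEMMAP_F_ALIGN_SSE
       together with MXCSR.MM selects #AC semantics instead, or
       IEM_MEMMAP_F_ALIGN_GP_OR_AC with enabled alignment checks turns the
       #GP into an #AC. */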
6010
6011#ifdef IEM_WITH_DATA_TLB
6012 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
6013
6014 /*
6015 * Get the TLB entry for this page.
6016 */
6017 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
6018 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
6019 if (pTlbe->uTag == uTag)
6020 {
6021# ifdef VBOX_WITH_STATISTICS
6022 pVCpu->iem.s.DataTlb.cTlbHits++;
6023# endif
6024 }
6025 else
6026 {
6027 pVCpu->iem.s.DataTlb.cTlbMisses++;
6028 PGMPTWALK Walk;
6029 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
6030 if (RT_FAILURE(rc))
6031 {
6032 Log(("iemMemMap: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
6033# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6034 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6035 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
6036# endif
6037 iemRaisePageFaultJmp(pVCpu, GCPtrMem, fAccess, rc);
6038 }
6039
6040 Assert(Walk.fSucceeded);
6041 pTlbe->uTag = uTag;
6042 pTlbe->fFlagsAndPhysRev = ~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A); /* skipping NX */
6043 pTlbe->GCPhys = Walk.GCPhys;
6044 pTlbe->pbMappingR3 = NULL;
6045 }
6046
6047 /*
6048 * Check TLB page table level access flags.
6049 */
6050 /* If the page is either supervisor only or non-writable, we need to do
6051 more careful access checks. */
6052 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_WRITE))
6053 {
6054 /* Write to read only memory? */
6055 if ( (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_WRITE)
6056 && (fAccess & IEM_ACCESS_TYPE_WRITE)
6057 && ( ( pVCpu->iem.s.uCpl == 3
6058 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6059 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
6060 {
6061 Log(("iemMemMap: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
6062# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6063 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6064 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6065# endif
6066 iemRaisePageFaultJmp(pVCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
6067 }
6068
6069 /* Kernel memory accessed by userland? */
6070 if ( (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER)
6071 && pVCpu->iem.s.uCpl == 3
6072 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6073 {
6074 Log(("iemMemMap: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
6075# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6076 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6077 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6078# endif
6079 iemRaisePageFaultJmp(pVCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
6080 }
6081 }
6082
6083 /*
6084 * Set the dirty / access flags.
6085 * ASSUMES this is set when the address is translated rather than on commit...
6086 */
6087 /** @todo testcase: check when A and D bits are actually set by the CPU. */
6088 uint64_t const fTlbAccessedDirty = (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PT_NO_DIRTY : 0) | IEMTLBE_F_PT_NO_ACCESSED;
6089 if (pTlbe->fFlagsAndPhysRev & fTlbAccessedDirty)
6090 {
6091 uint32_t const fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
6092 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
6093 AssertRC(rc2);
6094 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
6095 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
6096 pTlbe->fFlagsAndPhysRev &= ~fTlbAccessedDirty;
6097 }
6098
6099 /*
6100 * Look up the physical page info if necessary.
6101 */
6102 uint8_t *pbMem = NULL;
6103 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6104# ifdef IN_RING3
6105 pbMem = pTlbe->pbMappingR3;
6106# else
6107 pbMem = NULL;
6108# endif
6109 else
6110 {
6111 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
6112 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
6113 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
6114 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
6115 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
6116 { /* likely */ }
6117 else
6118 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
6119 pTlbe->pbMappingR3 = NULL;
6120 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
6121 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_UNASSIGNED);
6122 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
6123 &pbMem, &pTlbe->fFlagsAndPhysRev);
6124 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
6125# ifdef IN_RING3
6126 pTlbe->pbMappingR3 = pbMem;
6127# endif
6128 }
6129
6130 /*
6131 * Check the physical page level access and mapping.
6132 */
6133 if ( !(pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ))
6134 || !(pTlbe->fFlagsAndPhysRev & ( (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PG_NO_WRITE : 0)
6135 | (fAccess & IEM_ACCESS_TYPE_READ ? IEMTLBE_F_PG_NO_READ : 0))) )
6136 { /* probably likely */ }
6137 else
6138 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem,
6139 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,
6140 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED
6141 : pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? VERR_PGM_PHYS_TLB_CATCH_ALL
6142 : VERR_PGM_PHYS_TLB_CATCH_WRITE);
6143 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3)); /* ASSUMPTIONS about PGMPhysIemGCPhys2PtrNoLock behaviour. */
6144
6145 if (pbMem)
6146 {
6147 Assert(!((uintptr_t)pbMem & GUEST_PAGE_OFFSET_MASK));
6148 pbMem = pbMem + (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6149 fAccess |= IEM_ACCESS_NOT_LOCKED;
6150 }
6151 else
6152 {
6153 Assert(!(fAccess & IEM_ACCESS_NOT_LOCKED));
6154 RTGCPHYS const GCPhysFirst = pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6155 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6156 if (rcStrict != VINF_SUCCESS)
6157 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
6158 }
6159
6160 void * const pvMem = pbMem;
6161
6162 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6163 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6164 if (fAccess & IEM_ACCESS_TYPE_READ)
6165 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6166
6167#else /* !IEM_WITH_DATA_TLB */
6168
6169 RTGCPHYS GCPhysFirst;
6170 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
6171 if (rcStrict != VINF_SUCCESS)
6172 return rcStrict;
6173
6174 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6175 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6176 if (fAccess & IEM_ACCESS_TYPE_READ)
6177 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6178
6179 void *pvMem;
6180 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6181 if (rcStrict != VINF_SUCCESS)
6182 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
6183
6184#endif /* !IEM_WITH_DATA_TLB */
6185
6186 /*
6187 * Fill in the mapping table entry.
6188 */
6189 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
6190 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
6191 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6192    pVCpu->iem.s.cActiveMappings++;
6193
6194 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6195 *ppvMem = pvMem;
6196
6197 return VINF_SUCCESS;
6198}
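
/*
 * A minimal usage sketch for the iemMemMap + iemMemCommitAndUnmap pair
 * (illustrative only: the helper name is made up and IEM_ACCESS_DATA_RW is
 * assumed to be the combined read+write data access constant):
 *
 *      static VBOXSTRICTRC iemMemXchgDataU16Sketch(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem,
 *                                                  uint16_t u16New, uint16_t *pu16Old)
 *      {
 *          uint16_t *pu16Dst;
 *          VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem,
 *                                            IEM_ACCESS_DATA_RW, sizeof(*pu16Dst) - 1); // natural alignment, #AC rules only
 *          if (rcStrict == VINF_SUCCESS)
 *          {
 *              *pu16Old = *pu16Dst;    // read through the mapping (possibly a bounce buffer)
 *              *pu16Dst = u16New;      // modify it in place
 *              rcStrict  = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_RW); // commits bounce buffered data
 *          }
 *          return rcStrict;
 *      }
 */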
6199
6200
6201/**
6202 * Commits the guest memory if bounce buffered and unmaps it.
6203 *
6204 * @returns Strict VBox status code.
6205 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6206 * @param pvMem The mapping.
6207 * @param fAccess The kind of access.
6208 */
6209VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess) RT_NOEXCEPT
6210{
6211 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
6212 AssertReturn(iMemMap >= 0, iMemMap);
6213
6214 /* If it's bounce buffered, we may need to write back the buffer. */
6215 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6216 {
6217 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6218 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
6219 }
6220 /* Otherwise unlock it. */
6221 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6222 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6223
6224 /* Free the entry. */
6225 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6226 Assert(pVCpu->iem.s.cActiveMappings != 0);
6227 pVCpu->iem.s.cActiveMappings--;
6228 return VINF_SUCCESS;
6229}
6230
6231#ifdef IEM_WITH_SETJMP
6232
6233/**
6234 * Maps the specified guest memory for the given kind of access, longjmp on
6235 * error.
6236 *
6237 * This may be using bounce buffering of the memory if it's crossing a page
6238 * boundary or if there is an access handler installed for any of it. Because
6239 * of lock prefix guarantees, we're in for some extra clutter when this
6240 * happens.
6241 *
6242 * This may raise a \#GP, \#SS, \#PF or \#AC.
6243 *
6244 * @returns Pointer to the mapped memory.
6245 *
6246 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6247 * @param cbMem The number of bytes to map. This is usually 1,
6248 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
6249 * string operations it can be up to a page.
6250 * @param iSegReg The index of the segment register to use for
6251 * this access. The base and limits are checked.
6252 * Use UINT8_MAX to indicate that no segmentation
6253 * is required (for IDT, GDT and LDT accesses).
6254 * @param GCPtrMem The address of the guest memory.
6255 * @param fAccess How the memory is being accessed. The
6256 * IEM_ACCESS_TYPE_XXX bit is used to figure out
6257 * how to map the memory, while the
6258 * IEM_ACCESS_WHAT_XXX bit is used when raising
6259 * exceptions.
6260 * @param uAlignCtl Alignment control:
6261 *                      - Bits 15:0 are the alignment mask.
6262 * - Bits 31:16 for flags like IEM_MEMMAP_F_ALIGN_GP,
6263 * IEM_MEMMAP_F_ALIGN_SSE, and
6264 * IEM_MEMMAP_F_ALIGN_GP_OR_AC.
6265 * Pass zero to skip alignment.
6266 */
6267void *iemMemMapJmp(PVMCPUCC pVCpu, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess,
6268 uint32_t uAlignCtl) RT_NOEXCEPT
6269{
6270 /*
6271 * Check the input, check segment access and adjust address
6272 * with segment base.
6273 */
6274 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
6275 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
6276 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
6277
6278 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
6279 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
6280 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
6281
6282 /*
6283 * Alignment check.
6284 */
6285 if ( (GCPtrMem & (uAlignCtl & UINT16_MAX)) == 0 )
6286 { /* likelyish */ }
6287 else
6288 {
6289 /* Misaligned access. */
6290 if ((fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6291 {
6292 if ( !(uAlignCtl & IEM_MEMMAP_F_ALIGN_GP)
6293 || ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_SSE)
6294 && (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) )
6295 {
6296 AssertCompile(X86_CR0_AM == X86_EFL_AC);
6297
6298 if (iemMemAreAlignmentChecksEnabled(pVCpu))
6299 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
6300 }
6301 else if ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_GP_OR_AC)
6302 && iemMemAreAlignmentChecksEnabled(pVCpu)
6303/** @todo may only apply to 2, 4 or 8 byte misalignments depending on the CPU
6304 * implementation. See FXSAVE/FRSTOR/XSAVE/XRSTOR/++. */
6305 )
6306 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
6307 else
6308 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
6309 }
6310 }
6311
6312 /*
6313 * Figure out which mapping entry to use.
6314 */
6315 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
6316 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6317 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
6318 {
6319 iMemMap = iemMemMapFindFree(pVCpu);
6320 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
6321 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
6322 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
6323 pVCpu->iem.s.aMemMappings[2].fAccess),
6324 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_9));
6325 }
6326
6327 /*
6328 * Crossing a page boundary?
6329 */
6330 if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE)
6331 { /* No (likely). */ }
6332 else
6333 {
6334 void *pvMem;
6335 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, cbMem, GCPtrMem, fAccess);
6336 if (rcStrict == VINF_SUCCESS)
6337 return pvMem;
6338 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
6339 }
6340
6341#ifdef IEM_WITH_DATA_TLB
6342 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
6343
6344 /*
6345 * Get the TLB entry for this page.
6346 */
6347 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
6348 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
6349 if (pTlbe->uTag == uTag)
6350 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
6351 else
6352 {
6353 pVCpu->iem.s.DataTlb.cTlbMisses++;
6354 PGMPTWALK Walk;
6355 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
6356 if (RT_FAILURE(rc))
6357 {
6358 Log(("iemMemMap: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
6359# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6360 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6361 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
6362# endif
6363 iemRaisePageFaultJmp(pVCpu, GCPtrMem, fAccess, rc);
6364 }
6365
6366 Assert(Walk.fSucceeded);
6367 pTlbe->uTag = uTag;
6368 pTlbe->fFlagsAndPhysRev = ~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A); /* skipping NX */
6369 pTlbe->GCPhys = Walk.GCPhys;
6370 pTlbe->pbMappingR3 = NULL;
6371 }
6372
6373 /*
6374 * Check the flags and physical revision.
6375 */
6376 /** @todo make the caller pass these in with fAccess. */
6377 uint64_t const fNoUser = (fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS && pVCpu->iem.s.uCpl == 3
6378 ? IEMTLBE_F_PT_NO_USER : 0;
6379 uint64_t const fNoWriteNoDirty = fAccess & IEM_ACCESS_TYPE_WRITE
6380 ? IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PT_NO_DIRTY
6381 | ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)
6382 || (pVCpu->iem.s.uCpl == 3 && (fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6383 ? IEMTLBE_F_PT_NO_WRITE : 0)
6384 : 0;
6385 uint64_t const fNoRead = fAccess & IEM_ACCESS_TYPE_READ ? IEMTLBE_F_PG_NO_READ : 0;
6386 uint8_t *pbMem = NULL;
6387 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PT_NO_ACCESSED | fNoRead | fNoWriteNoDirty | fNoUser))
6388 == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6389# ifdef IN_RING3
6390 pbMem = pTlbe->pbMappingR3;
6391# else
6392 pbMem = NULL;
6393# endif
6394 else
6395 {
6396 /*
6397 * Okay, something isn't quite right or needs refreshing.
6398 */
6399 /* Write to read only memory? */
6400 if (pTlbe->fFlagsAndPhysRev & fNoWriteNoDirty & IEMTLBE_F_PT_NO_WRITE)
6401 {
6402 Log(("iemMemMapJmp: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
6403# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6404 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6405 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6406# endif
6407 iemRaisePageFaultJmp(pVCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
6408 }
6409
6410 /* Kernel memory accessed by userland? */
6411 if (pTlbe->fFlagsAndPhysRev & fNoUser & IEMTLBE_F_PT_NO_USER)
6412 {
6413 Log(("iemMemMapJmp: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
6414# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6415 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6416 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6417# endif
6418 iemRaisePageFaultJmp(pVCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
6419 }
6420
6421 /* Set the dirty / access flags.
6422 ASSUMES this is set when the address is translated rather than on commit... */
6423 /** @todo testcase: check when A and D bits are actually set by the CPU. */
6424 if (pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY) | IEMTLBE_F_PT_NO_ACCESSED))
6425 {
6426 uint32_t const fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
6427 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
6428 AssertRC(rc2);
6429 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
6430 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
6431 pTlbe->fFlagsAndPhysRev &= ~((fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY) | IEMTLBE_F_PT_NO_ACCESSED);
6432 }
6433
6434 /*
6435 * Check if the physical page info needs updating.
6436 */
6437 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6438# ifdef IN_RING3
6439 pbMem = pTlbe->pbMappingR3;
6440# else
6441 pbMem = NULL;
6442# endif
6443 else
6444 {
6445 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
6446 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
6447 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
6448 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
6449 pTlbe->pbMappingR3 = NULL;
6450 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
6451 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_UNASSIGNED);
6452 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
6453 &pbMem, &pTlbe->fFlagsAndPhysRev);
6454 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
6455# ifdef IN_RING3
6456 pTlbe->pbMappingR3 = pbMem;
6457# endif
6458 }
6459
6460 /*
6461 * Check the physical page level access and mapping.
6462 */
6463 if (!(pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty | fNoRead) & (IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ))))
6464 { /* probably likely */ }
6465 else
6466 {
6467 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, (void **)&pbMem, cbMem,
6468 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,
6469 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED
6470 : pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? VERR_PGM_PHYS_TLB_CATCH_ALL
6471 : VERR_PGM_PHYS_TLB_CATCH_WRITE);
6472 if (rcStrict == VINF_SUCCESS)
6473 return pbMem;
6474 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
6475 }
6476 }
6477 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3)); /* ASSUMPTIONS about PGMPhysIemGCPhys2PtrNoLock behaviour. */
6478
6479 if (pbMem)
6480 {
6481 Assert(!((uintptr_t)pbMem & GUEST_PAGE_OFFSET_MASK));
6482 pbMem = pbMem + (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6483 fAccess |= IEM_ACCESS_NOT_LOCKED;
6484 }
6485 else
6486 {
6487 Assert(!(fAccess & IEM_ACCESS_NOT_LOCKED));
6488 RTGCPHYS const GCPhysFirst = pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6489 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6490 if (rcStrict == VINF_SUCCESS)
6491 return pbMem;
6492 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
6493 }
6494
6495 void * const pvMem = pbMem;
6496
6497 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6498 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6499 if (fAccess & IEM_ACCESS_TYPE_READ)
6500 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6501
6502#else /* !IEM_WITH_DATA_TLB */
6503
6504
6505 RTGCPHYS GCPhysFirst;
6506 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
6507 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
6508 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
6509
6510 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6511 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6512 if (fAccess & IEM_ACCESS_TYPE_READ)
6513 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6514
6515 void *pvMem;
6516 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6517 if (rcStrict == VINF_SUCCESS)
6518 { /* likely */ }
6519 else
6520 {
6521 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
6522 if (rcStrict == VINF_SUCCESS)
6523 return pvMem;
6524 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
6525 }
6526
6527#endif /* !IEM_WITH_DATA_TLB */
6528
6529 /*
6530 * Fill in the mapping table entry.
6531 */
6532 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
6533 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
6534 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6535 pVCpu->iem.s.cActiveMappings++;
6536
6537 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6538 return pvMem;
6539}
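
/*
 * Unlike iemMemMap above, failures here never reach the caller as a status
 * code: every error path unwinds via longjmp through
 * pVCpu->iem.s.CTX_SUFF(pJmpBuf), either directly or via the Jmp flavoured
 * exception raisers, so a returned pointer is always a valid mapping and must
 * later be released with iemMemCommitAndUnmapJmp.  The Jmp flavoured fetch
 * and store helpers further down illustrate the pattern.
 */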
6540
6541
6542/**
6543 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
6544 *
6545 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6546 * @param pvMem The mapping.
6547 * @param fAccess The kind of access.
6548 */
6549void iemMemCommitAndUnmapJmp(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess) RT_NOEXCEPT
6550{
6551 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
6552 AssertStmt(iMemMap >= 0, longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), iMemMap));
6553
6554 /* If it's bounce buffered, we may need to write back the buffer. */
6555 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6556 {
6557 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6558 {
6559 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
6560 if (rcStrict == VINF_SUCCESS)
6561 return;
6562 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
6563 }
6564 }
6565 /* Otherwise unlock it. */
6566 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6567 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6568
6569 /* Free the entry. */
6570 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6571 Assert(pVCpu->iem.s.cActiveMappings != 0);
6572 pVCpu->iem.s.cActiveMappings--;
6573}
6574
6575#endif /* IEM_WITH_SETJMP */
6576
6577#ifndef IN_RING3
6578/**
6579 * Commits the guest memory if bounce buffered and unmaps it; if any bounce
6580 * buffer part runs into trouble, the write is postponed to ring-3 (the VMCPU_FF_IEM force flag is set, among other things).
6581 *
6582 * Allows the instruction to be completed and retired, while the IEM user will
6583 * return to ring-3 immediately afterwards and do the postponed writes there.
6584 *
6585 * @returns VBox status code (no strict statuses). Caller must check
6586 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
6587 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6588 * @param pvMem The mapping.
6589 * @param fAccess The kind of access.
6590 */
6591VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess) RT_NOEXCEPT
6592{
6593 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
6594 AssertReturn(iMemMap >= 0, iMemMap);
6595
6596 /* If it's bounce buffered, we may need to write back the buffer. */
6597 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6598 {
6599 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6600 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
6601 }
6602 /* Otherwise unlock it. */
6603 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6604 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6605
6606 /* Free the entry. */
6607 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6608 Assert(pVCpu->iem.s.cActiveMappings != 0);
6609 pVCpu->iem.s.cActiveMappings--;
6610 return VINF_SUCCESS;
6611}
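
/*
 * A rough caller-side sketch of the check mentioned above (the surrounding
 * code is hypothetical):
 *
 *      rcStrict = iemMemCommitAndUnmapPostponeTroubleToR3(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
 *          return VINF_SUCCESS; // don't start another string iteration; ring-3 will do the postponed write
 *      // ... otherwise continue with the next iteration ...
 */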
6612#endif
6613
6614
6615/**
6616 * Rolls back mappings, releasing page locks and such.
6617 *
6618 * The caller shall only call this after checking cActiveMappings.
6619 *
6620 * @returns Strict VBox status code to pass up.
6621 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6622 */
6623void iemMemRollback(PVMCPUCC pVCpu) RT_NOEXCEPT
6624{
6625 Assert(pVCpu->iem.s.cActiveMappings > 0);
6626
6627 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
6628 while (iMemMap-- > 0)
6629 {
6630 uint32_t const fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
6631 if (fAccess != IEM_ACCESS_INVALID)
6632 {
6633 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
6634 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6635 if (!(fAccess & (IEM_ACCESS_BOUNCE_BUFFERED | IEM_ACCESS_NOT_LOCKED)))
6636 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6637 AssertMsg(pVCpu->iem.s.cActiveMappings > 0,
6638 ("iMemMap=%u fAccess=%#x pv=%p GCPhysFirst=%RGp GCPhysSecond=%RGp\n",
6639 iMemMap, fAccess, pVCpu->iem.s.aMemMappings[iMemMap].pv,
6640 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond));
6641 pVCpu->iem.s.cActiveMappings--;
6642 }
6643 }
6644}
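
/*
 * A minimal sketch of the intended call site (the instruction executor name
 * is hypothetical):
 *
 *      VBOXSTRICTRC rcStrict = iemExecuteInstructionSketch(pVCpu);
 *      if (rcStrict != VINF_SUCCESS && pVCpu->iem.s.cActiveMappings > 0)
 *          iemMemRollback(pVCpu);  // release any page locks the failed instruction left behind
 */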
6645
6646
6647/**
6648 * Fetches a data byte.
6649 *
6650 * @returns Strict VBox status code.
6651 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6652 * @param pu8Dst Where to return the byte.
6653 * @param iSegReg The index of the segment register to use for
6654 * this access. The base and limits are checked.
6655 * @param GCPtrMem The address of the guest memory.
6656 */
6657VBOXSTRICTRC iemMemFetchDataU8(PVMCPUCC pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6658{
6659 /* The lazy approach for now... */
6660 uint8_t const *pu8Src;
6661 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R, 0);
6662 if (rc == VINF_SUCCESS)
6663 {
6664 *pu8Dst = *pu8Src;
6665 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
6666 }
6667 return rc;
6668}
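
/*
 * A minimal usage sketch (X86_SREG_DS and GCPtrEffSrc are merely illustrative
 * names here):
 *
 *      uint8_t      u8Val;
 *      VBOXSTRICTRC rcStrict = iemMemFetchDataU8(pVCpu, &u8Val, X86_SREG_DS, GCPtrEffSrc);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;    // #PF, #GP, #SS and friends come back as strict status codes
 */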
6669
6670
6671#ifdef IEM_WITH_SETJMP
6672/**
6673 * Fetches a data byte, longjmp on error.
6674 *
6675 * @returns The byte.
6676 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6677 * @param iSegReg The index of the segment register to use for
6678 * this access. The base and limits are checked.
6679 * @param GCPtrMem The address of the guest memory.
6680 */
6681uint8_t iemMemFetchDataU8Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6682{
6683 /* The lazy approach for now... */
6684 uint8_t const *pu8Src = (uint8_t const *)iemMemMapJmp(pVCpu, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R, 0);
6685 uint8_t const bRet = *pu8Src;
6686 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
6687 return bRet;
6688}
6689#endif /* IEM_WITH_SETJMP */
6690
6691
6692/**
6693 * Fetches a data word.
6694 *
6695 * @returns Strict VBox status code.
6696 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6697 * @param pu16Dst Where to return the word.
6698 * @param iSegReg The index of the segment register to use for
6699 * this access. The base and limits are checked.
6700 * @param GCPtrMem The address of the guest memory.
6701 */
6702VBOXSTRICTRC iemMemFetchDataU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6703{
6704 /* The lazy approach for now... */
6705 uint16_t const *pu16Src;
6706 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem,
6707 IEM_ACCESS_DATA_R, sizeof(*pu16Src) - 1);
6708 if (rc == VINF_SUCCESS)
6709 {
6710 *pu16Dst = *pu16Src;
6711 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
6712 }
6713 return rc;
6714}
6715
6716
6717#ifdef IEM_WITH_SETJMP
6718/**
6719 * Fetches a data word, longjmp on error.
6720 *
6721 * @returns The word.
6722 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6723 * @param iSegReg The index of the segment register to use for
6724 * this access. The base and limits are checked.
6725 * @param GCPtrMem The address of the guest memory.
6726 */
6727uint16_t iemMemFetchDataU16Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6728{
6729 /* The lazy approach for now... */
6730 uint16_t const *pu16Src = (uint16_t const *)iemMemMapJmp(pVCpu, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
6731 sizeof(*pu16Src) - 1);
6732 uint16_t const u16Ret = *pu16Src;
6733 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
6734 return u16Ret;
6735}
6736#endif
6737
6738
6739/**
6740 * Fetches a data dword.
6741 *
6742 * @returns Strict VBox status code.
6743 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6744 * @param pu32Dst Where to return the dword.
6745 * @param iSegReg The index of the segment register to use for
6746 * this access. The base and limits are checked.
6747 * @param GCPtrMem The address of the guest memory.
6748 */
6749VBOXSTRICTRC iemMemFetchDataU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6750{
6751 /* The lazy approach for now... */
6752 uint32_t const *pu32Src;
6753 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem,
6754 IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
6755 if (rc == VINF_SUCCESS)
6756 {
6757 *pu32Dst = *pu32Src;
6758 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
6759 }
6760 return rc;
6761}
6762
6763
6764/**
6765 * Fetches a data dword and zero extends it to a qword.
6766 *
6767 * @returns Strict VBox status code.
6768 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6769 * @param pu64Dst Where to return the qword.
6770 * @param iSegReg The index of the segment register to use for
6771 * this access. The base and limits are checked.
6772 * @param GCPtrMem The address of the guest memory.
6773 */
6774VBOXSTRICTRC iemMemFetchDataU32_ZX_U64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6775{
6776 /* The lazy approach for now... */
6777 uint32_t const *pu32Src;
6778 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem,
6779 IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
6780 if (rc == VINF_SUCCESS)
6781 {
6782 *pu64Dst = *pu32Src;
6783 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
6784 }
6785 return rc;
6786}
6787
6788
6789#ifdef IEM_WITH_SETJMP
6790
6791/**
6792 * Fetches a data dword, longjmp on error, fallback/safe version.
6793 *
6794 * @returns The dword.
6795 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6796 * @param iSegReg The index of the segment register to use for
6797 * this access. The base and limits are checked.
6798 * @param GCPtrMem The address of the guest memory.
6799 */
6800uint32_t iemMemFetchDataU32SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6801{
6802 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
6803 sizeof(*pu32Src) - 1);
6804 uint32_t const u32Ret = *pu32Src;
6805 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
6806 return u32Ret;
6807}
6808
6809
6810/**
6811 * Fetches a data dword, longjmp on error.
6812 *
6813 * @returns The dword.
6814 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6815 * @param iSegReg The index of the segment register to use for
6816 * this access. The base and limits are checked.
6817 * @param GCPtrMem The address of the guest memory.
6818 */
6819uint32_t iemMemFetchDataU32Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6820{
6821# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
6822 /*
6823     * Convert from segmented to flat address and check that it doesn't cross a page boundary.
6824 */
6825 RTGCPTR GCPtrEff = iemMemApplySegmentToReadJmp(pVCpu, iSegReg, sizeof(uint32_t), GCPtrMem);
6826 if (RT_LIKELY((GCPtrEff & GUEST_PAGE_OFFSET_MASK) <= GUEST_PAGE_SIZE - sizeof(uint32_t)))
6827 {
6828 /*
6829 * TLB lookup.
6830 */
6831 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrEff);
6832 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
6833 if (pTlbe->uTag == uTag)
6834 {
6835 /*
6836 * Check TLB page table level access flags.
6837 */
6838 uint64_t const fNoUser = pVCpu->iem.s.uCpl == 3 ? IEMTLBE_F_PT_NO_USER : 0;
6839 if ( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_READ
6840 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_NO_MAPPINGR3 | fNoUser))
6841 == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6842 {
6843 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
6844
6845 /*
6846 * Alignment check:
6847 */
6848 /** @todo check priority \#AC vs \#PF */
6849 if ( !(GCPtrEff & (sizeof(uint32_t) - 1))
6850 || !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_AM)
6851 || !pVCpu->cpum.GstCtx.eflags.Bits.u1AC
6852 || pVCpu->iem.s.uCpl != 3)
6853 {
6854 /*
6855 * Fetch and return the dword
6856 */
6857 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
6858 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
6859 return *(uint32_t const *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK];
6860 }
6861 Log10(("iemMemFetchDataU32Jmp: Raising #AC for %RGv\n", GCPtrEff));
6862 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
6863 }
6864 }
6865 }
6866
6867    /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
6868       outdated page pointer, or other troubles.  */
6869 Log10(("iemMemFetchDataU32Jmp: %u:%RGv fallback\n", iSegReg, GCPtrMem));
6870 return iemMemFetchDataU32SafeJmp(pVCpu, iSegReg, GCPtrMem);
6871
6872# else
6873 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem,
6874 IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
6875 uint32_t const u32Ret = *pu32Src;
6876 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
6877 return u32Ret;
6878# endif
6879}
6880#endif
6881
6882
6883#ifdef SOME_UNUSED_FUNCTION
6884/**
6885 * Fetches a data dword and sign extends it to a qword.
6886 *
6887 * @returns Strict VBox status code.
6888 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6889 * @param pu64Dst Where to return the sign extended value.
6890 * @param iSegReg The index of the segment register to use for
6891 * this access. The base and limits are checked.
6892 * @param GCPtrMem The address of the guest memory.
6893 */
6894VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6895{
6896 /* The lazy approach for now... */
6897 int32_t const *pi32Src;
6898 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem,
6899 IEM_ACCESS_DATA_R, sizeof(*pi32Src) - 1);
6900 if (rc == VINF_SUCCESS)
6901 {
6902 *pu64Dst = *pi32Src;
6903 rc = iemMemCommitAndUnmap(pVCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
6904 }
6905#ifdef __GNUC__ /* warning: GCC may be a royal pain */
6906 else
6907 *pu64Dst = 0;
6908#endif
6909 return rc;
6910}
6911#endif
6912
6913
6914/**
6915 * Fetches a data qword.
6916 *
6917 * @returns Strict VBox status code.
6918 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6919 * @param pu64Dst Where to return the qword.
6920 * @param iSegReg The index of the segment register to use for
6921 * this access. The base and limits are checked.
6922 * @param GCPtrMem The address of the guest memory.
6923 */
6924VBOXSTRICTRC iemMemFetchDataU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6925{
6926 /* The lazy approach for now... */
6927 uint64_t const *pu64Src;
6928 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem,
6929 IEM_ACCESS_DATA_R, sizeof(*pu64Src) - 1);
6930 if (rc == VINF_SUCCESS)
6931 {
6932 *pu64Dst = *pu64Src;
6933 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
6934 }
6935 return rc;
6936}
6937
6938
6939#ifdef IEM_WITH_SETJMP
6940/**
6941 * Fetches a data qword, longjmp on error.
6942 *
6943 * @returns The qword.
6944 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6945 * @param iSegReg The index of the segment register to use for
6946 * this access. The base and limits are checked.
6947 * @param GCPtrMem The address of the guest memory.
6948 */
6949uint64_t iemMemFetchDataU64Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6950{
6951 /* The lazy approach for now... */
6952 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem,
6953 IEM_ACCESS_DATA_R, sizeof(*pu64Src) - 1);
6954 uint64_t const u64Ret = *pu64Src;
6955 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
6956 return u64Ret;
6957}
6958#endif
6959
6960
6961/**
6962 * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
6963 *
6964 * @returns Strict VBox status code.
6965 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6966 * @param pu64Dst Where to return the qword.
6967 * @param iSegReg The index of the segment register to use for
6968 * this access. The base and limits are checked.
6969 * @param GCPtrMem The address of the guest memory.
6970 */
6971VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6972{
6973 /* The lazy approach for now... */
6974 uint64_t const *pu64Src;
6975 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem,
6976 IEM_ACCESS_DATA_R, 15 | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
6977 if (rc == VINF_SUCCESS)
6978 {
6979 *pu64Dst = *pu64Src;
6980 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
6981 }
6982 return rc;
6983}
6984
6985
6986#ifdef IEM_WITH_SETJMP
6987/**
6988 * Fetches a data qword, aligned at a 16 byte boundary (for SSE), longjmp on error.
6989 *
6990 * @returns The qword.
6991 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6992 * @param iSegReg The index of the segment register to use for
6993 * this access. The base and limits are checked.
6994 * @param GCPtrMem The address of the guest memory.
6995 */
6996uint64_t iemMemFetchDataU64AlignedU128Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6997{
6998 /* The lazy approach for now... */
6999 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
7000 15 | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7001 uint64_t const u64Ret = *pu64Src;
7002 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
7003 return u64Ret;
7004}
7005#endif
7006
7007
7008/**
7009 * Fetches a data tword.
7010 *
7011 * @returns Strict VBox status code.
7012 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7013 * @param pr80Dst Where to return the tword.
7014 * @param iSegReg The index of the segment register to use for
7015 * this access. The base and limits are checked.
7016 * @param GCPtrMem The address of the guest memory.
7017 */
7018VBOXSTRICTRC iemMemFetchDataR80(PVMCPUCC pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7019{
7020 /* The lazy approach for now... */
7021 PCRTFLOAT80U pr80Src;
7022 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R, 7);
7023 if (rc == VINF_SUCCESS)
7024 {
7025 *pr80Dst = *pr80Src;
7026 rc = iemMemCommitAndUnmap(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
7027 }
7028 return rc;
7029}
7030
7031
7032#ifdef IEM_WITH_SETJMP
7033/**
7034 * Fetches a data tword, longjmp on error.
7035 *
7036 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7037 * @param pr80Dst Where to return the tword.
7038 * @param iSegReg The index of the segment register to use for
7039 * this access. The base and limits are checked.
7040 * @param GCPtrMem The address of the guest memory.
7041 */
7042void iemMemFetchDataR80Jmp(PVMCPUCC pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7043{
7044 /* The lazy approach for now... */
7045 PCRTFLOAT80U pr80Src = (PCRTFLOAT80U)iemMemMapJmp(pVCpu, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R, 7);
7046 *pr80Dst = *pr80Src;
7047 iemMemCommitAndUnmapJmp(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
7048}
7049#endif
7050
7051
7052/**
7053 * Fetches a data decimal tword.
7054 *
7055 * @returns Strict VBox status code.
7056 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7057 * @param pd80Dst Where to return the tword.
7058 * @param iSegReg The index of the segment register to use for
7059 * this access. The base and limits are checked.
7060 * @param GCPtrMem The address of the guest memory.
7061 */
7062VBOXSTRICTRC iemMemFetchDataD80(PVMCPUCC pVCpu, PRTPBCD80U pd80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7063{
7064 /* The lazy approach for now... */
7065 PCRTPBCD80U pd80Src;
7066 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pd80Src, sizeof(*pd80Src), iSegReg, GCPtrMem,
7067 IEM_ACCESS_DATA_R, 7 /** @todo FBLD alignment check */);
7068 if (rc == VINF_SUCCESS)
7069 {
7070 *pd80Dst = *pd80Src;
7071 rc = iemMemCommitAndUnmap(pVCpu, (void *)pd80Src, IEM_ACCESS_DATA_R);
7072 }
7073 return rc;
7074}
7075
7076
7077#ifdef IEM_WITH_SETJMP
7078/**
7079 * Fetches a data decimal tword, longjmp on error.
7080 *
7081 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7082 * @param pd80Dst Where to return the tword.
7083 * @param iSegReg The index of the segment register to use for
7084 * this access. The base and limits are checked.
7085 * @param GCPtrMem The address of the guest memory.
7086 */
7087void iemMemFetchDataD80Jmp(PVMCPUCC pVCpu, PRTPBCD80U pd80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7088{
7089 /* The lazy approach for now... */
7090 PCRTPBCD80U pd80Src = (PCRTPBCD80U)iemMemMapJmp(pVCpu, sizeof(*pd80Src), iSegReg, GCPtrMem,
7091 IEM_ACCESS_DATA_R, 7 /** @todo FBSTP alignment check */);
7092 *pd80Dst = *pd80Src;
7093 iemMemCommitAndUnmapJmp(pVCpu, (void *)pd80Src, IEM_ACCESS_DATA_R);
7094}
7095#endif
7096
7097
7098/**
7099 * Fetches a data dqword (double qword), generally SSE related.
7100 *
7101 * @returns Strict VBox status code.
7102 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7103 * @param   pu128Dst            Where to return the double qword.
7104 * @param iSegReg The index of the segment register to use for
7105 * this access. The base and limits are checked.
7106 * @param GCPtrMem The address of the guest memory.
7107 */
7108VBOXSTRICTRC iemMemFetchDataU128(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7109{
7110 /* The lazy approach for now... */
7111 PCRTUINT128U pu128Src;
7112 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem,
7113 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7114 if (rc == VINF_SUCCESS)
7115 {
7116 pu128Dst->au64[0] = pu128Src->au64[0];
7117 pu128Dst->au64[1] = pu128Src->au64[1];
7118 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7119 }
7120 return rc;
7121}
7122
7123
7124#ifdef IEM_WITH_SETJMP
7125/**
7126 * Fetches a data dqword (double qword), generally SSE related.
7127 *
7128 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7129 * @param   pu128Dst            Where to return the double qword.
7130 * @param iSegReg The index of the segment register to use for
7131 * this access. The base and limits are checked.
7132 * @param GCPtrMem The address of the guest memory.
7133 */
7134void iemMemFetchDataU128Jmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7135{
7136 /* The lazy approach for now... */
7137 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem,
7138 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7139 pu128Dst->au64[0] = pu128Src->au64[0];
7140 pu128Dst->au64[1] = pu128Src->au64[1];
7141 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7142}
7143#endif
7144
7145
7146/**
7147 * Fetches a data dqword (double qword) at an aligned address, generally SSE
7148 * related.
7149 *
7150 * Raises \#GP(0) if not aligned.
7151 *
7152 * @returns Strict VBox status code.
7153 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7154 * @param   pu128Dst            Where to return the double qword.
7155 * @param iSegReg The index of the segment register to use for
7156 * this access. The base and limits are checked.
7157 * @param GCPtrMem The address of the guest memory.
7158 */
7159VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7160{
7161 /* The lazy approach for now... */
7162 PCRTUINT128U pu128Src;
7163 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem,
7164 IEM_ACCESS_DATA_R, (sizeof(*pu128Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7165 if (rc == VINF_SUCCESS)
7166 {
7167 pu128Dst->au64[0] = pu128Src->au64[0];
7168 pu128Dst->au64[1] = pu128Src->au64[1];
7169 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7170 }
7171 return rc;
7172}
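
/*
 * The alignment control used above, (sizeof(*pu128Src) - 1) together with
 * IEM_MEMMAP_F_ALIGN_GP and IEM_MEMMAP_F_ALIGN_SSE, gives a 16 byte alignment
 * mask with #GP(0) on misalignment, except that MXCSR.MM switches it back to
 * ordinary #AC semantics (faulting only when alignment checks are enabled),
 * as implemented in iemMemMap above.
 */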
7173
7174
7175#ifdef IEM_WITH_SETJMP
7176/**
7177 * Fetches a data dqword (double qword) at an aligned address, generally SSE
7178 * related, longjmp on error.
7179 *
7180 * Raises \#GP(0) if not aligned.
7181 *
7182 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7183 * @param   pu128Dst            Where to return the double qword.
7184 * @param iSegReg The index of the segment register to use for
7185 * this access. The base and limits are checked.
7186 * @param GCPtrMem The address of the guest memory.
7187 */
7188void iemMemFetchDataU128AlignedSseJmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7189{
7190 /* The lazy approach for now... */
7191 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
7192 (sizeof(*pu128Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7193 pu128Dst->au64[0] = pu128Src->au64[0];
7194 pu128Dst->au64[1] = pu128Src->au64[1];
7195 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7196}
7197#endif
7198
7199
7200/**
7201 * Fetches a data oword (octo word), generally AVX related.
7202 *
7203 * @returns Strict VBox status code.
7204 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7205 * @param   pu256Dst            Where to return the octo word.
7206 * @param iSegReg The index of the segment register to use for
7207 * this access. The base and limits are checked.
7208 * @param GCPtrMem The address of the guest memory.
7209 */
7210VBOXSTRICTRC iemMemFetchDataU256(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7211{
7212 /* The lazy approach for now... */
7213 PCRTUINT256U pu256Src;
7214 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem,
7215 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7216 if (rc == VINF_SUCCESS)
7217 {
7218 pu256Dst->au64[0] = pu256Src->au64[0];
7219 pu256Dst->au64[1] = pu256Src->au64[1];
7220 pu256Dst->au64[2] = pu256Src->au64[2];
7221 pu256Dst->au64[3] = pu256Src->au64[3];
7222 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
7223 }
7224 return rc;
7225}
7226
7227
7228#ifdef IEM_WITH_SETJMP
7229/**
7230 * Fetches a data oword (octo word), generally AVX related.
7231 *
7232 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7233 * @param   pu256Dst            Where to return the octo word.
7234 * @param iSegReg The index of the segment register to use for
7235 * this access. The base and limits are checked.
7236 * @param GCPtrMem The address of the guest memory.
7237 */
7238void iemMemFetchDataU256Jmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7239{
7240 /* The lazy approach for now... */
7241 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem,
7242 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7243 pu256Dst->au64[0] = pu256Src->au64[0];
7244 pu256Dst->au64[1] = pu256Src->au64[1];
7245 pu256Dst->au64[2] = pu256Src->au64[2];
7246 pu256Dst->au64[3] = pu256Src->au64[3];
7247 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
7248}
7249#endif
7250
7251
7252/**
7253 * Fetches a data oword (octo word) at an aligned address, generally AVX
7254 * related.
7255 *
7256 * Raises \#GP(0) if not aligned.
7257 *
7258 * @returns Strict VBox status code.
7259 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7260 * @param   pu256Dst            Where to return the octo word.
7261 * @param iSegReg The index of the segment register to use for
7262 * this access. The base and limits are checked.
7263 * @param GCPtrMem The address of the guest memory.
7264 */
7265VBOXSTRICTRC iemMemFetchDataU256AlignedSse(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7266{
7267 /* The lazy approach for now... */
7268 PCRTUINT256U pu256Src;
7269 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem,
7270 IEM_ACCESS_DATA_R, (sizeof(*pu256Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7271 if (rc == VINF_SUCCESS)
7272 {
7273 pu256Dst->au64[0] = pu256Src->au64[0];
7274 pu256Dst->au64[1] = pu256Src->au64[1];
7275 pu256Dst->au64[2] = pu256Src->au64[2];
7276 pu256Dst->au64[3] = pu256Src->au64[3];
7277 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
7278 }
7279 return rc;
7280}
7281
7282
7283#ifdef IEM_WITH_SETJMP
7284/**
7285 * Fetches a data oword (octo word) at an aligned address, generally AVX
7286 * related, longjmp on error.
7287 *
7288 * Raises \#GP(0) if not aligned.
7289 *
7290 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7291 * @param   pu256Dst            Where to return the octo word.
7292 * @param iSegReg The index of the segment register to use for
7293 * this access. The base and limits are checked.
7294 * @param GCPtrMem The address of the guest memory.
7295 */
7296void iemMemFetchDataU256AlignedSseJmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7297{
7298 /* The lazy approach for now... */
7299 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
7300 (sizeof(*pu256Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7301 pu256Dst->au64[0] = pu256Src->au64[0];
7302 pu256Dst->au64[1] = pu256Src->au64[1];
7303 pu256Dst->au64[2] = pu256Src->au64[2];
7304 pu256Dst->au64[3] = pu256Src->au64[3];
7305 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
7306}
7307#endif
7308
7309
7310
7311/**
7312 * Fetches a descriptor register (lgdt, lidt).
7313 *
7314 * @returns Strict VBox status code.
7315 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7316 * @param pcbLimit Where to return the limit.
7317 * @param pGCPtrBase Where to return the base.
7318 * @param iSegReg The index of the segment register to use for
7319 * this access. The base and limits are checked.
7320 * @param GCPtrMem The address of the guest memory.
7321 * @param enmOpSize The effective operand size.
7322 */
7323VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPUCC pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
7324 RTGCPTR GCPtrMem, IEMMODE enmOpSize) RT_NOEXCEPT
7325{
7326 /*
7327 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
7328 * little special:
7329 * - The two reads are done separately.
7330     *  - Operand size override works in 16-bit and 32-bit code, but not in 64-bit code.
7331 * - We suspect the 386 to actually commit the limit before the base in
7332 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
7333     *    don't try to emulate this eccentric behavior, because it's not well
7334 * enough understood and rather hard to trigger.
7335 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
7336 */
7337 VBOXSTRICTRC rcStrict;
7338 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7339 {
7340 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7341 if (rcStrict == VINF_SUCCESS)
7342 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
7343 }
7344 else
7345 {
7346        uint32_t uTmp = 0; /* (Silences a Visual C++ possibly-used-uninitialized warning.) */
7347 if (enmOpSize == IEMMODE_32BIT)
7348 {
7349 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
7350 {
7351 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7352 if (rcStrict == VINF_SUCCESS)
7353 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7354 }
7355 else
7356 {
7357 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
7358 if (rcStrict == VINF_SUCCESS)
7359 {
7360 *pcbLimit = (uint16_t)uTmp;
7361 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7362 }
7363 }
7364 if (rcStrict == VINF_SUCCESS)
7365 *pGCPtrBase = uTmp;
7366 }
7367 else
7368 {
7369 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7370 if (rcStrict == VINF_SUCCESS)
7371 {
7372 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7373 if (rcStrict == VINF_SUCCESS)
7374 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
7375 }
7376 }
7377 }
7378 return rcStrict;
7379}
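
/*
 * Worked example for the 16-bit operand size case above: given the six bytes
 * FF 03 00 10 20 C0 at GCPtrMem (little endian), the limit reads as 0x03FF
 * and the dword fetched at GCPtrMem+2 is 0xC0201000, which the
 * UINT32_C(0x00ffffff) mask reduces to a base of 0x00201000; the top byte is
 * discarded, matching the 24-bit bases of 286-style descriptors.
 */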
7380
7381
7382
7383/**
7384 * Stores a data byte.
7385 *
7386 * @returns Strict VBox status code.
7387 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7388 * @param iSegReg The index of the segment register to use for
7389 * this access. The base and limits are checked.
7390 * @param GCPtrMem The address of the guest memory.
7391 * @param u8Value The value to store.
7392 */
7393VBOXSTRICTRC iemMemStoreDataU8(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value) RT_NOEXCEPT
7394{
7395 /* The lazy approach for now... */
7396 uint8_t *pu8Dst;
7397 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W, 0);
7398 if (rc == VINF_SUCCESS)
7399 {
7400 *pu8Dst = u8Value;
7401 rc = iemMemCommitAndUnmap(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
7402 }
7403 return rc;
7404}
7405
7406
7407#ifdef IEM_WITH_SETJMP
7408/**
7409 * Stores a data byte, longjmp on error.
7410 *
7411 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7412 * @param iSegReg The index of the segment register to use for
7413 * this access. The base and limits are checked.
7414 * @param GCPtrMem The address of the guest memory.
7415 * @param u8Value The value to store.
7416 */
7417void iemMemStoreDataU8Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value) RT_NOEXCEPT
7418{
7419 /* The lazy approach for now... */
7420 uint8_t *pu8Dst = (uint8_t *)iemMemMapJmp(pVCpu, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W, 0);
7421 *pu8Dst = u8Value;
7422 iemMemCommitAndUnmapJmp(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
7423}
7424#endif
7425
7426
7427/**
7428 * Stores a data word.
7429 *
7430 * @returns Strict VBox status code.
7431 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7432 * @param iSegReg The index of the segment register to use for
7433 * this access. The base and limits are checked.
7434 * @param GCPtrMem The address of the guest memory.
7435 * @param u16Value The value to store.
7436 */
7437VBOXSTRICTRC iemMemStoreDataU16(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value) RT_NOEXCEPT
7438{
7439 /* The lazy approach for now... */
7440 uint16_t *pu16Dst;
7441 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem,
7442 IEM_ACCESS_DATA_W, sizeof(*pu16Dst) - 1);
7443 if (rc == VINF_SUCCESS)
7444 {
7445 *pu16Dst = u16Value;
7446 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
7447 }
7448 return rc;
7449}
7450
7451
7452#ifdef IEM_WITH_SETJMP
7453/**
7454 * Stores a data word, longjmp on error.
7455 *
7456 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7457 * @param iSegReg The index of the segment register to use for
7458 * this access. The base and limits are checked.
7459 * @param GCPtrMem The address of the guest memory.
7460 * @param u16Value The value to store.
7461 */
7462void iemMemStoreDataU16Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value) RT_NOEXCEPT
7463{
7464 /* The lazy approach for now... */
7465 uint16_t *pu16Dst = (uint16_t *)iemMemMapJmp(pVCpu, sizeof(*pu16Dst), iSegReg, GCPtrMem,
7466 IEM_ACCESS_DATA_W, sizeof(*pu16Dst) - 1);
7467 *pu16Dst = u16Value;
7468 iemMemCommitAndUnmapJmp(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
7469}
7470#endif
7471
7472
7473/**
7474 * Stores a data dword.
7475 *
7476 * @returns Strict VBox status code.
7477 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7478 * @param iSegReg The index of the segment register to use for
7479 * this access. The base and limits are checked.
7480 * @param GCPtrMem The address of the guest memory.
7481 * @param u32Value The value to store.
7482 */
7483VBOXSTRICTRC iemMemStoreDataU32(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value) RT_NOEXCEPT
7484{
7485 /* The lazy approach for now... */
7486 uint32_t *pu32Dst;
7487 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem,
7488 IEM_ACCESS_DATA_W, sizeof(*pu32Dst) - 1);
7489 if (rc == VINF_SUCCESS)
7490 {
7491 *pu32Dst = u32Value;
7492 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
7493 }
7494 return rc;
7495}
7496
7497
7498#ifdef IEM_WITH_SETJMP
7499/**
7500 * Stores a data dword, longjmp on error.
7501 *
7503 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7504 * @param iSegReg The index of the segment register to use for
7505 * this access. The base and limits are checked.
7506 * @param GCPtrMem The address of the guest memory.
7507 * @param u32Value The value to store.
7508 */
7509void iemMemStoreDataU32Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value) RT_NOEXCEPT
7510{
7511 /* The lazy approach for now... */
7512 uint32_t *pu32Dst = (uint32_t *)iemMemMapJmp(pVCpu, sizeof(*pu32Dst), iSegReg, GCPtrMem,
7513 IEM_ACCESS_DATA_W, sizeof(*pu32Dst) - 1);
7514 *pu32Dst = u32Value;
7515 iemMemCommitAndUnmapJmp(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
7516}
7517#endif
7518
7519
7520/**
7521 * Stores a data qword.
7522 *
7523 * @returns Strict VBox status code.
7524 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7525 * @param iSegReg The index of the segment register to use for
7526 * this access. The base and limits are checked.
7527 * @param GCPtrMem The address of the guest memory.
7528 * @param u64Value The value to store.
7529 */
7530VBOXSTRICTRC iemMemStoreDataU64(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value) RT_NOEXCEPT
7531{
7532 /* The lazy approach for now... */
7533 uint64_t *pu64Dst;
7534 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem,
7535 IEM_ACCESS_DATA_W, sizeof(*pu64Dst) - 1);
7536 if (rc == VINF_SUCCESS)
7537 {
7538 *pu64Dst = u64Value;
7539 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
7540 }
7541 return rc;
7542}
7543
7544
7545#ifdef IEM_WITH_SETJMP
7546/**
7547 * Stores a data qword, longjmp on error.
7548 *
7549 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7550 * @param iSegReg The index of the segment register to use for
7551 * this access. The base and limits are checked.
7552 * @param GCPtrMem The address of the guest memory.
7553 * @param u64Value The value to store.
7554 */
7555void iemMemStoreDataU64Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value) RT_NOEXCEPT
7556{
7557 /* The lazy approach for now... */
7558 uint64_t *pu64Dst = (uint64_t *)iemMemMapJmp(pVCpu, sizeof(*pu64Dst), iSegReg, GCPtrMem,
7559 IEM_ACCESS_DATA_W, sizeof(*pu64Dst) - 1);
7560 *pu64Dst = u64Value;
7561 iemMemCommitAndUnmapJmp(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
7562}
7563#endif
7564
7565
7566/**
7567 * Stores a data dqword.
7568 *
7569 * @returns Strict VBox status code.
7570 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7571 * @param iSegReg The index of the segment register to use for
7572 * this access. The base and limits are checked.
7573 * @param GCPtrMem The address of the guest memory.
7574 * @param u128Value The value to store.
7575 */
7576VBOXSTRICTRC iemMemStoreDataU128(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT
7577{
7578 /* The lazy approach for now... */
7579 PRTUINT128U pu128Dst;
7580 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem,
7581 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7582 if (rc == VINF_SUCCESS)
7583 {
7584 pu128Dst->au64[0] = u128Value.au64[0];
7585 pu128Dst->au64[1] = u128Value.au64[1];
7586 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
7587 }
7588 return rc;
7589}
7590
7591
7592#ifdef IEM_WITH_SETJMP
7593/**
7594 * Stores a data dqword, longjmp on error.
7595 *
7596 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7597 * @param iSegReg The index of the segment register to use for
7598 * this access. The base and limits are checked.
7599 * @param GCPtrMem The address of the guest memory.
7600 * @param u128Value The value to store.
7601 */
7602void iemMemStoreDataU128Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT
7603{
7604 /* The lazy approach for now... */
7605 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem,
7606 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7607 pu128Dst->au64[0] = u128Value.au64[0];
7608 pu128Dst->au64[1] = u128Value.au64[1];
7609 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
7610}
7611#endif
7612
7613
7614/**
7615 * Stores a data dqword, SSE aligned.
7616 *
7617 * @returns Strict VBox status code.
7618 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7619 * @param iSegReg The index of the segment register to use for
7620 * this access. The base and limits are checked.
7621 * @param GCPtrMem The address of the guest memory.
7622 * @param u128Value The value to store.
7623 */
7624VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT
7625{
7626 /* The lazy approach for now... */
7627 PRTUINT128U pu128Dst;
7628 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W,
7629 (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7630 if (rc == VINF_SUCCESS)
7631 {
7632 pu128Dst->au64[0] = u128Value.au64[0];
7633 pu128Dst->au64[1] = u128Value.au64[1];
7634 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
7635 }
7636 return rc;
7637}
7638
7639
7640#ifdef IEM_WITH_SETJMP
7641/**
7642 * Stores a data dqword, SSE aligned, longjmp on error.
7643 *
7645 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7646 * @param iSegReg The index of the segment register to use for
7647 * this access. The base and limits are checked.
7648 * @param GCPtrMem The address of the guest memory.
7649 * @param u128Value The value to store.
7650 */
7651void iemMemStoreDataU128AlignedSseJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT
7652{
7653 /* The lazy approach for now... */
7654 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W,
7655 (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7656 pu128Dst->au64[0] = u128Value.au64[0];
7657 pu128Dst->au64[1] = u128Value.au64[1];
7658 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
7659}
7660#endif
7661
7662
7663/**
7664 * Stores a 256-bit data value.
7665 *
7666 * @returns Strict VBox status code.
7667 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7668 * @param iSegReg The index of the segment register to use for
7669 * this access. The base and limits are checked.
7670 * @param GCPtrMem The address of the guest memory.
7671 * @param pu256Value Pointer to the value to store.
7672 */
7673VBOXSTRICTRC iemMemStoreDataU256(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
7674{
7675 /* The lazy approach for now... */
7676 PRTUINT256U pu256Dst;
7677 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7678 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7679 if (rc == VINF_SUCCESS)
7680 {
7681 pu256Dst->au64[0] = pu256Value->au64[0];
7682 pu256Dst->au64[1] = pu256Value->au64[1];
7683 pu256Dst->au64[2] = pu256Value->au64[2];
7684 pu256Dst->au64[3] = pu256Value->au64[3];
7685 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
7686 }
7687 return rc;
7688}
7689
7690
7691#ifdef IEM_WITH_SETJMP
7692/**
7693 * Stores a 256-bit data value, longjmp on error.
7694 *
7695 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7696 * @param iSegReg The index of the segment register to use for
7697 * this access. The base and limits are checked.
7698 * @param GCPtrMem The address of the guest memory.
7699 * @param pu256Value Pointer to the value to store.
7700 */
7701void iemMemStoreDataU256Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
7702{
7703 /* The lazy approach for now... */
7704 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7705 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7706 pu256Dst->au64[0] = pu256Value->au64[0];
7707 pu256Dst->au64[1] = pu256Value->au64[1];
7708 pu256Dst->au64[2] = pu256Value->au64[2];
7709 pu256Dst->au64[3] = pu256Value->au64[3];
7710 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
7711}
7712#endif
7713
7714
7715/**
7716 * Stores a 256-bit data value, AVX \#GP(0) aligned.
7717 *
7718 * @returns Strict VBox status code.
7719 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7720 * @param iSegReg The index of the segment register to use for
7721 * this access. The base and limits are checked.
7722 * @param GCPtrMem The address of the guest memory.
7723 * @param pu256Value Pointer to the value to store.
7724 */
7725VBOXSTRICTRC iemMemStoreDataU256AlignedAvx(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
7726{
7727 /* The lazy approach for now... */
7728 PRTUINT256U pu256Dst;
7729 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7730 IEM_ACCESS_DATA_W, (sizeof(*pu256Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP);
7731 if (rc == VINF_SUCCESS)
7732 {
7733 pu256Dst->au64[0] = pu256Value->au64[0];
7734 pu256Dst->au64[1] = pu256Value->au64[1];
7735 pu256Dst->au64[2] = pu256Value->au64[2];
7736 pu256Dst->au64[3] = pu256Value->au64[3];
7737 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
7738 }
7739 return rc;
7740}
7741
7742
7743#ifdef IEM_WITH_SETJMP
7744/**
7745 * Stores a 256-bit data value, AVX aligned, longjmp on error.
7746 *
7748 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7749 * @param iSegReg The index of the segment register to use for
7750 * this access. The base and limits are checked.
7751 * @param GCPtrMem The address of the guest memory.
7752 * @param pu256Value Pointer to the value to store.
7753 */
7754void iemMemStoreDataU256AlignedAvxJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
7755{
7756 /* The lazy approach for now... */
7757 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7758 IEM_ACCESS_DATA_W, (sizeof(*pu256Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP);
7759 pu256Dst->au64[0] = pu256Value->au64[0];
7760 pu256Dst->au64[1] = pu256Value->au64[1];
7761 pu256Dst->au64[2] = pu256Value->au64[2];
7762 pu256Dst->au64[3] = pu256Value->au64[3];
7763 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
7764}
7765#endif
7766
7767
7768/**
7769 * Stores a descriptor register (sgdt, sidt).
7770 *
7771 * @returns Strict VBox status code.
7772 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7773 * @param cbLimit The limit.
7774 * @param GCPtrBase The base address.
7775 * @param iSegReg The index of the segment register to use for
7776 * this access. The base and limits are checked.
7777 * @param GCPtrMem The address of the guest memory.
7778 */
7779VBOXSTRICTRC iemMemStoreDataXdtr(PVMCPUCC pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7780{
7781 /*
7782 * The SIDT and SGDT instructions actually store the data using two
7783 * independent writes (see bs3CpuBasic2_sidt_sgdt_One). The instructions
7784 * do not respond to opsize prefixes.
7785 */
7786 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
7787 if (rcStrict == VINF_SUCCESS)
7788 {
7789 if (pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT)
7790 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
7791 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
7792 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
7793 else if (pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT)
7794 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
7795 else
7796 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
7797 }
7798 return rcStrict;
7799}
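/*
 * Illustrative sketch (not part of the build): the two independent writes the
 * store helper above performs for SGDT/SIDT, expressed with the simpler store
 * helpers it builds on.  The 286 quirk of forcing the top base byte to 0xff
 * and the 64-bit base case are omitted for brevity.
 *
 * @code
 *    // The 16-bit limit always lands in the first two bytes ...
 *    VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
 *    // ... followed by the 32-bit base at offset 2 (16-bit and 32-bit modes).
 *    if (rcStrict == VINF_SUCCESS)
 *        rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
 * @endcode
 */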
7800
7801
7802/**
7803 * Pushes a word onto the stack.
7804 *
7805 * @returns Strict VBox status code.
7806 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7807 * @param u16Value The value to push.
7808 */
7809VBOXSTRICTRC iemMemStackPushU16(PVMCPUCC pVCpu, uint16_t u16Value) RT_NOEXCEPT
7810{
7811 /* Decrement the stack pointer. */
7812 uint64_t uNewRsp;
7813 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 2, &uNewRsp);
7814
7815 /* Write the word the lazy way. */
7816 uint16_t *pu16Dst;
7817 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop,
7818 IEM_ACCESS_STACK_W, sizeof(*pu16Dst) - 1);
7819 if (rc == VINF_SUCCESS)
7820 {
7821 *pu16Dst = u16Value;
7822 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
7823 }
7824
7825 /* Commit the new RSP value unless an access handler made trouble. */
7826 if (rc == VINF_SUCCESS)
7827 pVCpu->cpum.GstCtx.rsp = uNewRsp;
7828
7829 return rc;
7830}
7831
7832
7833/**
7834 * Pushes a dword onto the stack.
7835 *
7836 * @returns Strict VBox status code.
7837 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7838 * @param u32Value The value to push.
7839 */
7840VBOXSTRICTRC iemMemStackPushU32(PVMCPUCC pVCpu, uint32_t u32Value) RT_NOEXCEPT
7841{
7842 /* Decrement the stack pointer. */
7843 uint64_t uNewRsp;
7844 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
7845
7846 /* Write the dword the lazy way. */
7847 uint32_t *pu32Dst;
7848 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop,
7849 IEM_ACCESS_STACK_W, sizeof(*pu32Dst) - 1);
7850 if (rc == VINF_SUCCESS)
7851 {
7852 *pu32Dst = u32Value;
7853 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
7854 }
7855
7856 /* Commit the new RSP value unless an access handler made trouble. */
7857 if (rc == VINF_SUCCESS)
7858 pVCpu->cpum.GstCtx.rsp = uNewRsp;
7859
7860 return rc;
7861}
7862
7863
7864/**
7865 * Pushes a dword segment register value onto the stack.
7866 *
7867 * @returns Strict VBox status code.
7868 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7869 * @param u32Value The value to push.
7870 */
7871VBOXSTRICTRC iemMemStackPushU32SReg(PVMCPUCC pVCpu, uint32_t u32Value) RT_NOEXCEPT
7872{
7873 /* Decrement the stack pointer. */
7874 uint64_t uNewRsp;
7875 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
7876
7877 /* The Intel docs talk about zero extending the selector register
7878 value. My actual Intel CPU here might be zero extending the value,
7879 but it still only writes the lower word... */
7880 /** @todo Test this on new HW and on AMD and in 64-bit mode. Also test what
7881 * happens when crossing an electric page boundary: is the high word checked
7882 * for write accessibility or not? Probably it is. What about segment limits?
7883 * It appears this behavior is also shared with trap error codes.
7884 *
7885 * Docs indicate the behavior may have changed with the Pentium or Pentium Pro. Check
7886 * on ancient hardware to find out when it actually did change. */
7887 uint16_t *pu16Dst;
7888 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop,
7889 IEM_ACCESS_STACK_RW, sizeof(*pu16Dst) - 1); /** @todo 2 or 4 alignment check for PUSH SS? */
7890 if (rc == VINF_SUCCESS)
7891 {
7892 *pu16Dst = (uint16_t)u32Value;
7893 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_RW);
7894 }
7895
7896 /* Commit the new RSP value unless an access handler made trouble. */
7897 if (rc == VINF_SUCCESS)
7898 pVCpu->cpum.GstCtx.rsp = uNewRsp;
7899
7900 return rc;
7901}
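/*
 * Illustrative sketch (not part of the build): the observable effect of the
 * segment register push above with a 32-bit operand size.  The 0xdeadbeef
 * slot contents are hypothetical; the point is that a full dword of stack is
 * reserved while only the low word is written.
 *
 * @code
 *    // Stack slot before the push:  0xdeadbeef
 *    VBOXSTRICTRC rcStrict = iemMemStackPushU32SReg(pVCpu, pVCpu->cpum.GstCtx.ss.Sel);
 *    // On success ESP/RSP has dropped by 4, yet the slot reads 0xdeadSSSS,
 *    // where SSSS is the selector value - the high word is left untouched.
 * @endcode
 */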
7902
7903
7904/**
7905 * Pushes a qword onto the stack.
7906 *
7907 * @returns Strict VBox status code.
7908 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7909 * @param u64Value The value to push.
7910 */
7911VBOXSTRICTRC iemMemStackPushU64(PVMCPUCC pVCpu, uint64_t u64Value) RT_NOEXCEPT
7912{
7913 /* Decrement the stack pointer. */
7914 uint64_t uNewRsp;
7915 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 8, &uNewRsp);
7916
7917 /* Write the qword the lazy way. */
7918 uint64_t *pu64Dst;
7919 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop,
7920 IEM_ACCESS_STACK_W, sizeof(*pu64Dst) - 1);
7921 if (rc == VINF_SUCCESS)
7922 {
7923 *pu64Dst = u64Value;
7924 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
7925 }
7926
7927 /* Commit the new RSP value unless an access handler made trouble. */
7928 if (rc == VINF_SUCCESS)
7929 pVCpu->cpum.GstCtx.rsp = uNewRsp;
7930
7931 return rc;
7932}
7933
7934
7935/**
7936 * Pops a word from the stack.
7937 *
7938 * @returns Strict VBox status code.
7939 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7940 * @param pu16Value Where to store the popped value.
7941 */
7942VBOXSTRICTRC iemMemStackPopU16(PVMCPUCC pVCpu, uint16_t *pu16Value) RT_NOEXCEPT
7943{
7944 /* Increment the stack pointer. */
7945 uint64_t uNewRsp;
7946 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 2, &uNewRsp);
7947
7948 /* Fetch the word the lazy way. */
7949 uint16_t const *pu16Src;
7950 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop,
7951 IEM_ACCESS_STACK_R, sizeof(*pu16Src) - 1);
7952 if (rc == VINF_SUCCESS)
7953 {
7954 *pu16Value = *pu16Src;
7955 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
7956
7957 /* Commit the new RSP value. */
7958 if (rc == VINF_SUCCESS)
7959 pVCpu->cpum.GstCtx.rsp = uNewRsp;
7960 }
7961
7962 return rc;
7963}
7964
7965
7966/**
7967 * Pops a dword from the stack.
7968 *
7969 * @returns Strict VBox status code.
7970 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7971 * @param pu32Value Where to store the popped value.
7972 */
7973VBOXSTRICTRC iemMemStackPopU32(PVMCPUCC pVCpu, uint32_t *pu32Value) RT_NOEXCEPT
7974{
7975 /* Increment the stack pointer. */
7976 uint64_t uNewRsp;
7977 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 4, &uNewRsp);
7978
7979 /* Fetch the dword the lazy way. */
7980 uint32_t const *pu32Src;
7981 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop,
7982 IEM_ACCESS_STACK_R, sizeof(*pu32Src) - 1);
7983 if (rc == VINF_SUCCESS)
7984 {
7985 *pu32Value = *pu32Src;
7986 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
7987
7988 /* Commit the new RSP value. */
7989 if (rc == VINF_SUCCESS)
7990 pVCpu->cpum.GstCtx.rsp = uNewRsp;
7991 }
7992
7993 return rc;
7994}
7995
7996
7997/**
7998 * Pops a qword from the stack.
7999 *
8000 * @returns Strict VBox status code.
8001 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8002 * @param pu64Value Where to store the popped value.
8003 */
8004VBOXSTRICTRC iemMemStackPopU64(PVMCPUCC pVCpu, uint64_t *pu64Value) RT_NOEXCEPT
8005{
8006 /* Increment the stack pointer. */
8007 uint64_t uNewRsp;
8008 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 8, &uNewRsp);
8009
8010 /* Fetch the qword the lazy way. */
8011 uint64_t const *pu64Src;
8012 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop,
8013 IEM_ACCESS_STACK_R, sizeof(*pu64Src) - 1);
8014 if (rc == VINF_SUCCESS)
8015 {
8016 *pu64Value = *pu64Src;
8017 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
8018
8019 /* Commit the new RSP value. */
8020 if (rc == VINF_SUCCESS)
8021 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8022 }
8023
8024 return rc;
8025}
8026
8027
8028/**
8029 * Pushes a word onto the stack, using a temporary stack pointer.
8030 *
8031 * @returns Strict VBox status code.
8032 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8033 * @param u16Value The value to push.
8034 * @param pTmpRsp Pointer to the temporary stack pointer.
8035 */
8036VBOXSTRICTRC iemMemStackPushU16Ex(PVMCPUCC pVCpu, uint16_t u16Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8037{
8038 /* Decrement the stack pointer. */
8039 RTUINT64U NewRsp = *pTmpRsp;
8040 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 2);
8041
8042 /* Write the word the lazy way. */
8043 uint16_t *pu16Dst;
8044 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop,
8045 IEM_ACCESS_STACK_W, sizeof(*pu16Dst) - 1);
8046 if (rc == VINF_SUCCESS)
8047 {
8048 *pu16Dst = u16Value;
8049 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
8050 }
8051
8052 /* Commit the new RSP value unless an access handler made trouble. */
8053 if (rc == VINF_SUCCESS)
8054 *pTmpRsp = NewRsp;
8055
8056 return rc;
8057}
8058
8059
8060/**
8061 * Pushes a dword onto the stack, using a temporary stack pointer.
8062 *
8063 * @returns Strict VBox status code.
8064 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8065 * @param u32Value The value to push.
8066 * @param pTmpRsp Pointer to the temporary stack pointer.
8067 */
8068VBOXSTRICTRC iemMemStackPushU32Ex(PVMCPUCC pVCpu, uint32_t u32Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8069{
8070 /* Decrement the stack pointer. */
8071 RTUINT64U NewRsp = *pTmpRsp;
8072 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 4);
8073
8074 /* Write the dword the lazy way. */
8075 uint32_t *pu32Dst;
8076 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop,
8077 IEM_ACCESS_STACK_W, sizeof(*pu32Dst) - 1);
8078 if (rc == VINF_SUCCESS)
8079 {
8080 *pu32Dst = u32Value;
8081 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
8082 }
8083
8084 /* Commit the new RSP value unless an access handler made trouble. */
8085 if (rc == VINF_SUCCESS)
8086 *pTmpRsp = NewRsp;
8087
8088 return rc;
8089}
8090
8091
8092/**
8093 * Pushes a qword onto the stack, using a temporary stack pointer.
8094 *
8095 * @returns Strict VBox status code.
8096 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8097 * @param u64Value The value to push.
8098 * @param pTmpRsp Pointer to the temporary stack pointer.
8099 */
8100VBOXSTRICTRC iemMemStackPushU64Ex(PVMCPUCC pVCpu, uint64_t u64Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8101{
8102 /* Decrement the stack pointer. */
8103 RTUINT64U NewRsp = *pTmpRsp;
8104 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 8);
8105
8106 /* Write the qword the lazy way. */
8107 uint64_t *pu64Dst;
8108 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop,
8109 IEM_ACCESS_STACK_W, sizeof(*pu64Dst) - 1);
8110 if (rc == VINF_SUCCESS)
8111 {
8112 *pu64Dst = u64Value;
8113 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
8114 }
8115
8116 /* Commit the new RSP value unless an access handler made trouble. */
8117 if (rc == VINF_SUCCESS)
8118 *pTmpRsp = NewRsp;
8119
8120 return rc;
8121}
8122
8123
8124/**
8125 * Pops a word from the stack, using a temporary stack pointer.
8126 *
8127 * @returns Strict VBox status code.
8128 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8129 * @param pu16Value Where to store the popped value.
8130 * @param pTmpRsp Pointer to the temporary stack pointer.
8131 */
8132VBOXSTRICTRC iemMemStackPopU16Ex(PVMCPUCC pVCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8133{
8134 /* Increment the stack pointer. */
8135 RTUINT64U NewRsp = *pTmpRsp;
8136 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 2);
8137
8138 /* Fetch the word the lazy way. */
8139 uint16_t const *pu16Src;
8140 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop,
8141 IEM_ACCESS_STACK_R, sizeof(*pu16Src) - 1);
8142 if (rc == VINF_SUCCESS)
8143 {
8144 *pu16Value = *pu16Src;
8145 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
8146
8147 /* Commit the new RSP value. */
8148 if (rc == VINF_SUCCESS)
8149 *pTmpRsp = NewRsp;
8150 }
8151
8152 return rc;
8153}
8154
8155
8156/**
8157 * Pops a dword from the stack, using a temporary stack pointer.
8158 *
8159 * @returns Strict VBox status code.
8160 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8161 * @param pu32Value Where to store the popped value.
8162 * @param pTmpRsp Pointer to the temporary stack pointer.
8163 */
8164VBOXSTRICTRC iemMemStackPopU32Ex(PVMCPUCC pVCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8165{
8166 /* Increment the stack pointer. */
8167 RTUINT64U NewRsp = *pTmpRsp;
8168 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 4);
8169
8170 /* Fetch the dword the lazy way. */
8171 uint32_t const *pu32Src;
8172 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop,
8173 IEM_ACCESS_STACK_R, sizeof(*pu32Src) - 1);
8174 if (rc == VINF_SUCCESS)
8175 {
8176 *pu32Value = *pu32Src;
8177 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
8178
8179 /* Commit the new RSP value. */
8180 if (rc == VINF_SUCCESS)
8181 *pTmpRsp = NewRsp;
8182 }
8183
8184 return rc;
8185}
8186
8187
8188/**
8189 * Pops a qword from the stack, using a temporary stack pointer.
8190 *
8191 * @returns Strict VBox status code.
8192 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8193 * @param pu64Value Where to store the popped value.
8194 * @param pTmpRsp Pointer to the temporary stack pointer.
8195 */
8196VBOXSTRICTRC iemMemStackPopU64Ex(PVMCPUCC pVCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8197{
8198 /* Increment the stack pointer. */
8199 RTUINT64U NewRsp = *pTmpRsp;
8200 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 8);
8201
8202 /* Fetch the qword the lazy way. */
8203 uint64_t const *pu64Src;
8204 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop,
8205 IEM_ACCESS_STACK_R, sizeof(*pu64Src) - 1);
8206 if (rcStrict == VINF_SUCCESS)
8207 {
8208 *pu64Value = *pu64Src;
8209 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
8210
8211 /* Commit the new RSP value. */
8212 if (rcStrict == VINF_SUCCESS)
8213 *pTmpRsp = NewRsp;
8214 }
8215
8216 return rcStrict;
8217}
8218
8219
8220/**
8221 * Begin a special stack push (used by interrupt, exceptions and such).
8222 *
8223 * This will raise \#SS or \#PF if appropriate.
8224 *
8225 * @returns Strict VBox status code.
8226 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8227 * @param cbMem The number of bytes to push onto the stack.
8228 * @param cbAlign The alignment mask (7, 3, 1).
8229 * @param ppvMem Where to return the pointer to the stack memory.
8230 * As with the other memory functions this could be
8231 * direct access or bounce buffered access, so
8232 * don't commit any register state until the commit call
8233 * succeeds.
8234 * @param puNewRsp Where to return the new RSP value. This must be
8235 * passed unchanged to
8236 * iemMemStackPushCommitSpecial().
8237 */
8238VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
8239 void **ppvMem, uint64_t *puNewRsp) RT_NOEXCEPT
8240{
8241 Assert(cbMem < UINT8_MAX);
8242 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, (uint8_t)cbMem, puNewRsp);
8243 return iemMemMap(pVCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop,
8244 IEM_ACCESS_STACK_W, cbAlign);
8245}
8246
8247
8248/**
8249 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
8250 *
8251 * This will update the rSP.
8252 *
8253 * @returns Strict VBox status code.
8254 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8255 * @param pvMem The pointer returned by
8256 * iemMemStackPushBeginSpecial().
8257 * @param uNewRsp The new RSP value returned by
8258 * iemMemStackPushBeginSpecial().
8259 */
8260VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPUCC pVCpu, void *pvMem, uint64_t uNewRsp) RT_NOEXCEPT
8261{
8262 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem, IEM_ACCESS_STACK_W);
8263 if (rcStrict == VINF_SUCCESS)
8264 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8265 return rcStrict;
8266}
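/*
 * Illustrative sketch (not part of the build): the begin/commit protocol
 * implemented by the two helpers above, as a hypothetical real-mode exception
 * pusher might use it.  The 6-byte IP/CS/FLAGS frame and the word alignment
 * mask are assumptions made for this example only.
 *
 * @code
 *    void        *pvFrame  = NULL;
 *    uint64_t     uNewRsp  = 0;
 *    VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6 /*cbMem*/, 1 /*cbAlign*/,
 *                                                        &pvFrame, &uNewRsp);
 *    if (rcStrict == VINF_SUCCESS)
 *    {
 *        // Fill in the (possibly bounce buffered) frame, lowest address first ...
 *        ((uint16_t *)pvFrame)[0] = (uint16_t)pVCpu->cpum.GstCtx.rip;
 *        ((uint16_t *)pvFrame)[1] = pVCpu->cpum.GstCtx.cs.Sel;
 *        ((uint16_t *)pvFrame)[2] = (uint16_t)pVCpu->cpum.GstCtx.eflags.u;
 *        // ... then commit the write and the new RSP in one go.
 *        rcStrict = iemMemStackPushCommitSpecial(pVCpu, pvFrame, uNewRsp);
 *    }
 * @endcode
 */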
8267
8268
8269/**
8270 * Begin a special stack pop (used by iret, retf and such).
8271 *
8272 * This will raise \#SS or \#PF if appropriate.
8273 *
8274 * @returns Strict VBox status code.
8275 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8276 * @param cbMem The number of bytes to pop from the stack.
8277 * @param cbAlign The alignment mask (7, 3, 1).
8278 * @param ppvMem Where to return the pointer to the stack memory.
8279 * @param puNewRsp Where to return the new RSP value. This must be
8280 * assigned to CPUMCTX::rsp manually some time
8281 * after iemMemStackPopDoneSpecial() has been
8282 * called.
8283 */
8284VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
8285 void const **ppvMem, uint64_t *puNewRsp) RT_NOEXCEPT
8286{
8287 Assert(cbMem < UINT8_MAX);
8288 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, (uint8_t)cbMem, puNewRsp);
8289 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R, cbAlign);
8290}
8291
8292
8293/**
8294 * Continue a special stack pop (used by iret and retf), for the purpose of
8295 * retrieving a new stack pointer.
8296 *
8297 * This will raise \#SS or \#PF if appropriate.
8298 *
8299 * @returns Strict VBox status code.
8300 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8301 * @param off Offset from the top of the stack. This is zero
8302 * except in the retf case.
8303 * @param cbMem The number of bytes to pop from the stack.
8304 * @param ppvMem Where to return the pointer to the stack memory.
8305 * @param uCurNewRsp The current uncommitted RSP value. (No need to
8306 * return this because all use of this function is
8307 * to retrieve a new value and anything we return
8308 * here would be discarded.)
8309 */
8310VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPUCC pVCpu, size_t off, size_t cbMem,
8311 void const **ppvMem, uint64_t uCurNewRsp) RT_NOEXCEPT
8312{
8313 Assert(cbMem < UINT8_MAX);
8314
8315 /* The essence of iemRegGetRspForPopEx and friends: */ /** @todo put this into an inlined function? */
8316 RTGCPTR GCPtrTop;
8317 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
8318 GCPtrTop = uCurNewRsp;
8319 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
8320 GCPtrTop = (uint32_t)uCurNewRsp;
8321 else
8322 GCPtrTop = (uint16_t)uCurNewRsp;
8323
8324 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop + off, IEM_ACCESS_STACK_R,
8325 0 /* checked in iemMemStackPopBeginSpecial */);
8326}
8327
8328
8329/**
8330 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
8331 * iemMemStackPopContinueSpecial).
8332 *
8333 * The caller will manually commit the rSP.
8334 *
8335 * @returns Strict VBox status code.
8336 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8337 * @param pvMem The pointer returned by
8338 * iemMemStackPopBeginSpecial() or
8339 * iemMemStackPopContinueSpecial().
8340 */
8341VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPUCC pVCpu, void const *pvMem) RT_NOEXCEPT
8342{
8343 return iemMemCommitAndUnmap(pVCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
8344}
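/*
 * Illustrative sketch (not part of the build): how an IRET-style caller might
 * chain the special-pop helpers above.  The 6-byte frame layout is an
 * assumption for the example; the key point is that RSP is only committed by
 * the caller once the pop has completed successfully.
 *
 * @code
 *    void const  *pvFrame  = NULL;
 *    uint64_t     uNewRsp  = 0;
 *    VBOXSTRICTRC rcStrict = iemMemStackPopBeginSpecial(pVCpu, 6 /*cbMem*/, 1 /*cbAlign*/,
 *                                                       &pvFrame, &uNewRsp);
 *    if (rcStrict == VINF_SUCCESS)
 *    {
 *        uint16_t const uNewIp    = ((uint16_t const *)pvFrame)[0];
 *        uint16_t const uNewCs    = ((uint16_t const *)pvFrame)[1];
 *        uint16_t const uNewFlags = ((uint16_t const *)pvFrame)[2];
 *        rcStrict = iemMemStackPopDoneSpecial(pVCpu, pvFrame);
 *        if (rcStrict == VINF_SUCCESS)
 *        {
 *            // ... load the new CS:IP and FLAGS from the values read above,
 *            // then commit the stack pointer:
 *            pVCpu->cpum.GstCtx.rsp = uNewRsp;
 *        }
 *    }
 * @endcode
 */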
8345
8346
8347/**
8348 * Fetches a system table byte.
8349 *
8350 * @returns Strict VBox status code.
8351 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8352 * @param pbDst Where to return the byte.
8353 * @param iSegReg The index of the segment register to use for
8354 * this access. The base and limits are checked.
8355 * @param GCPtrMem The address of the guest memory.
8356 */
8357VBOXSTRICTRC iemMemFetchSysU8(PVMCPUCC pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8358{
8359 /* The lazy approach for now... */
8360 uint8_t const *pbSrc;
8361 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8362 if (rc == VINF_SUCCESS)
8363 {
8364 *pbDst = *pbSrc;
8365 rc = iemMemCommitAndUnmap(pVCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
8366 }
8367 return rc;
8368}
8369
8370
8371/**
8372 * Fetches a system table word.
8373 *
8374 * @returns Strict VBox status code.
8375 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8376 * @param pu16Dst Where to return the word.
8377 * @param iSegReg The index of the segment register to use for
8378 * this access. The base and limits are checked.
8379 * @param GCPtrMem The address of the guest memory.
8380 */
8381VBOXSTRICTRC iemMemFetchSysU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8382{
8383 /* The lazy approach for now... */
8384 uint16_t const *pu16Src;
8385 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8386 if (rc == VINF_SUCCESS)
8387 {
8388 *pu16Dst = *pu16Src;
8389 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
8390 }
8391 return rc;
8392}
8393
8394
8395/**
8396 * Fetches a system table dword.
8397 *
8398 * @returns Strict VBox status code.
8399 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8400 * @param pu32Dst Where to return the dword.
8401 * @param iSegReg The index of the segment register to use for
8402 * this access. The base and limits are checked.
8403 * @param GCPtrMem The address of the guest memory.
8404 */
8405VBOXSTRICTRC iemMemFetchSysU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8406{
8407 /* The lazy approach for now... */
8408 uint32_t const *pu32Src;
8409 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8410 if (rc == VINF_SUCCESS)
8411 {
8412 *pu32Dst = *pu32Src;
8413 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
8414 }
8415 return rc;
8416}
8417
8418
8419/**
8420 * Fetches a system table qword.
8421 *
8422 * @returns Strict VBox status code.
8423 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8424 * @param pu64Dst Where to return the qword.
8425 * @param iSegReg The index of the segment register to use for
8426 * this access. The base and limits are checked.
8427 * @param GCPtrMem The address of the guest memory.
8428 */
8429VBOXSTRICTRC iemMemFetchSysU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8430{
8431 /* The lazy approach for now... */
8432 uint64_t const *pu64Src;
8433 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8434 if (rc == VINF_SUCCESS)
8435 {
8436 *pu64Dst = *pu64Src;
8437 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
8438 }
8439 return rc;
8440}
8441
8442
8443/**
8444 * Fetches a descriptor table entry with caller specified error code.
8445 *
8446 * @returns Strict VBox status code.
8447 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8448 * @param pDesc Where to return the descriptor table entry.
8449 * @param uSel The selector which table entry to fetch.
8450 * @param uXcpt The exception to raise on table lookup error.
8451 * @param uErrorCode The error code associated with the exception.
8452 */
8453static VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,
8454 uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT
8455{
8456 AssertPtr(pDesc);
8457 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
8458
8459 /** @todo did the 286 require all 8 bytes to be accessible? */
8460 /*
8461 * Get the selector table base and check bounds.
8462 */
8463 RTGCPTR GCPtrBase;
8464 if (uSel & X86_SEL_LDT)
8465 {
8466 if ( !pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present
8467 || (uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.ldtr.u32Limit )
8468 {
8469 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
8470 uSel, pVCpu->cpum.GstCtx.ldtr.u32Limit, pVCpu->cpum.GstCtx.ldtr.Sel));
8471 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8472 uErrorCode, 0);
8473 }
8474
8475 Assert(pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present);
8476 GCPtrBase = pVCpu->cpum.GstCtx.ldtr.u64Base;
8477 }
8478 else
8479 {
8480 if ((uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.gdtr.cbGdt)
8481 {
8482 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pVCpu->cpum.GstCtx.gdtr.cbGdt));
8483 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8484 uErrorCode, 0);
8485 }
8486 GCPtrBase = pVCpu->cpum.GstCtx.gdtr.pGdt;
8487 }
8488
8489 /*
8490 * Read the legacy descriptor and maybe the long mode extensions if
8491 * required.
8492 */
8493 VBOXSTRICTRC rcStrict;
8494 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)
8495 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
8496 else
8497 {
8498 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);
8499 if (rcStrict == VINF_SUCCESS)
8500 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);
8501 if (rcStrict == VINF_SUCCESS)
8502 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);
8503 if (rcStrict == VINF_SUCCESS)
8504 pDesc->Legacy.au16[3] = 0;
8505 else
8506 return rcStrict;
8507 }
8508
8509 if (rcStrict == VINF_SUCCESS)
8510 {
8511 if ( !IEM_IS_LONG_MODE(pVCpu)
8512 || pDesc->Legacy.Gen.u1DescType)
8513 pDesc->Long.au64[1] = 0;
8514 else if ((uint32_t)(uSel | X86_SEL_RPL_LDT) + 8 <= (uSel & X86_SEL_LDT ? pVCpu->cpum.GstCtx.ldtr.u32Limit : pVCpu->cpum.GstCtx.gdtr.cbGdt))
8515 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
8516 else
8517 {
8518 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
8519 /** @todo is this the right exception? */
8520 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
8521 }
8522 }
8523 return rcStrict;
8524}
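/*
 * Illustrative sketch (not part of the build): why the fetch above reads a
 * second qword for system descriptors in long mode.  Such descriptors (LDT,
 * TSS, gates) are 16 bytes wide, with bits 63:32 of the base address living
 * in the upper half.  Assuming the X86DESC_BASE() helper from iprt/x86.h, a
 * consumer could reassemble the full base like this:
 *
 * @code
 *    uint64_t const u64Base = X86DESC_BASE(&pDesc->Legacy)
 *                           | (pDesc->Long.au64[1] << 32);
 * @endcode
 */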
8525
8526
8527/**
8528 * Fetches a descriptor table entry.
8529 *
8530 * @returns Strict VBox status code.
8531 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8532 * @param pDesc Where to return the descriptor table entry.
8533 * @param uSel The selector which table entry to fetch.
8534 * @param uXcpt The exception to raise on table lookup error.
8535 */
8536VBOXSTRICTRC iemMemFetchSelDesc(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt) RT_NOEXCEPT
8537{
8538 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
8539}
8540
8541
8542/**
8543 * Marks the selector descriptor as accessed (only non-system descriptors).
8544 *
8545 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
8546 * will therefore skip the limit checks.
8547 *
8548 * @returns Strict VBox status code.
8549 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8550 * @param uSel The selector.
8551 */
8552VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
8553{
8554 /*
8555 * Get the selector table base and calculate the entry address.
8556 */
8557 RTGCPTR GCPtr = uSel & X86_SEL_LDT
8558 ? pVCpu->cpum.GstCtx.ldtr.u64Base
8559 : pVCpu->cpum.GstCtx.gdtr.pGdt;
8560 GCPtr += uSel & X86_SEL_MASK;
8561
8562 /*
8563 * ASMAtomicBitSet will assert if the address is misaligned, so do some
8564 * ugly stuff to avoid this. This will make sure it's an atomic access
8565 * as well as more or less remove any question about 8-bit or 32-bit accesses.
8566 */
8567 VBOXSTRICTRC rcStrict;
8568 uint32_t volatile *pu32;
8569 if ((GCPtr & 3) == 0)
8570 {
8571 /* The normal case: map the 32 bits containing the accessed bit (bit 40). */
8572 GCPtr += 2 + 2;
8573 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0);
8574 if (rcStrict != VINF_SUCCESS)
8575 return rcStrict;
8576 ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
8577 }
8578 else
8579 {
8580 /* The misaligned GDT/LDT case, map the whole thing. */
8581 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0);
8582 if (rcStrict != VINF_SUCCESS)
8583 return rcStrict;
8584 switch ((uintptr_t)pu32 & 3)
8585 {
8586 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
8587 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
8588 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
8589 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
8590 }
8591 }
8592
8593 return iemMemCommitAndUnmap(pVCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
8594}
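/*
 * Illustrative sketch (not part of the build): the bit arithmetic behind the
 * aligned fast path above.  The accessed flag is bit 40 of the 8-byte
 * descriptor; after skipping the first four bytes (the 2 + 2 above) it is
 * bit 40 - 32 = bit 8 of the mapped dword, which is what gets passed to
 * ASMAtomicBitSet.
 *
 * @code
 *    AssertCompile(X86_SEL_TYPE_ACCESSED == 1);   // accessed is the lowest type bit,
 *                                                 // i.e. bit 0 of byte 5 == descriptor bit 40
 * @endcode
 */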
8595
8596/** @} */
8597
8598/** @name Opcode Helpers.
8599 * @{
8600 */
8601
8602/**
8603 * Calculates the effective address of a ModR/M memory operand.
8604 *
8605 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8606 *
8607 * @return Strict VBox status code.
8608 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8609 * @param bRm The ModRM byte.
8610 * @param cbImm The size of any immediate following the
8611 * effective address opcode bytes. Important for
8612 * RIP relative addressing.
8613 * @param pGCPtrEff Where to return the effective address.
8614 */
8615VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPUCC pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff) RT_NOEXCEPT
8616{
8617 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
8618# define SET_SS_DEF() \
8619 do \
8620 { \
8621 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
8622 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
8623 } while (0)
8624
8625 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
8626 {
8627/** @todo Check the effective address size crap! */
8628 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
8629 {
8630 uint16_t u16EffAddr;
8631
8632 /* Handle the disp16 form with no registers first. */
8633 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
8634 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
8635 else
8636 {
8637 /* Get the displacement. */
8638 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8639 {
8640 case 0: u16EffAddr = 0; break;
8641 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
8642 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
8643 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
8644 }
8645
8646 /* Add the base and index registers to the disp. */
8647 switch (bRm & X86_MODRM_RM_MASK)
8648 {
8649 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
8650 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
8651 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
8652 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
8653 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
8654 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
8655 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
8656 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
8657 }
8658 }
8659
8660 *pGCPtrEff = u16EffAddr;
8661 }
8662 else
8663 {
8664 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8665 uint32_t u32EffAddr;
8666
8667 /* Handle the disp32 form with no registers first. */
8668 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8669 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
8670 else
8671 {
8672 /* Get the register (or SIB) value. */
8673 switch ((bRm & X86_MODRM_RM_MASK))
8674 {
8675 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8676 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8677 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8678 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8679 case 4: /* SIB */
8680 {
8681 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8682
8683 /* Get the index and scale it. */
8684 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
8685 {
8686 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8687 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8688 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8689 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8690 case 4: u32EffAddr = 0; /*none */ break;
8691 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
8692 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8693 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8694 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8695 }
8696 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8697
8698 /* add base */
8699 switch (bSib & X86_SIB_BASE_MASK)
8700 {
8701 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
8702 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
8703 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
8704 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
8705 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp; SET_SS_DEF(); break;
8706 case 5:
8707 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8708 {
8709 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
8710 SET_SS_DEF();
8711 }
8712 else
8713 {
8714 uint32_t u32Disp;
8715 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8716 u32EffAddr += u32Disp;
8717 }
8718 break;
8719 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
8720 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
8721 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8722 }
8723 break;
8724 }
8725 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
8726 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8727 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8728 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8729 }
8730
8731 /* Get and add the displacement. */
8732 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8733 {
8734 case 0:
8735 break;
8736 case 1:
8737 {
8738 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8739 u32EffAddr += i8Disp;
8740 break;
8741 }
8742 case 2:
8743 {
8744 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8745 u32EffAddr += u32Disp;
8746 break;
8747 }
8748 default:
8749 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
8750 }
8751
8752 }
8753 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
8754 *pGCPtrEff = u32EffAddr;
8755 else
8756 {
8757 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
8758 *pGCPtrEff = u32EffAddr & UINT16_MAX;
8759 }
8760 }
8761 }
8762 else
8763 {
8764 uint64_t u64EffAddr;
8765
8766 /* Handle the rip+disp32 form with no registers first. */
8767 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8768 {
8769 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
8770 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
8771 }
8772 else
8773 {
8774 /* Get the register (or SIB) value. */
8775 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
8776 {
8777 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8778 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8779 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8780 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8781 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
8782 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8783 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8784 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8785 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8786 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8787 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8788 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8789 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8790 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8791 /* SIB */
8792 case 4:
8793 case 12:
8794 {
8795 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8796
8797 /* Get the index and scale it. */
8798 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
8799 {
8800 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8801 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8802 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8803 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8804 case 4: u64EffAddr = 0; /*none */ break;
8805 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
8806 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8807 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8808 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8809 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8810 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8811 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8812 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
8813 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8814 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8815 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8816 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8817 }
8818 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8819
8820 /* add base */
8821 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
8822 {
8823 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
8824 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
8825 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
8826 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
8827 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp; SET_SS_DEF(); break;
8828 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
8829 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
8830 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
8831 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
8832 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
8833 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
8834 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
8835 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
8836 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
8837 /* complicated encodings */
8838 case 5:
8839 case 13:
8840 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8841 {
8842 if (!pVCpu->iem.s.uRexB)
8843 {
8844 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
8845 SET_SS_DEF();
8846 }
8847 else
8848 u64EffAddr += pVCpu->cpum.GstCtx.r13;
8849 }
8850 else
8851 {
8852 uint32_t u32Disp;
8853 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8854 u64EffAddr += (int32_t)u32Disp;
8855 }
8856 break;
8857 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8858 }
8859 break;
8860 }
8861 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8862 }
8863
8864 /* Get and add the displacement. */
8865 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8866 {
8867 case 0:
8868 break;
8869 case 1:
8870 {
8871 int8_t i8Disp;
8872 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8873 u64EffAddr += i8Disp;
8874 break;
8875 }
8876 case 2:
8877 {
8878 uint32_t u32Disp;
8879 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8880 u64EffAddr += (int32_t)u32Disp;
8881 break;
8882 }
8883 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
8884 }
8885
8886 }
8887
8888 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
8889 *pGCPtrEff = u64EffAddr;
8890 else
8891 {
8892 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8893 *pGCPtrEff = u64EffAddr & UINT32_MAX;
8894 }
8895 }
8896
8897 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
8898 return VINF_SUCCESS;
8899}
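/*
 * Illustrative sketch (not part of the build): a worked 16-bit example of the
 * decoding above.  For bRm = 0x42 (mod=01, reg=000, rm=010) followed by a
 * disp8 of 0x10, the effective address is BP + SI + 0x10, and because rm=010
 * involves BP the default segment becomes SS unless a prefix overrides it.
 *
 * @code
 *    // mod=01 -> a sign-extended disp8 follows the ModR/M byte
 *    // rm=010 -> base/index pair BP+SI, SET_SS_DEF() applies
 *    uint16_t const u16Example = (uint16_t)(pVCpu->cpum.GstCtx.bp
 *                                         + pVCpu->cpum.GstCtx.si
 *                                         + (int8_t)0x10);
 * @endcode
 */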
8900
8901
8902/**
8903 * Calculates the effective address of a ModR/M memory operand.
8904 *
8905 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8906 *
8907 * @return Strict VBox status code.
8908 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8909 * @param bRm The ModRM byte.
8910 * @param cbImm The size of any immediate following the
8911 * effective address opcode bytes. Important for
8912 * RIP relative addressing.
8913 * @param pGCPtrEff Where to return the effective address.
8914 * @param offRsp RSP displacement.
8915 */
8916VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPUCC pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff, int8_t offRsp) RT_NOEXCEPT
8917{
8918 Log5(("iemOpHlpCalcRmEffAddrEx: bRm=%#x\n", bRm));
8919# define SET_SS_DEF() \
8920 do \
8921 { \
8922 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
8923 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
8924 } while (0)
8925
8926 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
8927 {
8928/** @todo Check the effective address size crap! */
8929 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
8930 {
8931 uint16_t u16EffAddr;
8932
8933 /* Handle the disp16 form with no registers first. */
8934 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
8935 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
8936 else
8937 {
8938 /* Get the displacement. */
8939 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8940 {
8941 case 0: u16EffAddr = 0; break;
8942 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
8943 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
8944 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
8945 }
8946
8947 /* Add the base and index registers to the disp. */
8948 switch (bRm & X86_MODRM_RM_MASK)
8949 {
8950 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
8951 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
8952 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
8953 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
8954 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
8955 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
8956 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
8957 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
8958 }
8959 }
8960
8961 *pGCPtrEff = u16EffAddr;
8962 }
8963 else
8964 {
8965 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8966 uint32_t u32EffAddr;
8967
8968 /* Handle the disp32 form with no registers first. */
8969 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8970 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
8971 else
8972 {
8973 /* Get the register (or SIB) value. */
8974 switch ((bRm & X86_MODRM_RM_MASK))
8975 {
8976 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8977 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8978 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8979 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8980 case 4: /* SIB */
8981 {
8982 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8983
8984 /* Get the index and scale it. */
8985 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
8986 {
8987 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8988 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8989 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8990 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8991 case 4: u32EffAddr = 0; /*none */ break;
8992 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
8993 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8994 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8995 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8996 }
8997 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8998
8999 /* add base */
9000 switch (bSib & X86_SIB_BASE_MASK)
9001 {
9002 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
9003 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
9004 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
9005 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
9006 case 4:
9007 u32EffAddr += pVCpu->cpum.GstCtx.esp + offRsp;
9008 SET_SS_DEF();
9009 break;
9010 case 5:
9011 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9012 {
9013 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
9014 SET_SS_DEF();
9015 }
9016 else
9017 {
9018 uint32_t u32Disp;
9019 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9020 u32EffAddr += u32Disp;
9021 }
9022 break;
9023 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
9024 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
9025 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9026 }
9027 break;
9028 }
9029 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
9030 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
9031 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
9032 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9033 }
9034
9035 /* Get and add the displacement. */
9036 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9037 {
9038 case 0:
9039 break;
9040 case 1:
9041 {
9042 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9043 u32EffAddr += i8Disp;
9044 break;
9045 }
9046 case 2:
9047 {
9048 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9049 u32EffAddr += u32Disp;
9050 break;
9051 }
9052 default:
9053 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
9054 }
9055
9056 }
9057 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
9058 *pGCPtrEff = u32EffAddr;
9059 else
9060 {
9061 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
9062 *pGCPtrEff = u32EffAddr & UINT16_MAX;
9063 }
9064 }
9065 }
9066 else
9067 {
9068 uint64_t u64EffAddr;
9069
9070 /* Handle the rip+disp32 form with no registers first. */
9071 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9072 {
9073 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
9074 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
9075 }
9076 else
9077 {
9078 /* Get the register (or SIB) value. */
9079 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
9080 {
9081 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9082 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9083 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9084 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9085 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
9086 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9087 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9088 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9089 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9090 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9091 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9092 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9093 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9094 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9095 /* SIB */
9096 case 4:
9097 case 12:
9098 {
9099 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9100
9101 /* Get the index and scale it. */
9102 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
9103 {
9104 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9105 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9106 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9107 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9108 case 4: u64EffAddr = 0; /*none */ break;
9109 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
9110 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9111 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9112 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9113 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9114 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9115 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9116 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
9117 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9118 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9119 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9120 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9121 }
9122 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9123
9124 /* add base */
9125 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
9126 {
9127 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
9128 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
9129 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
9130 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
9131 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + offRsp; SET_SS_DEF(); break;
9132 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
9133 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
9134 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
9135 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
9136 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
9137 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
9138 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
9139 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
9140 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
9141 /* complicated encodings */
9142 case 5:
9143 case 13:
9144 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9145 {
9146 if (!pVCpu->iem.s.uRexB)
9147 {
9148 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
9149 SET_SS_DEF();
9150 }
9151 else
9152 u64EffAddr += pVCpu->cpum.GstCtx.r13;
9153 }
9154 else
9155 {
9156 uint32_t u32Disp;
9157 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9158 u64EffAddr += (int32_t)u32Disp;
9159 }
9160 break;
9161 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9162 }
9163 break;
9164 }
9165 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9166 }
9167
9168 /* Get and add the displacement. */
9169 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9170 {
9171 case 0:
9172 break;
9173 case 1:
9174 {
9175 int8_t i8Disp;
9176 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9177 u64EffAddr += i8Disp;
9178 break;
9179 }
9180 case 2:
9181 {
9182 uint32_t u32Disp;
9183 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9184 u64EffAddr += (int32_t)u32Disp;
9185 break;
9186 }
9187 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
9188 }
9189
9190 }
9191
9192 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
9193 *pGCPtrEff = u64EffAddr;
9194 else
9195 {
9196 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9197 *pGCPtrEff = u64EffAddr & UINT32_MAX;
9198 }
9199 }
9200
9201 Log5(("iemOpHlpCalcRmEffAddrEx: EffAddr=%#010RGv\n", *pGCPtrEff));
9202 return VINF_SUCCESS;
9203}
9204
9205
9206#ifdef IEM_WITH_SETJMP
9207/**
9208 * Calculates the effective address of a ModR/M memory operand.
9209 *
9210 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
9211 *
9212 * May longjmp on internal error.
9213 *
9214 * @return The effective address.
9215 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9216 * @param bRm The ModRM byte.
9217 * @param cbImm The size of any immediate following the
9218 * effective address opcode bytes. Important for
9219 * RIP relative addressing.
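 *
 * @remarks Illustrative sketch only; unlike iemOpHlpCalcRmEffAddrEx this
 *          variant reports internal errors by longjmp'ing through the IEM
 *          jump buffer rather than returning a status code:
 * @code
 *     RTGCPTR const GCPtrEff = iemOpHlpCalcRmEffAddrJmp(pVCpu, bRm, 0); // cbImm=0
 * @endcode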
9220 */
9221RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPUCC pVCpu, uint8_t bRm, uint8_t cbImm) RT_NOEXCEPT
9222{
9223 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
9224# define SET_SS_DEF() \
9225 do \
9226 { \
9227 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
9228 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
9229 } while (0)
9230
9231 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
9232 {
9233/** @todo Check the effective address size crap! */
9234 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
9235 {
9236 uint16_t u16EffAddr;
9237
9238 /* Handle the disp16 form with no registers first. */
9239 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
9240 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
9241 else
9242 {
9243 /* Get the displacement. */
9244 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9245 {
9246 case 0: u16EffAddr = 0; break;
9247 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
9248 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
9249 default: AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_1)); /* (caller checked for these) */
9250 }
9251
9252 /* Add the base and index registers to the disp. */
9253 switch (bRm & X86_MODRM_RM_MASK)
9254 {
9255 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
9256 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
9257 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
9258 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
9259 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
9260 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
9261 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
9262 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
9263 }
9264 }
9265
9266 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
9267 return u16EffAddr;
9268 }
9269
9270 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9271 uint32_t u32EffAddr;
9272
9273 /* Handle the disp32 form with no registers first. */
9274 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9275 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
9276 else
9277 {
9278 /* Get the register (or SIB) value. */
9279 switch ((bRm & X86_MODRM_RM_MASK))
9280 {
9281 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
9282 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
9283 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
9284 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
9285 case 4: /* SIB */
9286 {
9287 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9288
9289 /* Get the index and scale it. */
9290 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
9291 {
9292 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
9293 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
9294 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
9295 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
9296 case 4: u32EffAddr = 0; /*none */ break;
9297 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
9298 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
9299 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
9300 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9301 }
9302 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9303
9304 /* add base */
9305 switch (bSib & X86_SIB_BASE_MASK)
9306 {
9307 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
9308 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
9309 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
9310 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
9311 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp; SET_SS_DEF(); break;
9312 case 5:
9313 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9314 {
9315 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
9316 SET_SS_DEF();
9317 }
9318 else
9319 {
9320 uint32_t u32Disp;
9321 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9322 u32EffAddr += u32Disp;
9323 }
9324 break;
9325 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
9326 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
9327 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9328 }
9329 break;
9330 }
9331 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
9332 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
9333 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
9334 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9335 }
9336
9337 /* Get and add the displacement. */
9338 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9339 {
9340 case 0:
9341 break;
9342 case 1:
9343 {
9344 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9345 u32EffAddr += i8Disp;
9346 break;
9347 }
9348 case 2:
9349 {
9350 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9351 u32EffAddr += u32Disp;
9352 break;
9353 }
9354 default:
9355 AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_2)); /* (caller checked for these) */
9356 }
9357 }
9358
9359 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
9360 {
9361 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
9362 return u32EffAddr;
9363 }
9364 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
9365 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX32\n", u32EffAddr & UINT16_MAX));
9366 return u32EffAddr & UINT16_MAX;
9367 }
9368
9369 uint64_t u64EffAddr;
9370
9371 /* Handle the rip+disp32 form with no registers first. */
9372 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9373 {
9374 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
9375 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
9376 }
9377 else
9378 {
9379 /* Get the register (or SIB) value. */
9380 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
9381 {
9382 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9383 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9384 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9385 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9386 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
9387 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9388 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9389 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9390 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9391 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9392 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9393 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9394 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9395 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9396 /* SIB */
9397 case 4:
9398 case 12:
9399 {
9400 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9401
9402 /* Get the index and scale it. */
9403 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
9404 {
9405 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9406 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9407 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9408 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9409 case 4: u64EffAddr = 0; /*none */ break;
9410 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
9411 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9412 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9413 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9414 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9415 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9416 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9417 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
9418 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9419 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9420 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9421 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9422 }
9423 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9424
9425 /* add base */
9426 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
9427 {
9428 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
9429 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
9430 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
9431 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
9432 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp; SET_SS_DEF(); break;
9433 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
9434 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
9435 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
9436 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
9437 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
9438 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
9439 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
9440 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
9441 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
9442 /* complicated encodings */
9443 case 5:
9444 case 13:
9445 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9446 {
9447 if (!pVCpu->iem.s.uRexB)
9448 {
9449 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
9450 SET_SS_DEF();
9451 }
9452 else
9453 u64EffAddr += pVCpu->cpum.GstCtx.r13;
9454 }
9455 else
9456 {
9457 uint32_t u32Disp;
9458 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9459 u64EffAddr += (int32_t)u32Disp;
9460 }
9461 break;
9462 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9463 }
9464 break;
9465 }
9466 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9467 }
9468
9469 /* Get and add the displacement. */
9470 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9471 {
9472 case 0:
9473 break;
9474 case 1:
9475 {
9476 int8_t i8Disp;
9477 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9478 u64EffAddr += i8Disp;
9479 break;
9480 }
9481 case 2:
9482 {
9483 uint32_t u32Disp;
9484 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9485 u64EffAddr += (int32_t)u32Disp;
9486 break;
9487 }
9488 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
9489 }
9490
9491 }
9492
9493 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
9494 {
9495 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
9496 return u64EffAddr;
9497 }
9498 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9499 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
9500 return u64EffAddr & UINT32_MAX;
9501}
9502#endif /* IEM_WITH_SETJMP */
9503
9504/** @} */
9505
9506
9507#ifdef LOG_ENABLED
9508/**
9509 * Logs the current instruction.
9510 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9511 * @param fSameCtx Set if we have the same context information as the VMM,
9512 * clear if we may have already executed an instruction in
9513 * our debug context. When clear, we assume IEMCPU holds
9514 * valid CPU mode info.
9515 *
9516 * Note that this parameter has become somewhat misleading and obsolete.
9517 * @param pszFunction The IEM function doing the execution.
9518 */
9519static void iemLogCurInstr(PVMCPUCC pVCpu, bool fSameCtx, const char *pszFunction) RT_NOEXCEPT
9520{
9521# ifdef IN_RING3
9522 if (LogIs2Enabled())
9523 {
9524 char szInstr[256];
9525 uint32_t cbInstr = 0;
9526 if (fSameCtx)
9527 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
9528 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
9529 szInstr, sizeof(szInstr), &cbInstr);
9530 else
9531 {
9532 uint32_t fFlags = 0;
9533 switch (pVCpu->iem.s.enmCpuMode)
9534 {
9535 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
9536 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
9537 case IEMMODE_16BIT:
9538 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) || pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
9539 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
9540 else
9541 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
9542 break;
9543 }
9544 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fFlags,
9545 szInstr, sizeof(szInstr), &cbInstr);
9546 }
9547
9548 PCX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
9549 Log2(("**** %s\n"
9550 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
9551 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
9552 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
9553 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
9554 " %s\n"
9555 , pszFunction,
9556 pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ebx, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.esi, pVCpu->cpum.GstCtx.edi,
9557 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL, pVCpu->cpum.GstCtx.tr.Sel,
9558 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ds.Sel, pVCpu->cpum.GstCtx.es.Sel,
9559 pVCpu->cpum.GstCtx.fs.Sel, pVCpu->cpum.GstCtx.gs.Sel, pVCpu->cpum.GstCtx.eflags.u,
9560 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
9561 szInstr));
9562
9563 if (LogIs3Enabled())
9564 DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
9565 }
9566 else
9567# endif
9568 LogFlow(("%s: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n", pszFunction, pVCpu->cpum.GstCtx.cs.Sel,
9569 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u));
9570 RT_NOREF_PV(pVCpu); RT_NOREF_PV(fSameCtx);
9571}
9572#endif /* LOG_ENABLED */
9573
9574
9575#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9576/**
9577 * Deals with VMCPU_FF_VMX_APIC_WRITE, VMCPU_FF_VMX_MTF, VMCPU_FF_VMX_NMI_WINDOW,
9578 * VMCPU_FF_VMX_PREEMPT_TIMER and VMCPU_FF_VMX_INT_WINDOW.
9579 *
9580 * @returns Modified rcStrict.
9581 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9582 * @param rcStrict The instruction execution status.
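 *
 * @remarks As implemented below, a pending APIC-write emulation or MTF VM-exit
 *          is serviced first; otherwise the VMX-preemption timer is checked
 *          before the NMI-window and interrupt-window VM-exits.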
9583 */
9584static VBOXSTRICTRC iemHandleNestedInstructionBoundaryFFs(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict) RT_NOEXCEPT
9585{
9586 Assert(CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)));
9587 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF))
9588 {
9589 /* VMX preemption timer takes priority over NMI-window exits. */
9590 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
9591 {
9592 rcStrict = iemVmxVmexitPreemptTimer(pVCpu);
9593 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER));
9594 }
9595 /*
9596 * Check remaining intercepts.
9597 *
9598 * NMI-window and Interrupt-window VM-exits.
9599 * Interrupt shadow (block-by-STI and Mov SS) inhibits interrupts and may also block NMIs.
9600 * Event injection during VM-entry takes priority over NMI-window and interrupt-window VM-exits.
9601 *
9602 * See Intel spec. 26.7.6 "NMI-Window Exiting".
9603 * See Intel spec. 26.7.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
9604 */
9605 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW | VMCPU_FF_VMX_INT_WINDOW)
9606 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
9607 && !TRPMHasTrap(pVCpu))
9608 {
9609 Assert(CPUMIsGuestVmxInterceptEvents(&pVCpu->cpum.GstCtx));
9610 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW)
9611 && CPUMIsGuestVmxVirtNmiBlocking(&pVCpu->cpum.GstCtx))
9612 {
9613 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_NMI_WINDOW, 0 /* u64ExitQual */);
9614 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW));
9615 }
9616 else if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW)
9617 && CPUMIsGuestVmxVirtIntrEnabled(&pVCpu->cpum.GstCtx))
9618 {
9619 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_INT_WINDOW, 0 /* u64ExitQual */);
9620 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW));
9621 }
9622 }
9623 }
9624 /* TPR-below threshold/APIC write has the highest priority. */
9625 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
9626 {
9627 rcStrict = iemVmxApicWriteEmulation(pVCpu);
9628 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
9629 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE));
9630 }
9631 /* MTF takes priority over VMX-preemption timer. */
9632 else
9633 {
9634 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* u64ExitQual */);
9635 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
9636 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF));
9637 }
9638 return rcStrict;
9639}
9640#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
9641
9642
9643/**
9644 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
9645 * IEMExecOneWithPrefetchedByPC.
9646 *
9647 * Similar code is found in IEMExecLots.
9648 *
9649 * @return Strict VBox status code.
9650 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9651 * @param fExecuteInhibit If set, execute the instruction following CLI,
9652 * POP SS and MOV SS,GR.
9653 * @param pszFunction The calling function name.
9654 */
9655DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPUCC pVCpu, bool fExecuteInhibit, const char *pszFunction)
9656{
9657 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9658 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9659 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9660 RT_NOREF_PV(pszFunction);
9661
9662#ifdef IEM_WITH_SETJMP
9663 VBOXSTRICTRC rcStrict;
9664 jmp_buf JmpBuf;
9665 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
9666 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
9667 if ((rcStrict = setjmp(JmpBuf)) == 0)
9668 {
9669 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
9670 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
9671 }
9672 else
9673 pVCpu->iem.s.cLongJumps++;
9674 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
9675#else
9676 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
9677 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
9678#endif
9679 if (rcStrict == VINF_SUCCESS)
9680 pVCpu->iem.s.cInstructions++;
9681 if (pVCpu->iem.s.cActiveMappings > 0)
9682 {
9683 Assert(rcStrict != VINF_SUCCESS);
9684 iemMemRollback(pVCpu);
9685 }
9686 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9687 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9688 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9689
9690//#ifdef DEBUG
9691// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
9692//#endif
9693
9694#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9695 /*
9696 * Perform any VMX nested-guest instruction boundary actions.
9697 *
9698 * If any of these causes a VM-exit, we must skip executing the next
9699 * instruction (would run into stale page tables). A VM-exit makes sure
9700 * there is no interrupt-inhibition, so that should ensure we don't go
9701 * on to try executing the next instruction. Clearing fExecuteInhibit is
9702 * problematic because of the setjmp/longjmp clobbering above.
9703 */
9704 if ( !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9705 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)
9706 || rcStrict != VINF_SUCCESS)
9707 { /* likely */ }
9708 else
9709 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9710#endif
9711
9712 /* Execute the next instruction as well if a cli, pop ss or
9713 mov ss, Gr has just completed successfully. */
9714 if ( fExecuteInhibit
9715 && rcStrict == VINF_SUCCESS
9716 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
9717 && EMIsInhibitInterruptsActive(pVCpu))
9718 {
9719 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fBypassHandlers, pVCpu->iem.s.fDisregardLock);
9720 if (rcStrict == VINF_SUCCESS)
9721 {
9722#ifdef LOG_ENABLED
9723 iemLogCurInstr(pVCpu, false, pszFunction);
9724#endif
9725#ifdef IEM_WITH_SETJMP
9726 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
9727 if ((rcStrict = setjmp(JmpBuf)) == 0)
9728 {
9729 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
9730 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
9731 }
9732 else
9733 pVCpu->iem.s.cLongJumps++;
9734 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
9735#else
9736 IEM_OPCODE_GET_NEXT_U8(&b);
9737 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
9738#endif
9739 if (rcStrict == VINF_SUCCESS)
9740 {
9741 pVCpu->iem.s.cInstructions++;
9742#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9743 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9744 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW))
9745 { /* likely */ }
9746 else
9747 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9748#endif
9749 }
9750 if (pVCpu->iem.s.cActiveMappings > 0)
9751 {
9752 Assert(rcStrict != VINF_SUCCESS);
9753 iemMemRollback(pVCpu);
9754 }
9755 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9756 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9757 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9758 }
9759 else if (pVCpu->iem.s.cActiveMappings > 0)
9760 iemMemRollback(pVCpu);
9761 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS); /* hope this is correct for all exceptional cases... */
9762 }
9763
9764 /*
9765 * Return value fiddling, statistics and sanity assertions.
9766 */
9767 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9768
9769 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
9770 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
9771 return rcStrict;
9772}
9773
9774
9775/**
9776 * Execute one instruction.
9777 *
9778 * @return Strict VBox status code.
9779 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
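 *
 * @remarks Hypothetical caller sketch with the status handling trimmed for
 *          brevity; real callers such as EM do considerably more with the
 *          status code:
 * @code
 *     VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict; // let the outer execution loop deal with it
 * @endcode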
9780 */
9781VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPUCC pVCpu)
9782{
9783 AssertCompile(sizeof(pVCpu->iem.s) <= sizeof(pVCpu->iem.padding)); /* (tstVMStruct can't do its job w/o instruction stats) */
9784#ifdef LOG_ENABLED
9785 iemLogCurInstr(pVCpu, true, "IEMExecOne");
9786#endif
9787
9788 /*
9789 * Do the decoding and emulation.
9790 */
9791 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
9792 if (rcStrict == VINF_SUCCESS)
9793 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOne");
9794 else if (pVCpu->iem.s.cActiveMappings > 0)
9795 iemMemRollback(pVCpu);
9796
9797 if (rcStrict != VINF_SUCCESS)
9798 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9799 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9800 return rcStrict;
9801}
9802
9803
9804VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
9805{
9806 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
9807
9808 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
9809 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
9810 if (rcStrict == VINF_SUCCESS)
9811 {
9812 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneEx");
9813 if (pcbWritten)
9814 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
9815 }
9816 else if (pVCpu->iem.s.cActiveMappings > 0)
9817 iemMemRollback(pVCpu);
9818
9819 return rcStrict;
9820}
9821
9822
9823VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
9824 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
9825{
9826 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
9827
9828 VBOXSTRICTRC rcStrict;
9829 if ( cbOpcodeBytes
9830 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
9831 {
9832 iemInitDecoder(pVCpu, false, false);
9833#ifdef IEM_WITH_CODE_TLB
9834 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
9835 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
9836 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
9837 pVCpu->iem.s.offCurInstrStart = 0;
9838 pVCpu->iem.s.offInstrNextByte = 0;
9839#else
9840 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
9841 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
9842#endif
9843 rcStrict = VINF_SUCCESS;
9844 }
9845 else
9846 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
9847 if (rcStrict == VINF_SUCCESS)
9848 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneWithPrefetchedByPC");
9849 else if (pVCpu->iem.s.cActiveMappings > 0)
9850 iemMemRollback(pVCpu);
9851
9852 return rcStrict;
9853}
9854
9855
9856VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
9857{
9858 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
9859
9860 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
9861 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true, false);
9862 if (rcStrict == VINF_SUCCESS)
9863 {
9864 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassEx");
9865 if (pcbWritten)
9866 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
9867 }
9868 else if (pVCpu->iem.s.cActiveMappings > 0)
9869 iemMemRollback(pVCpu);
9870
9871 return rcStrict;
9872}
9873
9874
9875VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
9876 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
9877{
9878 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
9879
9880 VBOXSTRICTRC rcStrict;
9881 if ( cbOpcodeBytes
9882 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
9883 {
9884 iemInitDecoder(pVCpu, true, false);
9885#ifdef IEM_WITH_CODE_TLB
9886 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
9887 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
9888 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
9889 pVCpu->iem.s.offCurInstrStart = 0;
9890 pVCpu->iem.s.offInstrNextByte = 0;
9891#else
9892 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
9893 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
9894#endif
9895 rcStrict = VINF_SUCCESS;
9896 }
9897 else
9898 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true, false);
9899 if (rcStrict == VINF_SUCCESS)
9900 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPC");
9901 else if (pVCpu->iem.s.cActiveMappings > 0)
9902 iemMemRollback(pVCpu);
9903
9904 return rcStrict;
9905}
9906
9907
9908/**
9909 * For debugging DISGetParamSize; may come in handy.
9910 *
9911 * @returns Strict VBox status code.
9912 * @param pVCpu The cross context virtual CPU structure of the
9913 * calling EMT.
9914 * @param pCtxCore The context core structure.
9915 * @param OpcodeBytesPC The PC of the opcode bytes.
9916 * @param pvOpcodeBytes Prefetched opcode bytes.
9917 * @param cbOpcodeBytes Number of prefetched bytes.
9918 * @param pcbWritten Where to return the number of bytes written.
9919 * Optional.
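 *
 * @remarks Hedged sketch; the opcode buffer and PC below are made up for
 *          illustration only:
 * @code
 *     uint8_t const abOpcodes[] = { 0x90 }; // NOP
 *     uint32_t      cbWritten   = 0;
 *     VBOXSTRICTRC rcStrict = IEMExecOneBypassWithPrefetchedByPCWritten(pVCpu, CPUMCTX2CORE(IEM_GET_CTX(pVCpu)),
 *                                                                       pVCpu->cpum.GstCtx.rip, abOpcodes,
 *                                                                       sizeof(abOpcodes), &cbWritten);
 * @endcode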
9920 */
9921VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPCWritten(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
9922 const void *pvOpcodeBytes, size_t cbOpcodeBytes,
9923 uint32_t *pcbWritten)
9924{
9925 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
9926
9927 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
9928 VBOXSTRICTRC rcStrict;
9929 if ( cbOpcodeBytes
9930 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
9931 {
9932 iemInitDecoder(pVCpu, true, false);
9933#ifdef IEM_WITH_CODE_TLB
9934 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
9935 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
9936 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
9937 pVCpu->iem.s.offCurInstrStart = 0;
9938 pVCpu->iem.s.offInstrNextByte = 0;
9939#else
9940 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
9941 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
9942#endif
9943 rcStrict = VINF_SUCCESS;
9944 }
9945 else
9946 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true, false);
9947 if (rcStrict == VINF_SUCCESS)
9948 {
9949 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPCWritten");
9950 if (pcbWritten)
9951 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
9952 }
9953 else if (pVCpu->iem.s.cActiveMappings > 0)
9954 iemMemRollback(pVCpu);
9955
9956 return rcStrict;
9957}
9958
9959
9960/**
9961 * For handling split cacheline lock operations when the host has split-lock
9962 * detection enabled.
9963 *
9964 * This will cause the interpreter to disregard the lock prefix and implicit
9965 * locking (xchg).
9966 *
9967 * @returns Strict VBox status code.
9968 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9969 */
9970VMMDECL(VBOXSTRICTRC) IEMExecOneIgnoreLock(PVMCPUCC pVCpu)
9971{
9972 /*
9973 * Do the decoding and emulation.
9974 */
9975 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, true /*fDisregardLock*/);
9976 if (rcStrict == VINF_SUCCESS)
9977 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneIgnoreLock");
9978 else if (pVCpu->iem.s.cActiveMappings > 0)
9979 iemMemRollback(pVCpu);
9980
9981 if (rcStrict != VINF_SUCCESS)
9982 LogFlow(("IEMExecOneIgnoreLock: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9983 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9984 return rcStrict;
9985}
9986
9987
9988VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPUCC pVCpu, uint32_t cMaxInstructions, uint32_t cPollRate, uint32_t *pcInstructions)
9989{
9990 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
9991 AssertMsg(RT_IS_POWER_OF_TWO(cPollRate + 1), ("%#x\n", cPollRate));
9992
9993 /*
9994 * See if there is an interrupt pending in TRPM, inject it if we can.
9995 */
9996 /** @todo Can we centralize this under CPUMCanInjectInterrupt()? */
9997#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9998 bool fIntrEnabled = CPUMGetGuestGif(&pVCpu->cpum.GstCtx);
9999 if (fIntrEnabled)
10000 {
10001 if (!CPUMIsGuestInNestedHwvirtMode(IEM_GET_CTX(pVCpu)))
10002 fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
10003 else if (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
10004 fIntrEnabled = CPUMIsGuestVmxPhysIntrEnabled(IEM_GET_CTX(pVCpu));
10005 else
10006 {
10007 Assert(CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)));
10008 fIntrEnabled = CPUMIsGuestSvmPhysIntrEnabled(pVCpu, IEM_GET_CTX(pVCpu));
10009 }
10010 }
10011#else
10012 bool fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
10013#endif
10014
10015 /** @todo What if we are injecting an exception and not an interrupt? Is that
10016 * possible here? For now we assert it is indeed only an interrupt. */
10017 if ( fIntrEnabled
10018 && TRPMHasTrap(pVCpu)
10019 && EMGetInhibitInterruptsPC(pVCpu) != pVCpu->cpum.GstCtx.rip)
10020 {
10021 uint8_t u8TrapNo;
10022 TRPMEVENT enmType;
10023 uint32_t uErrCode;
10024 RTGCPTR uCr2;
10025 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */, NULL /* fIcebp */);
10026 AssertRC(rc2);
10027 Assert(enmType == TRPM_HARDWARE_INT);
10028 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
10029 TRPMResetTrap(pVCpu);
10030#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
10031 /* Injecting an event may cause a VM-exit. */
10032 if ( rcStrict != VINF_SUCCESS
10033 && rcStrict != VINF_IEM_RAISED_XCPT)
10034 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
10035#else
10036 NOREF(rcStrict);
10037#endif
10038 }
10039
10040 /*
10041 * Initial decoder init w/ prefetch, then setup setjmp.
10042 */
10043 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
10044 if (rcStrict == VINF_SUCCESS)
10045 {
10046#ifdef IEM_WITH_SETJMP
10047 jmp_buf JmpBuf;
10048 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
10049 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
10050 pVCpu->iem.s.cActiveMappings = 0;
10051 if ((rcStrict = setjmp(JmpBuf)) == 0)
10052#endif
10053 {
10054 /*
10055 * The run loop. We limit ourselves to the caller-specified instruction count.
10056 */
10057 uint32_t cMaxInstructionsGccStupidity = cMaxInstructions;
10058 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
10059 for (;;)
10060 {
10061 /*
10062 * Log the state.
10063 */
10064#ifdef LOG_ENABLED
10065 iemLogCurInstr(pVCpu, true, "IEMExecLots");
10066#endif
10067
10068 /*
10069 * Do the decoding and emulation.
10070 */
10071 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
10072 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
10073 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10074 {
10075 Assert(pVCpu->iem.s.cActiveMappings == 0);
10076 pVCpu->iem.s.cInstructions++;
10077
10078#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10079 /* Perform any VMX nested-guest instruction boundary actions. */
10080 uint64_t fCpu = pVCpu->fLocalForcedActions;
10081 if (!(fCpu & ( VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
10082 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)))
10083 { /* likely */ }
10084 else
10085 {
10086 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
10087 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10088 fCpu = pVCpu->fLocalForcedActions;
10089 else
10090 {
10091 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10092 break;
10093 }
10094 }
10095#endif
10096 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
10097 {
10098#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
10099 uint64_t fCpu = pVCpu->fLocalForcedActions;
10100#endif
10101 fCpu &= VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
10102 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
10103 | VMCPU_FF_TLB_FLUSH
10104 | VMCPU_FF_INHIBIT_INTERRUPTS
10105 | VMCPU_FF_BLOCK_NMIS
10106 | VMCPU_FF_UNHALT );
10107
10108 if (RT_LIKELY( ( !fCpu
10109 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
10110 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF) )
10111 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) ))
10112 {
10113 if (cMaxInstructionsGccStupidity-- > 0)
10114 {
10115 /* Poll timers every now and then according to the caller's specs. */
10116 if ( (cMaxInstructionsGccStupidity & cPollRate) != 0
10117 || !TMTimerPollBool(pVM, pVCpu))
10118 {
10119 Assert(pVCpu->iem.s.cActiveMappings == 0);
10120 iemReInitDecoder(pVCpu);
10121 continue;
10122 }
10123 }
10124 }
10125 }
10126 Assert(pVCpu->iem.s.cActiveMappings == 0);
10127 }
10128 else if (pVCpu->iem.s.cActiveMappings > 0)
10129 iemMemRollback(pVCpu);
10130 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10131 break;
10132 }
10133 }
10134#ifdef IEM_WITH_SETJMP
10135 else
10136 {
10137 if (pVCpu->iem.s.cActiveMappings > 0)
10138 iemMemRollback(pVCpu);
10139# if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
10140 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10141# endif
10142 pVCpu->iem.s.cLongJumps++;
10143 }
10144 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
10145#endif
10146
10147 /*
10148 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
10149 */
10150 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
10151 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
10152 }
10153 else
10154 {
10155 if (pVCpu->iem.s.cActiveMappings > 0)
10156 iemMemRollback(pVCpu);
10157
10158#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
10159 /*
10160 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
10161 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
10162 */
10163 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10164#endif
10165 }
10166
10167 /*
10168 * Maybe re-enter raw-mode and log.
10169 */
10170 if (rcStrict != VINF_SUCCESS)
10171 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
10172 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
10173 if (pcInstructions)
10174 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
10175 return rcStrict;
10176}
10177
10178
10179/**
10180 * Interface used by EMExecuteExec; does exit statistics and limits.
10181 *
10182 * @returns Strict VBox status code.
10183 * @param pVCpu The cross context virtual CPU structure.
10184 * @param fWillExit To be defined.
10185 * @param cMinInstructions Minimum number of instructions to execute before checking for FFs.
10186 * @param cMaxInstructions Maximum number of instructions to execute.
10187 * @param cMaxInstructionsWithoutExits
10188 * The max number of instructions without exits.
10189 * @param pStats Where to return statistics.
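 *
 * @remarks Hedged usage sketch; the instruction count parameters below are
 *          arbitrary illustration values:
 * @code
 *     IEMEXECFOREXITSTATS Stats;
 *     // fWillExit=0 (not yet defined/used), min=32, max=4096, max w/o exits=512.
 *     VBOXSTRICTRC rcStrict = IEMExecForExits(pVCpu, 0, 32, 4096, 512, &Stats);
 * @endcode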
10190 */
10191VMMDECL(VBOXSTRICTRC) IEMExecForExits(PVMCPUCC pVCpu, uint32_t fWillExit, uint32_t cMinInstructions, uint32_t cMaxInstructions,
10192 uint32_t cMaxInstructionsWithoutExits, PIEMEXECFOREXITSTATS pStats)
10193{
10194 NOREF(fWillExit); /** @todo define flexible exit crits */
10195
10196 /*
10197 * Initialize return stats.
10198 */
10199 pStats->cInstructions = 0;
10200 pStats->cExits = 0;
10201 pStats->cMaxExitDistance = 0;
10202 pStats->cReserved = 0;
10203
10204 /*
10205 * Initial decoder init w/ prefetch, then setup setjmp.
10206 */
10207 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
10208 if (rcStrict == VINF_SUCCESS)
10209 {
10210#ifdef IEM_WITH_SETJMP
10211 jmp_buf JmpBuf;
10212 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
10213 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
10214 pVCpu->iem.s.cActiveMappings = 0;
10215 if ((rcStrict = setjmp(JmpBuf)) == 0)
10216#endif
10217 {
10218#ifdef IN_RING0
10219 bool const fCheckPreemptionPending = !RTThreadPreemptIsPossible() || !RTThreadPreemptIsEnabled(NIL_RTTHREAD);
10220#endif
10221 uint32_t cInstructionSinceLastExit = 0;
10222
10223 /*
10224 * The run loop. We limit ourselves to the caller-specified instruction counts.
10225 */
10226 PVM pVM = pVCpu->CTX_SUFF(pVM);
10227 for (;;)
10228 {
10229 /*
10230 * Log the state.
10231 */
10232#ifdef LOG_ENABLED
10233 iemLogCurInstr(pVCpu, true, "IEMExecForExits");
10234#endif
10235
10236 /*
10237 * Do the decoding and emulation.
10238 */
10239 uint32_t const cPotentialExits = pVCpu->iem.s.cPotentialExits;
10240
10241 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
10242 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
10243
10244 if ( cPotentialExits != pVCpu->iem.s.cPotentialExits
10245 && cInstructionSinceLastExit > 0 /* don't count the first */ )
10246 {
10247 pStats->cExits += 1;
10248 if (cInstructionSinceLastExit > pStats->cMaxExitDistance)
10249 pStats->cMaxExitDistance = cInstructionSinceLastExit;
10250 cInstructionSinceLastExit = 0;
10251 }
10252
10253 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10254 {
10255 Assert(pVCpu->iem.s.cActiveMappings == 0);
10256 pVCpu->iem.s.cInstructions++;
10257 pStats->cInstructions++;
10258 cInstructionSinceLastExit++;
10259
10260#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10261 /* Perform any VMX nested-guest instruction boundary actions. */
10262 uint64_t fCpu = pVCpu->fLocalForcedActions;
10263 if (!(fCpu & ( VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
10264 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)))
10265 { /* likely */ }
10266 else
10267 {
10268 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
10269 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10270 fCpu = pVCpu->fLocalForcedActions;
10271 else
10272 {
10273 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10274 break;
10275 }
10276 }
10277#endif
10278 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
10279 {
10280#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
10281 uint64_t fCpu = pVCpu->fLocalForcedActions;
10282#endif
10283 fCpu &= VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
10284 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
10285 | VMCPU_FF_TLB_FLUSH
10286 | VMCPU_FF_INHIBIT_INTERRUPTS
10287 | VMCPU_FF_BLOCK_NMIS
10288 | VMCPU_FF_UNHALT );
10289 if (RT_LIKELY( ( ( !fCpu
10290 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
10291 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF))
10292 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) )
10293 || pStats->cInstructions < cMinInstructions))
10294 {
10295 if (pStats->cInstructions < cMaxInstructions)
10296 {
10297 if (cInstructionSinceLastExit <= cMaxInstructionsWithoutExits)
10298 {
10299#ifdef IN_RING0
10300 if ( !fCheckPreemptionPending
10301 || !RTThreadPreemptIsPending(NIL_RTTHREAD))
10302#endif
10303 {
10304 Assert(pVCpu->iem.s.cActiveMappings == 0);
10305 iemReInitDecoder(pVCpu);
10306 continue;
10307 }
10308#ifdef IN_RING0
10309 rcStrict = VINF_EM_RAW_INTERRUPT;
10310 break;
10311#endif
10312 }
10313 }
10314 }
10315 Assert(!(fCpu & VMCPU_FF_IEM));
10316 }
10317 Assert(pVCpu->iem.s.cActiveMappings == 0);
10318 }
10319 else if (pVCpu->iem.s.cActiveMappings > 0)
10320 iemMemRollback(pVCpu);
10321 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10322 break;
10323 }
10324 }
10325#ifdef IEM_WITH_SETJMP
10326 else
10327 {
10328 if (pVCpu->iem.s.cActiveMappings > 0)
10329 iemMemRollback(pVCpu);
10330 pVCpu->iem.s.cLongJumps++;
10331 }
10332 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
10333#endif
10334
10335 /*
10336 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
10337 */
10338 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
10339 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
10340 }
10341 else
10342 {
10343 if (pVCpu->iem.s.cActiveMappings > 0)
10344 iemMemRollback(pVCpu);
10345
10346#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
10347 /*
10348 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
10349 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
10350 */
10351 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10352#endif
10353 }
10354
10355 /*
10356 * Maybe re-enter raw-mode and log.
10357 */
10358 if (rcStrict != VINF_SUCCESS)
10359 LogFlow(("IEMExecForExits: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc; ins=%u exits=%u maxdist=%u\n",
10360 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp,
10361 pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict), pStats->cInstructions, pStats->cExits, pStats->cMaxExitDistance));
10362 return rcStrict;
10363}
10364
10365
10366/**
10367 * Injects a trap, fault, abort, software interrupt or external interrupt.
10368 *
10369 * The parameter list matches TRPMQueryTrapAll pretty closely.
10370 *
10371 * @returns Strict VBox status code.
10372 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10373 * @param u8TrapNo The trap number.
10374 * @param enmType What type is it (trap/fault/abort), software
10375 * interrupt or hardware interrupt.
10376 * @param uErrCode The error code if applicable.
10377 * @param uCr2 The CR2 value if applicable.
10378 * @param cbInstr The instruction length (only relevant for
10379 * software interrupts).
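 *
 * @remarks Illustrative sketch of injecting an external hardware interrupt;
 *          the vector is made up for the example. uErrCode and uCr2 are
 *          ignored for hardware interrupts, and cbInstr only matters for
 *          software interrupts:
 * @code
 *     VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, 0x20, TRPM_HARDWARE_INT, 0, 0, 0);
 * @endcode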
10380 */
10381VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPUCC pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
10382 uint8_t cbInstr)
10383{
10384 iemInitDecoder(pVCpu, false, false);
10385#ifdef DBGFTRACE_ENABLED
10386 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
10387 u8TrapNo, enmType, uErrCode, uCr2);
10388#endif
10389
10390 uint32_t fFlags;
10391 switch (enmType)
10392 {
10393 case TRPM_HARDWARE_INT:
10394 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
10395 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
10396 uErrCode = uCr2 = 0;
10397 break;
10398
10399 case TRPM_SOFTWARE_INT:
10400 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
10401 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
10402 uErrCode = uCr2 = 0;
10403 break;
10404
10405 case TRPM_TRAP:
10406 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
10407 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
10408 if (u8TrapNo == X86_XCPT_PF)
10409 fFlags |= IEM_XCPT_FLAGS_CR2;
10410 switch (u8TrapNo)
10411 {
10412 case X86_XCPT_DF:
10413 case X86_XCPT_TS:
10414 case X86_XCPT_NP:
10415 case X86_XCPT_SS:
10416 case X86_XCPT_PF:
10417 case X86_XCPT_AC:
10418 case X86_XCPT_GP:
10419 fFlags |= IEM_XCPT_FLAGS_ERR;
10420 break;
10421 }
10422 break;
10423
10424 IEM_NOT_REACHED_DEFAULT_CASE_RET();
10425 }
10426
10427 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
10428
10429 if (pVCpu->iem.s.cActiveMappings > 0)
10430 iemMemRollback(pVCpu);
10431
10432 return rcStrict;
10433}
10434
10435
10436/**
10437 * Injects the active TRPM event.
10438 *
10439 * @returns Strict VBox status code.
10440 * @param pVCpu The cross context virtual CPU structure.
10441 */
10442VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPUCC pVCpu)
10443{
10444#ifndef IEM_IMPLEMENTS_TASKSWITCH
10445 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
10446#else
10447 uint8_t u8TrapNo;
10448 TRPMEVENT enmType;
10449 uint32_t uErrCode;
10450 RTGCUINTPTR uCr2;
10451 uint8_t cbInstr;
10452 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr, NULL /* fIcebp */);
10453 if (RT_FAILURE(rc))
10454 return rc;
10455
10456 /** @todo r=ramshankar: Pass ICEBP info. to IEMInjectTrap() below and handle
10457 * ICEBP \#DB injection as a special case. */
10458 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
10459#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
10460 if (rcStrict == VINF_SVM_VMEXIT)
10461 rcStrict = VINF_SUCCESS;
10462#endif
10463#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10464 if (rcStrict == VINF_VMX_VMEXIT)
10465 rcStrict = VINF_SUCCESS;
10466#endif
10467 /** @todo Are there any other codes that imply the event was successfully
10468 * delivered to the guest? See @bugref{6607}. */
10469 if ( rcStrict == VINF_SUCCESS
10470 || rcStrict == VINF_IEM_RAISED_XCPT)
10471 TRPMResetTrap(pVCpu);
10472
10473 return rcStrict;
10474#endif
10475}
10476
10477
10478VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
10479{
10480 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
10481 return VERR_NOT_IMPLEMENTED;
10482}
10483
10484
10485VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
10486{
10487 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
10488 return VERR_NOT_IMPLEMENTED;
10489}
10490
10491
10492#if 0 /* The IRET-to-v8086 mode in PATM is very optimistic, so I don't dare do this yet. */
10493/**
10494 * Executes an IRET instruction with default operand size.
10495 *
10496 * This is for PATM.
10497 *
10498 * @returns VBox status code.
10499 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10500 * @param pCtxCore The register frame.
10501 */
10502VMM_INT_DECL(int) IEMExecInstr_iret(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore)
10503{
10504 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10505
10506 iemCtxCoreToCtx(pCtx, pCtxCore);
10507 iemInitDecoder(pVCpu);
10508 VBOXSTRICTRC rcStrict = iemCImpl_iret(pVCpu, 1, pVCpu->iem.s.enmDefOpSize);
10509 if (rcStrict == VINF_SUCCESS)
10510 iemCtxToCtxCore(pCtxCore, pCtx);
10511 else
10512 LogFlow(("IEMExecInstr_iret: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
10513 pVCpu->cpum.GstCtx.cs, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
10514 return rcStrict;
10515}
10516#endif
10517
10518
10519/**
10520 * Interface for HM and EM for executing string I/O OUT (write) instructions.
10521 *
10522 * This API ASSUMES that the caller has already verified that the guest code is
10523 * allowed to access the I/O port. (The I/O port is in the DX register in the
10524 * guest state.)
10525 *
10526 * @returns Strict VBox status code.
10527 * @param pVCpu The cross context virtual CPU structure.
10528 * @param cbValue The size of the I/O port access (1, 2, or 4).
10529 * @param enmAddrMode The addressing mode.
10530 * @param fRepPrefix Indicates whether a repeat prefix is used
10531 * (doesn't matter which for this instruction).
10532 * @param cbInstr The instruction length in bytes.
10533 * @param   iEffSeg     The effective segment register.
10534 * @param fIoChecked Whether the access to the I/O port has been
10535 * checked or not. It's typically checked in the
10536 * HM scenario.
10537 */
10538VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
10539 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
10540{
10541 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
10542 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10543
10544 /*
10545 * State init.
10546 */
10547 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10548
10549 /*
10550 * Switch orgy for getting to the right handler.
10551 */
10552 VBOXSTRICTRC rcStrict;
10553 if (fRepPrefix)
10554 {
10555 switch (enmAddrMode)
10556 {
10557 case IEMMODE_16BIT:
10558 switch (cbValue)
10559 {
10560 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10561 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10562 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10563 default:
10564 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10565 }
10566 break;
10567
10568 case IEMMODE_32BIT:
10569 switch (cbValue)
10570 {
10571 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10572 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10573 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10574 default:
10575 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10576 }
10577 break;
10578
10579 case IEMMODE_64BIT:
10580 switch (cbValue)
10581 {
10582 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10583 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10584 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10585 default:
10586 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10587 }
10588 break;
10589
10590 default:
10591 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10592 }
10593 }
10594 else
10595 {
10596 switch (enmAddrMode)
10597 {
10598 case IEMMODE_16BIT:
10599 switch (cbValue)
10600 {
10601 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10602 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10603 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10604 default:
10605 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10606 }
10607 break;
10608
10609 case IEMMODE_32BIT:
10610 switch (cbValue)
10611 {
10612 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10613 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10614 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10615 default:
10616 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10617 }
10618 break;
10619
10620 case IEMMODE_64BIT:
10621 switch (cbValue)
10622 {
10623 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10624 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10625 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10626 default:
10627 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10628 }
10629 break;
10630
10631 default:
10632 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10633 }
10634 }
10635
10636 if (pVCpu->iem.s.cActiveMappings)
10637 iemMemRollback(pVCpu);
10638
10639 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10640}
10641
10642
10643/**
10644 * Interface for HM and EM for executing string I/O IN (read) instructions.
10645 *
10646 * This API ASSUMES that the caller has already verified that the guest code is
10647 * allowed to access the I/O port. (The I/O port is in the DX register in the
10648 * guest state.)
10649 *
10650 * @returns Strict VBox status code.
10651 * @param pVCpu The cross context virtual CPU structure.
10652 * @param cbValue The size of the I/O port access (1, 2, or 4).
10653 * @param enmAddrMode The addressing mode.
10654 * @param fRepPrefix Indicates whether a repeat prefix is used
10655 * (doesn't matter which for this instruction).
10656 * @param cbInstr The instruction length in bytes.
10657 * @param fIoChecked Whether the access to the I/O port has been
10658 * checked or not. It's typically checked in the
10659 * HM scenario.
10660 */
10661VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
10662 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
10663{
10664 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10665
10666 /*
10667 * State init.
10668 */
10669 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10670
10671 /*
10672 * Switch orgy for getting to the right handler.
10673 */
10674 VBOXSTRICTRC rcStrict;
10675 if (fRepPrefix)
10676 {
10677 switch (enmAddrMode)
10678 {
10679 case IEMMODE_16BIT:
10680 switch (cbValue)
10681 {
10682 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
10683 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
10684 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
10685 default:
10686 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10687 }
10688 break;
10689
10690 case IEMMODE_32BIT:
10691 switch (cbValue)
10692 {
10693 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
10694 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
10695 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
10696 default:
10697 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10698 }
10699 break;
10700
10701 case IEMMODE_64BIT:
10702 switch (cbValue)
10703 {
10704 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
10705 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
10706 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
10707 default:
10708 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10709 }
10710 break;
10711
10712 default:
10713 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10714 }
10715 }
10716 else
10717 {
10718 switch (enmAddrMode)
10719 {
10720 case IEMMODE_16BIT:
10721 switch (cbValue)
10722 {
10723 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
10724 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
10725 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
10726 default:
10727 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10728 }
10729 break;
10730
10731 case IEMMODE_32BIT:
10732 switch (cbValue)
10733 {
10734 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
10735 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
10736 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
10737 default:
10738 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10739 }
10740 break;
10741
10742 case IEMMODE_64BIT:
10743 switch (cbValue)
10744 {
10745 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
10746 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
10747 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
10748 default:
10749 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10750 }
10751 break;
10752
10753 default:
10754 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10755 }
10756 }
10757
10758 if ( pVCpu->iem.s.cActiveMappings == 0
10759 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
10760 { /* likely */ }
10761 else
10762 {
10763 AssertMsg(!IOM_SUCCESS(rcStrict), ("%#x\n", VBOXSTRICTRC_VAL(rcStrict)));
10764 iemMemRollback(pVCpu);
10765 }
10766 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10767}
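
/**
 * A sketch of how an HM exit handler might forward an already decoded string
 * I/O exit to the two interfaces above, assuming the exit information has
 * been translated into direction, operand size, address mode, REP prefix and
 * segment.  The function name hmR0ExampleStringIoExit and its parameter set
 * are illustrative assumptions, not actual HM code.
 *
 * @code
 * static VBOXSTRICTRC hmR0ExampleStringIoExit(PVMCPUCC pVCpu, bool fWrite, uint8_t cbValue, IEMMODE enmAddrMode,
 *                                             bool fRep, uint8_t cbInstr, uint8_t iEffSeg)
 * {
 *     bool const fIoChecked = true;   // the I/O permission checks are assumed done by the caller
 *     if (fWrite)
 *         return IEMExecStringIoWrite(pVCpu, cbValue, enmAddrMode, fRep, cbInstr, iEffSeg, fIoChecked);
 *     return IEMExecStringIoRead(pVCpu, cbValue, enmAddrMode, fRep, cbInstr, fIoChecked);
 * }
 * @endcode
 */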
10768
10769
10770/**
10771 * Interface for rawmode to execute an OUT instruction.
10772 *
10773 * @returns Strict VBox status code.
10774 * @param pVCpu The cross context virtual CPU structure.
10775 * @param cbInstr The instruction length in bytes.
10776 * @param   u16Port     The port to write to.
10777 * @param fImm Whether the port is specified using an immediate operand or
10778 * using the implicit DX register.
10779 * @param cbReg The register size.
10780 *
10781 * @remarks In ring-0 not all of the state needs to be synced in.
10782 */
10783VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
10784{
10785 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10786 Assert(cbReg <= 4 && cbReg != 3);
10787
10788 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10789 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_out, u16Port, fImm, cbReg);
10790 Assert(!pVCpu->iem.s.cActiveMappings);
10791 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10792}
10793
10794
10795/**
10796 * Interface for rawmode to execute an IN instruction.
10797 *
10798 * @returns Strict VBox status code.
10799 * @param pVCpu The cross context virtual CPU structure.
10800 * @param cbInstr The instruction length in bytes.
10801 * @param u16Port The port to read.
10802 * @param fImm Whether the port is specified using an immediate operand or
10803 *                      using the implicit DX register.
10804 * @param cbReg The register size.
10805 */
10806VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
10807{
10808 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10809 Assert(cbReg <= 4 && cbReg != 3);
10810
10811 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10812 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_in, u16Port, fImm, cbReg);
10813 Assert(!pVCpu->iem.s.cActiveMappings);
10814 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10815}
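
/**
 * A sketch of a port I/O exit handler built on the two decoded interfaces
 * above, assuming the exit has already been decoded into direction, port,
 * immediate-vs-DX form and access width.  hmR0ExamplePortIoExit is a
 * hypothetical name used only for illustration.
 *
 * @code
 * static VBOXSTRICTRC hmR0ExamplePortIoExit(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port,
 *                                           bool fWrite, bool fImm, uint8_t cbReg)
 * {
 *     if (fWrite)
 *         return IEMExecDecodedOut(pVCpu, cbInstr, u16Port, fImm, cbReg);  // OUT: guest -> port
 *     return IEMExecDecodedIn(pVCpu, cbInstr, u16Port, fImm, cbReg);       // IN:  port -> guest
 * }
 * @endcode
 */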
10816
10817
10818/**
10819 * Interface for HM and EM to write to a CRx register.
10820 *
10821 * @returns Strict VBox status code.
10822 * @param pVCpu The cross context virtual CPU structure.
10823 * @param cbInstr The instruction length in bytes.
10824 * @param iCrReg The control register number (destination).
10825 * @param iGReg The general purpose register number (source).
10826 *
10827 * @remarks In ring-0 not all of the state needs to be synced in.
10828 */
10829VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
10830{
10831 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10832 Assert(iCrReg < 16);
10833 Assert(iGReg < 16);
10834
10835 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10836 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
10837 Assert(!pVCpu->iem.s.cActiveMappings);
10838 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10839}
10840
10841
10842/**
10843 * Interface for HM and EM to read from a CRx register.
10844 *
10845 * @returns Strict VBox status code.
10846 * @param pVCpu The cross context virtual CPU structure.
10847 * @param cbInstr The instruction length in bytes.
10848 * @param iGReg The general purpose register number (destination).
10849 * @param iCrReg The control register number (source).
10850 *
10851 * @remarks In ring-0 not all of the state needs to be synced in.
10852 */
10853VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
10854{
10855 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10856 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4
10857 | CPUMCTX_EXTRN_APIC_TPR);
10858 Assert(iCrReg < 16);
10859 Assert(iGReg < 16);
10860
10861 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10862 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
10863 Assert(!pVCpu->iem.s.cActiveMappings);
10864 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10865}
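
/**
 * A sketch of a MOV CRx exit handler using the two interfaces above, assuming
 * the control register and general register numbers have been extracted from
 * the exit qualification.  hmR0ExampleMovCrxExit is a hypothetical name; note
 * the swapped argument order between the write and read variants.
 *
 * @code
 * static VBOXSTRICTRC hmR0ExampleMovCrxExit(PVMCPUCC pVCpu, uint8_t cbInstr, bool fWrite,
 *                                           uint8_t iCrReg, uint8_t iGReg)
 * {
 *     if (fWrite)
 *         return IEMExecDecodedMovCRxWrite(pVCpu, cbInstr, iCrReg, iGReg);  // mov CRx, GReg
 *     return IEMExecDecodedMovCRxRead(pVCpu, cbInstr, iGReg, iCrReg);       // mov GReg, CRx
 * }
 * @endcode
 */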
10866
10867
10868/**
10869 * Interface for HM and EM to clear the CR0[TS] bit.
10870 *
10871 * @returns Strict VBox status code.
10872 * @param pVCpu The cross context virtual CPU structure.
10873 * @param cbInstr The instruction length in bytes.
10874 *
10875 * @remarks In ring-0 not all of the state needs to be synced in.
10876 */
10877VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPUCC pVCpu, uint8_t cbInstr)
10878{
10879 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10880
10881 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10882 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
10883 Assert(!pVCpu->iem.s.cActiveMappings);
10884 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10885}
10886
10887
10888/**
10889 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
10890 *
10891 * @returns Strict VBox status code.
10892 * @param pVCpu The cross context virtual CPU structure.
10893 * @param cbInstr The instruction length in bytes.
10894 * @param uValue The value to load into CR0.
10895 * @param GCPtrEffDst The guest-linear address if the LMSW instruction has a
10896 * memory operand. Otherwise pass NIL_RTGCPTR.
10897 *
10898 * @remarks In ring-0 not all of the state needs to be synced in.
10899 */
10900VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t uValue, RTGCPTR GCPtrEffDst)
10901{
10902 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10903
10904 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10905 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_lmsw, uValue, GCPtrEffDst);
10906 Assert(!pVCpu->iem.s.cActiveMappings);
10907 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10908}
10909
10910
10911/**
10912 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
10913 *
10914 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
10915 *
10916 * @returns Strict VBox status code.
10917 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10918 * @param cbInstr The instruction length in bytes.
10919 * @remarks In ring-0 not all of the state needs to be synced in.
10920 * @thread EMT(pVCpu)
10921 */
10922VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPUCC pVCpu, uint8_t cbInstr)
10923{
10924 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10925
10926 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10927 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
10928 Assert(!pVCpu->iem.s.cActiveMappings);
10929 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10930}
10931
10932
10933/**
10934 * Interface for HM and EM to emulate the WBINVD instruction.
10935 *
10936 * @returns Strict VBox status code.
10937 * @param pVCpu The cross context virtual CPU structure.
10938 * @param cbInstr The instruction length in bytes.
10939 *
10940 * @remarks In ring-0 not all of the state needs to be synced in.
10941 */
10942VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWbinvd(PVMCPUCC pVCpu, uint8_t cbInstr)
10943{
10944 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10945
10946 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10947 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wbinvd);
10948 Assert(!pVCpu->iem.s.cActiveMappings);
10949 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10950}
10951
10952
10953/**
10954 * Interface for HM and EM to emulate the INVD instruction.
10955 *
10956 * @returns Strict VBox status code.
10957 * @param pVCpu The cross context virtual CPU structure.
10958 * @param cbInstr The instruction length in bytes.
10959 *
10960 * @remarks In ring-0 not all of the state needs to be synced in.
10961 */
10962VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvd(PVMCPUCC pVCpu, uint8_t cbInstr)
10963{
10964 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10965
10966 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10967 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invd);
10968 Assert(!pVCpu->iem.s.cActiveMappings);
10969 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10970}
10971
10972
10973/**
10974 * Interface for HM and EM to emulate the INVLPG instruction.
10975 *
10976 * @returns Strict VBox status code.
10977 * @retval VINF_PGM_SYNC_CR3
10978 *
10979 * @param pVCpu The cross context virtual CPU structure.
10980 * @param cbInstr The instruction length in bytes.
10981 * @param GCPtrPage The effective address of the page to invalidate.
10982 *
10983 * @remarks In ring-0 not all of the state needs to be synced in.
10984 */
10985VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpg(PVMCPUCC pVCpu, uint8_t cbInstr, RTGCPTR GCPtrPage)
10986{
10987 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10988
10989 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10990 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_invlpg, GCPtrPage);
10991 Assert(!pVCpu->iem.s.cActiveMappings);
10992 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10993}
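
/**
 * A sketch of an INVLPG exit handler, assuming the linear address comes from
 * the exit qualification.  hmR0ExampleInvlpgExit is a hypothetical name; the
 * VINF_PGM_SYNC_CR3 status is simply propagated here on the assumption that
 * the outer execution loop performs the shadow page table resync.
 *
 * @code
 * static VBOXSTRICTRC hmR0ExampleInvlpgExit(PVMCPUCC pVCpu, uint8_t cbInstr, RTGCPTR GCPtrPage)
 * {
 *     VBOXSTRICTRC rcStrict = IEMExecDecodedInvlpg(pVCpu, cbInstr, GCPtrPage);
 *     // rcStrict may be VINF_PGM_SYNC_CR3, telling the caller that PGM needs a CR3 resync.
 *     return rcStrict;
 * }
 * @endcode
 */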
10994
10995
10996/**
10997 * Interface for HM and EM to emulate the INVPCID instruction.
10998 *
10999 * @returns Strict VBox status code.
11000 * @retval VINF_PGM_SYNC_CR3
11001 *
11002 * @param pVCpu The cross context virtual CPU structure.
11003 * @param cbInstr The instruction length in bytes.
11004 * @param iEffSeg The effective segment register.
11005 * @param GCPtrDesc The effective address of the INVPCID descriptor.
11006 * @param uType The invalidation type.
11007 *
11008 * @remarks In ring-0 not all of the state needs to be synced in.
11009 */
11010VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvpcid(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPTR GCPtrDesc,
11011 uint64_t uType)
11012{
11013 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 4);
11014
11015 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11016 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_invpcid, iEffSeg, GCPtrDesc, uType);
11017 Assert(!pVCpu->iem.s.cActiveMappings);
11018 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11019}
11020
11021
11022/**
11023 * Interface for HM and EM to emulate the CPUID instruction.
11024 *
11025 * @returns Strict VBox status code.
11026 *
11027 * @param pVCpu The cross context virtual CPU structure.
11028 * @param cbInstr The instruction length in bytes.
11029 *
11030 * @remarks Not all of the state needs to be synced in; the usual state plus RAX and RCX.
11031 */
11032VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedCpuid(PVMCPUCC pVCpu, uint8_t cbInstr)
11033{
11034 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11035 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX);
11036
11037 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11038 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_cpuid);
11039 Assert(!pVCpu->iem.s.cActiveMappings);
11040 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11041}
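
/**
 * A sketch of a CPUID exit handler, assuming RAX and RCX (leaf and sub-leaf)
 * have already been imported into the guest context.  hmR0ExampleCpuidExit is
 * a hypothetical name; converting VINF_IEM_RAISED_XCPT to VINF_SUCCESS is an
 * assumption that the raised exception is left pending for normal delivery.
 *
 * @code
 * static VBOXSTRICTRC hmR0ExampleCpuidExit(PVMCPUCC pVCpu, uint8_t cbInstr)
 * {
 *     VBOXSTRICTRC rcStrict = IEMExecDecodedCpuid(pVCpu, cbInstr);
 *     if (rcStrict == VINF_IEM_RAISED_XCPT)  // the exception is already pending in the guest context
 *         rcStrict = VINF_SUCCESS;
 *     return rcStrict;
 * }
 * @endcode
 */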
11042
11043
11044/**
11045 * Interface for HM and EM to emulate the RDPMC instruction.
11046 *
11047 * @returns Strict VBox status code.
11048 *
11049 * @param pVCpu The cross context virtual CPU structure.
11050 * @param cbInstr The instruction length in bytes.
11051 *
11052 * @remarks Not all of the state needs to be synced in.
11053 */
11054VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdpmc(PVMCPUCC pVCpu, uint8_t cbInstr)
11055{
11056 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11057 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
11058
11059 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11060 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdpmc);
11061 Assert(!pVCpu->iem.s.cActiveMappings);
11062 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11063}
11064
11065
11066/**
11067 * Interface for HM and EM to emulate the RDTSC instruction.
11068 *
11069 * @returns Strict VBox status code.
11070 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
11071 *
11072 * @param pVCpu The cross context virtual CPU structure.
11073 * @param cbInstr The instruction length in bytes.
11074 *
11075 * @remarks Not all of the state needs to be synced in.
11076 */
11077VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtsc(PVMCPUCC pVCpu, uint8_t cbInstr)
11078{
11079 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11080 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
11081
11082 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11083 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtsc);
11084 Assert(!pVCpu->iem.s.cActiveMappings);
11085 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11086}
11087
11088
11089/**
11090 * Interface for HM and EM to emulate the RDTSCP instruction.
11091 *
11092 * @returns Strict VBox status code.
11093 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
11094 *
11095 * @param pVCpu The cross context virtual CPU structure.
11096 * @param cbInstr The instruction length in bytes.
11097 *
11098 * @remarks Not all of the state needs to be synced in.  Including
11099 *          CPUMCTX_EXTRN_TSC_AUX is recommended to avoid an extra fetch call.
11100 */
11101VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtscp(PVMCPUCC pVCpu, uint8_t cbInstr)
11102{
11103 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11104 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_TSC_AUX);
11105
11106 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11107 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtscp);
11108 Assert(!pVCpu->iem.s.cActiveMappings);
11109 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11110}
11111
11112
11113/**
11114 * Interface for HM and EM to emulate the RDMSR instruction.
11115 *
11116 * @returns Strict VBox status code.
11117 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
11118 *
11119 * @param pVCpu The cross context virtual CPU structure.
11120 * @param cbInstr The instruction length in bytes.
11121 *
11122 * @remarks Not all of the state needs to be synced in. Requires RCX and
11123 * (currently) all MSRs.
11124 */
11125VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
11126{
11127 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11128 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_ALL_MSRS);
11129
11130 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11131 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdmsr);
11132 Assert(!pVCpu->iem.s.cActiveMappings);
11133 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11134}
11135
11136
11137/**
11138 * Interface for HM and EM to emulate the WRMSR instruction.
11139 *
11140 * @returns Strict VBox status code.
11141 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
11142 *
11143 * @param pVCpu The cross context virtual CPU structure.
11144 * @param cbInstr The instruction length in bytes.
11145 *
11146 * @remarks Not all of the state needs to be synced in. Requires RCX, RAX, RDX,
11147 * and (currently) all MSRs.
11148 */
11149VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWrmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
11150{
11151 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11152 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK
11153 | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_ALL_MSRS);
11154
11155 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11156 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wrmsr);
11157 Assert(!pVCpu->iem.s.cActiveMappings);
11158 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11159}
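
/**
 * A sketch of a combined RDMSR/WRMSR exit handler, assuming the caller has
 * imported RCX (and RAX/RDX for WRMSR) together with the MSR state before
 * calling.  hmR0ExampleMsrExit is a hypothetical name used for illustration.
 *
 * @code
 * static VBOXSTRICTRC hmR0ExampleMsrExit(PVMCPUCC pVCpu, uint8_t cbInstr, bool fWrite)
 * {
 *     if (fWrite)
 *         return IEMExecDecodedWrmsr(pVCpu, cbInstr);  // value in EDX:EAX, MSR index in ECX
 *     return IEMExecDecodedRdmsr(pVCpu, cbInstr);      // MSR index in ECX, result in EDX:EAX
 * }
 * @endcode
 */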
11160
11161
11162/**
11163 * Interface for HM and EM to emulate the MONITOR instruction.
11164 *
11165 * @returns Strict VBox status code.
11166 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
11167 *
11168 * @param pVCpu The cross context virtual CPU structure.
11169 * @param cbInstr The instruction length in bytes.
11170 *
11171 * @remarks Not all of the state needs to be synced in.
11172 * @remarks ASSUMES the default segment of DS is used and that no segment
11173 *          override prefixes are present.
11174 */
11175VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMonitor(PVMCPUCC pVCpu, uint8_t cbInstr)
11176{
11177 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11178 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);
11179
11180 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11181 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_monitor, X86_SREG_DS);
11182 Assert(!pVCpu->iem.s.cActiveMappings);
11183 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11184}
11185
11186
11187/**
11188 * Interface for HM and EM to emulate the MWAIT instruction.
11189 *
11190 * @returns Strict VBox status code.
11191 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
11192 *
11193 * @param pVCpu The cross context virtual CPU structure.
11194 * @param cbInstr The instruction length in bytes.
11195 *
11196 * @remarks Not all of the state needs to be synced in.
11197 */
11198VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMwait(PVMCPUCC pVCpu, uint8_t cbInstr)
11199{
11200 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11201 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX);
11202
11203 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11204 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_mwait);
11205 Assert(!pVCpu->iem.s.cActiveMappings);
11206 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11207}
11208
11209
11210/**
11211 * Interface for HM and EM to emulate the HLT instruction.
11212 *
11213 * @returns Strict VBox status code.
11214 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
11215 *
11216 * @param pVCpu The cross context virtual CPU structure.
11217 * @param cbInstr The instruction length in bytes.
11218 *
11219 * @remarks Not all of the state needs to be synced in.
11220 */
11221VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedHlt(PVMCPUCC pVCpu, uint8_t cbInstr)
11222{
11223 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
11224
11225 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11226 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_hlt);
11227 Assert(!pVCpu->iem.s.cActiveMappings);
11228 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11229}
11230
11231
11232/**
11233 * Checks if IEM is in the process of delivering an event (interrupt or
11234 * exception).
11235 *
11236 * @returns true if we're in the process of raising an interrupt or exception,
11237 * false otherwise.
11238 * @param pVCpu The cross context virtual CPU structure.
11239 * @param puVector Where to store the vector associated with the
11240 * currently delivered event, optional.
11241 * @param   pfFlags         Where to store the event delivery flags (see
11242 * IEM_XCPT_FLAGS_XXX), optional.
11243 * @param puErr Where to store the error code associated with the
11244 * event, optional.
11245 * @param puCr2 Where to store the CR2 associated with the event,
11246 * optional.
11247 * @remarks The caller should check the flags to determine if the error code and
11248 * CR2 are valid for the event.
11249 */
11250VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPUCC pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr, uint64_t *puCr2)
11251{
11252 bool const fRaisingXcpt = pVCpu->iem.s.cXcptRecursions > 0;
11253 if (fRaisingXcpt)
11254 {
11255 if (puVector)
11256 *puVector = pVCpu->iem.s.uCurXcpt;
11257 if (pfFlags)
11258 *pfFlags = pVCpu->iem.s.fCurXcpt;
11259 if (puErr)
11260 *puErr = pVCpu->iem.s.uCurXcptErr;
11261 if (puCr2)
11262 *puCr2 = pVCpu->iem.s.uCurXcptCr2;
11263 }
11264 return fRaisingXcpt;
11265}
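
/**
 * A small sketch showing how a caller might query the event IEM is currently
 * delivering, e.g. to log it or to build nested-event information.  The
 * helper name exampleLogCurrentXcpt is hypothetical; the error code and CR2
 * values are only meaningful when the corresponding IEM_XCPT_FLAGS_ERR and
 * IEM_XCPT_FLAGS_CR2 bits are set in the returned flags.
 *
 * @code
 * static void exampleLogCurrentXcpt(PVMCPUCC pVCpu)
 * {
 *     uint8_t  uVector = 0;
 *     uint32_t fFlags  = 0;
 *     uint32_t uErr    = 0;
 *     uint64_t uCr2    = 0;
 *     if (IEMGetCurrentXcpt(pVCpu, &uVector, &fFlags, &uErr, &uCr2))
 *         Log(("IEM is delivering vector %#x fFlags=%#x uErr=%#x uCr2=%#RX64\n", uVector, fFlags, uErr, uCr2));
 * }
 * @endcode
 */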
11266
11267#ifdef IN_RING3
11268
11269/**
11270 * Handles the unlikely and probably fatal merge cases.
11271 *
11272 * @returns Merged status code.
11273 * @param rcStrict Current EM status code.
11274 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
11275 * with @a rcStrict.
11276 * @param iMemMap The memory mapping index. For error reporting only.
11277 * @param pVCpu The cross context virtual CPU structure of the calling
11278 * thread, for error reporting only.
11279 */
11280DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
11281 unsigned iMemMap, PVMCPUCC pVCpu)
11282{
11283 if (RT_FAILURE_NP(rcStrict))
11284 return rcStrict;
11285
11286 if (RT_FAILURE_NP(rcStrictCommit))
11287 return rcStrictCommit;
11288
11289 if (rcStrict == rcStrictCommit)
11290 return rcStrictCommit;
11291
11292 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
11293 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
11294 pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
11295 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
11296 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
11297 return VERR_IOM_FF_STATUS_IPE;
11298}
11299
11300
11301/**
11302 * Helper for IOMR3ProcessForceFlag.
11303 *
11304 * @returns Merged status code.
11305 * @param rcStrict Current EM status code.
11306 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
11307 * with @a rcStrict.
11308 * @param iMemMap The memory mapping index. For error reporting only.
11309 * @param pVCpu The cross context virtual CPU structure of the calling
11310 * thread, for error reporting only.
11311 */
11312DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPUCC pVCpu)
11313{
11314 /* Simple. */
11315 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
11316 return rcStrictCommit;
11317
11318 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
11319 return rcStrict;
11320
11321 /* EM scheduling status codes. */
11322 if (RT_LIKELY( rcStrict >= VINF_EM_FIRST
11323 && rcStrict <= VINF_EM_LAST))
11324 {
11325 if (RT_LIKELY( rcStrictCommit >= VINF_EM_FIRST
11326 && rcStrictCommit <= VINF_EM_LAST))
11327 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
11328 }
11329
11330 /* Unlikely */
11331 return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
11332}
11333
11334
11335/**
11336 * Called by force-flag handling code when VMCPU_FF_IEM is set.
11337 *
11338 * @returns Merge between @a rcStrict and what the commit operation returned.
11339 * @param pVM The cross context VM structure.
11340 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11341 * @param rcStrict The status code returned by ring-0 or raw-mode.
11342 */
11343VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
11344{
11345 /*
11346 * Reset the pending commit.
11347 */
11348 AssertMsg( (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
11349 & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
11350 ("%#x %#x %#x\n",
11351 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
11352 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
11353
11354 /*
11355 * Commit the pending bounce buffers (usually just one).
11356 */
11357 unsigned cBufs = 0;
11358 unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
11359 while (iMemMap-- > 0)
11360 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
11361 {
11362 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
11363 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
11364 Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
11365
11366 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
11367 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
11368 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
11369
11370 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
11371 {
11372 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
11373 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
11374 pbBuf,
11375 cbFirst,
11376 PGMACCESSORIGIN_IEM);
11377 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
11378 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
11379 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
11380 VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
11381 }
11382
11383 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
11384 {
11385 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
11386 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
11387 pbBuf + cbFirst,
11388 cbSecond,
11389 PGMACCESSORIGIN_IEM);
11390 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
11391 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
11392 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
11393 VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
11394 }
11395 cBufs++;
11396 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
11397 }
11398
11399 AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
11400 ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
11401 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
11402 pVCpu->iem.s.cActiveMappings = 0;
11403 return rcStrict;
11404}
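
/**
 * A sketch of how the ring-3 execution loop might react to VMCPU_FF_IEM,
 * assuming rcStrict is the status it got back from ring-0 or raw-mode.  The
 * wrapper name emR3ExampleHandleIemForceFlag is hypothetical; the real work
 * (committing the pending bounce buffer writes and merging the statuses) is
 * done by IEMR3ProcessForceFlag above.
 *
 * @code
 * static VBOXSTRICTRC emR3ExampleHandleIemForceFlag(PVM pVM, PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
 * {
 *     if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))           // IEM left uncommitted bounce buffer writes behind
 *         rcStrict = IEMR3ProcessForceFlag(pVM, pVCpu, rcStrict);
 *     return rcStrict;
 * }
 * @endcode
 */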
11405
11406#endif /* IN_RING3 */
11407