VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@ 95410

Last change on this file since 95410 was 95410, checked in by vboxsync, 3 years ago

VMM/IEM: Alignment checks (#AC(0)/#GP(0)). bugref:9898

1/* $Id: IEMAll.cpp 95410 2022-06-28 18:33:26Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2022 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_iem IEM - Interpreted Execution Manager
20 *
 21 * The interpreted execution manager (IEM) is for executing short guest code
22 * sequences that are causing too many exits / virtualization traps. It will
23 * also be used to interpret single instructions, thus replacing the selective
24 * interpreters in EM and IOM.
25 *
26 * Design goals:
27 * - Relatively small footprint, although we favour speed and correctness
28 * over size.
29 * - Reasonably fast.
30 * - Correctly handle lock prefixed instructions.
31 * - Complete instruction set - eventually.
32 * - Refactorable into a recompiler, maybe.
33 * - Replace EMInterpret*.
34 *
 35 * Using the existing disassembler has been considered; however, this is thought
36 * to conflict with speed as the disassembler chews things a bit too much while
37 * leaving us with a somewhat complicated state to interpret afterwards.
38 *
39 *
40 * The current code is very much work in progress. You've been warned!
41 *
42 *
43 * @section sec_iem_fpu_instr FPU Instructions
44 *
45 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
46 * same or equivalent instructions on the host FPU. To make life easy, we also
 47 * let the FPU prioritize the unmasked exceptions for us. This, however, only
 48 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
49 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
50 * can trigger spurious FPU exceptions.
51 *
52 * The guest FPU state is not loaded into the host CPU and kept there till we
53 * leave IEM because the calling conventions have declared an all year open
54 * season on much of the FPU state. For instance an innocent looking call to
55 * memcpy might end up using a whole bunch of XMM or MM registers if the
56 * particular implementation finds it worthwhile.
57 *
58 *
59 * @section sec_iem_logging Logging
60 *
61 * The IEM code uses the \"IEM\" log group for the main logging. The different
62 * logging levels/flags are generally used for the following purposes:
63 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
64 * - Flow (LogFlow) : Basic enter/exit IEM state info.
65 * - Level 2 (Log2) : ?
66 * - Level 3 (Log3) : More detailed enter/exit IEM state info.
67 * - Level 4 (Log4) : Decoding mnemonics w/ EIP.
68 * - Level 5 (Log5) : Decoding details.
69 * - Level 6 (Log6) : Enables/disables the lockstep comparison with REM.
70 * - Level 7 (Log7) : iret++ execution logging.
71 * - Level 8 (Log8) : Memory writes.
72 * - Level 9 (Log9) : Memory reads.
73 * - Level 10 (Log10): TLBs.
74 */
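/*
 * Illustrative sketch, not part of the upstream file: how the logging levels
 * listed above are typically exercised from IEM code.  Log/LogFlow/Log2..Log10
 * are the standard VBox logging macros for the current LOG_GROUP (IEM here);
 * the message texts below are made up for illustration only.
 */
#if 0
static void iemLogLevelUsageSketch(void)
{
    Log(("Raising #GP(0)\n"));            /* level 1:  exceptions, interrupts, major events */
    LogFlow(("IEMExecOne: enter\n"));     /* flow:     basic enter/exit state info */
    Log4(("decode: 0f 01 /3 -> lidt\n")); /* level 4:  decoded mnemonics w/ EIP */
    Log8(("memwrite: 4 bytes\n"));        /* level 8:  memory writes */
    Log9(("memread:  4 bytes\n"));        /* level 9:  memory reads */
    Log10(("code TLB miss\n"));           /* level 10: TLB activity */
}
#endif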
75
76/* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
77#ifdef _MSC_VER
78# pragma warning(disable:4505)
79#endif
80
81
82/*********************************************************************************************************************************
83* Header Files *
84*********************************************************************************************************************************/
85#define LOG_GROUP LOG_GROUP_IEM
86#define VMCPU_INCL_CPUM_GST_CTX
87#include <VBox/vmm/iem.h>
88#include <VBox/vmm/cpum.h>
89#include <VBox/vmm/apic.h>
90#include <VBox/vmm/pdm.h>
91#include <VBox/vmm/pgm.h>
92#include <VBox/vmm/iom.h>
93#include <VBox/vmm/em.h>
94#include <VBox/vmm/hm.h>
95#include <VBox/vmm/nem.h>
96#include <VBox/vmm/gim.h>
97#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
98# include <VBox/vmm/em.h>
99# include <VBox/vmm/hm_svm.h>
100#endif
101#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
102# include <VBox/vmm/hmvmxinline.h>
103#endif
104#include <VBox/vmm/tm.h>
105#include <VBox/vmm/dbgf.h>
106#include <VBox/vmm/dbgftrace.h>
107#include "IEMInternal.h"
108#include <VBox/vmm/vmcc.h>
109#include <VBox/log.h>
110#include <VBox/err.h>
111#include <VBox/param.h>
112#include <VBox/dis.h>
113#include <VBox/disopcode.h>
114#include <iprt/asm-math.h>
115#include <iprt/assert.h>
116#include <iprt/string.h>
117#include <iprt/x86.h>
118
119#include "IEMInline.h"
120
121
122/*********************************************************************************************************************************
123* Structures and Typedefs *
124*********************************************************************************************************************************/
125/**
126 * CPU exception classes.
127 */
128typedef enum IEMXCPTCLASS
129{
130 IEMXCPTCLASS_BENIGN,
131 IEMXCPTCLASS_CONTRIBUTORY,
132 IEMXCPTCLASS_PAGE_FAULT,
133 IEMXCPTCLASS_DOUBLE_FAULT
134} IEMXCPTCLASS;
135
136
137/*********************************************************************************************************************************
138* Global Variables *
139*********************************************************************************************************************************/
140#if defined(IEM_LOG_MEMORY_WRITES)
141/** What IEM just wrote. */
142uint8_t g_abIemWrote[256];
143/** How much IEM just wrote. */
144size_t g_cbIemWrote;
145#endif
146
147
148/*********************************************************************************************************************************
149* Internal Functions *
150*********************************************************************************************************************************/
151static VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,
152 uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT;
153
154
155/**
156 * Initializes the decoder state.
157 *
158 * iemReInitDecoder is mostly a copy of this function.
159 *
160 * @param pVCpu The cross context virtual CPU structure of the
161 * calling thread.
162 * @param fBypassHandlers Whether to bypass access handlers.
163 * @param fDisregardLock Whether to disregard the LOCK prefix.
164 */
165DECLINLINE(void) iemInitDecoder(PVMCPUCC pVCpu, bool fBypassHandlers, bool fDisregardLock)
166{
167 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
168 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
169 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
170 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
171 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
172 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
173 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
174 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
175 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
176 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
177
178 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
179 IEMMODE enmMode = iemCalcCpuMode(pVCpu);
180 pVCpu->iem.s.enmCpuMode = enmMode;
181 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
182 pVCpu->iem.s.enmEffAddrMode = enmMode;
183 if (enmMode != IEMMODE_64BIT)
184 {
185 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
186 pVCpu->iem.s.enmEffOpSize = enmMode;
187 }
188 else
189 {
190 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
191 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
192 }
193 pVCpu->iem.s.fPrefixes = 0;
194 pVCpu->iem.s.uRexReg = 0;
195 pVCpu->iem.s.uRexB = 0;
196 pVCpu->iem.s.uRexIndex = 0;
197 pVCpu->iem.s.idxPrefix = 0;
198 pVCpu->iem.s.uVex3rdReg = 0;
199 pVCpu->iem.s.uVexLength = 0;
200 pVCpu->iem.s.fEvexStuff = 0;
201 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
202#ifdef IEM_WITH_CODE_TLB
203 pVCpu->iem.s.pbInstrBuf = NULL;
204 pVCpu->iem.s.offInstrNextByte = 0;
205 pVCpu->iem.s.offCurInstrStart = 0;
206# ifdef VBOX_STRICT
207 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
208 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
209 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
210# endif
211#else
212 pVCpu->iem.s.offOpcode = 0;
213 pVCpu->iem.s.cbOpcode = 0;
214#endif
215 pVCpu->iem.s.offModRm = 0;
216 pVCpu->iem.s.cActiveMappings = 0;
217 pVCpu->iem.s.iNextMapping = 0;
218 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
219 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
220 pVCpu->iem.s.fDisregardLock = fDisregardLock;
221
222#ifdef DBGFTRACE_ENABLED
223 switch (enmMode)
224 {
225 case IEMMODE_64BIT:
226 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.rip);
227 break;
228 case IEMMODE_32BIT:
229 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
230 break;
231 case IEMMODE_16BIT:
232 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
233 break;
234 }
235#endif
236}
237
238
239/**
240 * Reinitializes the decoder state 2nd+ loop of IEMExecLots.
241 *
242 * This is mostly a copy of iemInitDecoder.
243 *
244 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
245 */
246DECLINLINE(void) iemReInitDecoder(PVMCPUCC pVCpu)
247{
248 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
249 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
250 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
251 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
252 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
253 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
254 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
255 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
256 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
257
258 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu); /** @todo this should be updated during execution! */
259 IEMMODE enmMode = iemCalcCpuMode(pVCpu);
260 pVCpu->iem.s.enmCpuMode = enmMode; /** @todo this should be updated during execution! */
261 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
262 pVCpu->iem.s.enmEffAddrMode = enmMode;
263 if (enmMode != IEMMODE_64BIT)
264 {
265 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
266 pVCpu->iem.s.enmEffOpSize = enmMode;
267 }
268 else
269 {
270 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
271 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
272 }
273 pVCpu->iem.s.fPrefixes = 0;
274 pVCpu->iem.s.uRexReg = 0;
275 pVCpu->iem.s.uRexB = 0;
276 pVCpu->iem.s.uRexIndex = 0;
277 pVCpu->iem.s.idxPrefix = 0;
278 pVCpu->iem.s.uVex3rdReg = 0;
279 pVCpu->iem.s.uVexLength = 0;
280 pVCpu->iem.s.fEvexStuff = 0;
281 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
282#ifdef IEM_WITH_CODE_TLB
283 if (pVCpu->iem.s.pbInstrBuf)
284 {
285 uint64_t off = (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pVCpu->cpum.GstCtx.rip : pVCpu->cpum.GstCtx.eip + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base)
286 - pVCpu->iem.s.uInstrBufPc;
287 if (off < pVCpu->iem.s.cbInstrBufTotal)
288 {
289 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
290 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
291 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
292 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
293 else
294 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
295 }
296 else
297 {
298 pVCpu->iem.s.pbInstrBuf = NULL;
299 pVCpu->iem.s.offInstrNextByte = 0;
300 pVCpu->iem.s.offCurInstrStart = 0;
301 pVCpu->iem.s.cbInstrBuf = 0;
302 pVCpu->iem.s.cbInstrBufTotal = 0;
303 }
304 }
305 else
306 {
307 pVCpu->iem.s.offInstrNextByte = 0;
308 pVCpu->iem.s.offCurInstrStart = 0;
309 pVCpu->iem.s.cbInstrBuf = 0;
310 pVCpu->iem.s.cbInstrBufTotal = 0;
311 }
312#else
313 pVCpu->iem.s.cbOpcode = 0;
314 pVCpu->iem.s.offOpcode = 0;
315#endif
316 pVCpu->iem.s.offModRm = 0;
317 Assert(pVCpu->iem.s.cActiveMappings == 0);
318 pVCpu->iem.s.iNextMapping = 0;
319 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
320 Assert(pVCpu->iem.s.fBypassHandlers == false);
321
322#ifdef DBGFTRACE_ENABLED
323 switch (enmMode)
324 {
325 case IEMMODE_64BIT:
326 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.rip);
327 break;
328 case IEMMODE_32BIT:
329 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
330 break;
331 case IEMMODE_16BIT:
332 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
333 break;
334 }
335#endif
336}
337
338
339
340/**
 341 * Prefetches opcodes the first time execution is started.
342 *
343 * @returns Strict VBox status code.
344 * @param pVCpu The cross context virtual CPU structure of the
345 * calling thread.
346 * @param fBypassHandlers Whether to bypass access handlers.
347 * @param fDisregardLock Whether to disregard LOCK prefixes.
348 *
349 * @todo Combine fDisregardLock and fBypassHandlers into a flag parameter and
350 * store them as such.
351 */
352static VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPUCC pVCpu, bool fBypassHandlers, bool fDisregardLock) RT_NOEXCEPT
353{
354 iemInitDecoder(pVCpu, fBypassHandlers, fDisregardLock);
355
356#ifdef IEM_WITH_CODE_TLB
357 /** @todo Do ITLB lookup here. */
358
359#else /* !IEM_WITH_CODE_TLB */
360
361 /*
362 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
363 *
364 * First translate CS:rIP to a physical address.
365 */
366 uint32_t cbToTryRead;
367 RTGCPTR GCPtrPC;
368 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
369 {
370 cbToTryRead = GUEST_PAGE_SIZE;
371 GCPtrPC = pVCpu->cpum.GstCtx.rip;
372 if (IEM_IS_CANONICAL(GCPtrPC))
373 cbToTryRead = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
374 else
375 return iemRaiseGeneralProtectionFault0(pVCpu);
376 }
377 else
378 {
379 uint32_t GCPtrPC32 = pVCpu->cpum.GstCtx.eip;
380 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
381 if (GCPtrPC32 <= pVCpu->cpum.GstCtx.cs.u32Limit)
382 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrPC32 + 1;
383 else
384 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
385 if (cbToTryRead) { /* likely */ }
386 else /* overflowed */
387 {
388 Assert(GCPtrPC32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
389 cbToTryRead = UINT32_MAX;
390 }
391 GCPtrPC = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrPC32;
392 Assert(GCPtrPC <= UINT32_MAX);
393 }
394
395 PGMPTWALK Walk;
396 int rc = PGMGstGetPage(pVCpu, GCPtrPC, &Walk);
397 if (RT_SUCCESS(rc))
398 Assert(Walk.fSucceeded); /* probable. */
399 else
400 {
401 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
402#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
403 if (Walk.fFailed & PGM_WALKFAIL_EPT)
404 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
405#endif
406 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
407 }
408 if ((Walk.fEffective & X86_PTE_US) || pVCpu->iem.s.uCpl != 3) { /* likely */ }
409 else
410 {
411 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
412#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
413 if (Walk.fFailed & PGM_WALKFAIL_EPT)
414 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
415#endif
416 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
417 }
418 if (!(Walk.fEffective & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
419 else
420 {
421 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
422#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
423 if (Walk.fFailed & PGM_WALKFAIL_EPT)
424 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
425#endif
426 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
427 }
428 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
429 /** @todo Check reserved bits and such stuff. PGM is better at doing
430 * that, so do it when implementing the guest virtual address
431 * TLB... */
432
433 /*
434 * Read the bytes at this address.
435 */
436 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
437 if (cbToTryRead > cbLeftOnPage)
438 cbToTryRead = cbLeftOnPage;
439 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
440 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
441
442 if (!pVCpu->iem.s.fBypassHandlers)
443 {
444 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
445 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
446 { /* likely */ }
447 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
448 {
449 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
450 GCPtrPC, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
451 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
452 }
453 else
454 {
455 Log((RT_SUCCESS(rcStrict)
456 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
457 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
458 GCPtrPC, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
459 return rcStrict;
460 }
461 }
462 else
463 {
464 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
465 if (RT_SUCCESS(rc))
466 { /* likely */ }
467 else
468 {
469 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
470 GCPtrPC, GCPhys, rc, cbToTryRead));
471 return rc;
472 }
473 }
474 pVCpu->iem.s.cbOpcode = cbToTryRead;
475#endif /* !IEM_WITH_CODE_TLB */
476 return VINF_SUCCESS;
477}
478
479
480/**
481 * Invalidates the IEM TLBs.
482 *
483 * This is called internally as well as by PGM when moving GC mappings.
484 *
 485 *
486 * @param pVCpu The cross context virtual CPU structure of the calling
487 * thread.
488 */
489VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPUCC pVCpu)
490{
491#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
492 Log10(("IEMTlbInvalidateAll\n"));
493# ifdef IEM_WITH_CODE_TLB
494 pVCpu->iem.s.cbInstrBufTotal = 0;
495 pVCpu->iem.s.CodeTlb.uTlbRevision += IEMTLB_REVISION_INCR;
496 if (pVCpu->iem.s.CodeTlb.uTlbRevision != 0)
497 { /* very likely */ }
498 else
499 {
500 pVCpu->iem.s.CodeTlb.uTlbRevision = IEMTLB_REVISION_INCR;
501 unsigned i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
502 while (i-- > 0)
503 pVCpu->iem.s.CodeTlb.aEntries[i].uTag = 0;
504 }
505# endif
506
507# ifdef IEM_WITH_DATA_TLB
508 pVCpu->iem.s.DataTlb.uTlbRevision += IEMTLB_REVISION_INCR;
509 if (pVCpu->iem.s.DataTlb.uTlbRevision != 0)
510 { /* very likely */ }
511 else
512 {
513 pVCpu->iem.s.DataTlb.uTlbRevision = IEMTLB_REVISION_INCR;
514 unsigned i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
515 while (i-- > 0)
516 pVCpu->iem.s.DataTlb.aEntries[i].uTag = 0;
517 }
518# endif
519#else
520 RT_NOREF(pVCpu);
521#endif
522}
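/*
 * Illustrative sketch, not part of the upstream file: why bumping uTlbRevision
 * above invalidates every entry without touching the array.  An entry's uTag is
 * the page tag OR'ed with the revision that was current when it was inserted,
 * so after the increment no stored tag can match a freshly calculated one.
 * The helper below merely restates the comparison used by the lookup code.
 */
#if 0
static bool iemTlbSketchIsHit(IEMTLB const *pTlb, RTGCPTR GCPtr)
{
    uint64_t const     uTagNoRev = IEMTLB_CALC_TAG_NO_REV(GCPtr);
    IEMTLBENTRY const *pTlbe     = &pTlb->aEntries[IEMTLB_TAG_TO_INDEX(uTagNoRev)];
    return pTlbe->uTag == (uTagNoRev | pTlb->uTlbRevision); /* stale revision => mismatch => miss */
}
#endif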
523
524
525/**
526 * Invalidates a page in the TLBs.
527 *
528 * @param pVCpu The cross context virtual CPU structure of the calling
529 * thread.
530 * @param GCPtr The address of the page to invalidate
531 * @thread EMT(pVCpu)
532 */
533VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPUCC pVCpu, RTGCPTR GCPtr)
534{
535#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
536 GCPtr = IEMTLB_CALC_TAG_NO_REV(GCPtr);
537 Assert(!(GCPtr >> (48 - X86_PAGE_SHIFT)));
538 uintptr_t const idx = IEMTLB_TAG_TO_INDEX(GCPtr);
539
540# ifdef IEM_WITH_CODE_TLB
541 if (pVCpu->iem.s.CodeTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.CodeTlb.uTlbRevision))
542 {
543 pVCpu->iem.s.CodeTlb.aEntries[idx].uTag = 0;
544 if (GCPtr == IEMTLB_CALC_TAG_NO_REV(pVCpu->iem.s.uInstrBufPc))
545 pVCpu->iem.s.cbInstrBufTotal = 0;
546 }
547# endif
548
549# ifdef IEM_WITH_DATA_TLB
550 if (pVCpu->iem.s.DataTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.DataTlb.uTlbRevision))
551 pVCpu->iem.s.DataTlb.aEntries[idx].uTag = 0;
552# endif
553#else
554 NOREF(pVCpu); NOREF(GCPtr);
555#endif
556}
557
558
559#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
560/**
 561 * Invalidates both TLBs in slow fashion following a rollover.
562 *
563 * Worker for IEMTlbInvalidateAllPhysical,
564 * IEMTlbInvalidateAllPhysicalAllCpus, iemOpcodeFetchBytesJmp, iemMemMap,
565 * iemMemMapJmp and others.
566 *
567 * @thread EMT(pVCpu)
568 */
569static void IEMTlbInvalidateAllPhysicalSlow(PVMCPUCC pVCpu)
570{
571 Log10(("IEMTlbInvalidateAllPhysicalSlow\n"));
572 ASMAtomicWriteU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
573 ASMAtomicWriteU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
574
575 unsigned i;
576# ifdef IEM_WITH_CODE_TLB
577 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
578 while (i-- > 0)
579 {
580 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
581 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~( IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
582 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
583 }
584# endif
585# ifdef IEM_WITH_DATA_TLB
586 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
587 while (i-- > 0)
588 {
589 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
590 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~( IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
591 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
592 }
593# endif
594
595}
596#endif
597
598
599/**
600 * Invalidates the host physical aspects of the IEM TLBs.
601 *
602 * This is called internally as well as by PGM when moving GC mappings.
603 *
604 * @param pVCpu The cross context virtual CPU structure of the calling
605 * thread.
606 * @note Currently not used.
607 */
608VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPUCC pVCpu)
609{
610#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
 611 /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
612 Log10(("IEMTlbInvalidateAllPhysical\n"));
613
614# ifdef IEM_WITH_CODE_TLB
615 pVCpu->iem.s.cbInstrBufTotal = 0;
616# endif
617 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
618 if (RT_LIKELY(uTlbPhysRev > IEMTLB_PHYS_REV_INCR * 2))
619 {
620 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
621 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
622 }
623 else
624 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
625#else
626 NOREF(pVCpu);
627#endif
628}
629
630
631/**
632 * Invalidates the host physical aspects of the IEM TLBs.
633 *
634 * This is called internally as well as by PGM when moving GC mappings.
635 *
636 * @param pVM The cross context VM structure.
637 * @param idCpuCaller The ID of the calling EMT if available to the caller,
638 * otherwise NIL_VMCPUID.
639 *
640 * @remarks Caller holds the PGM lock.
641 */
642VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVMCC pVM, VMCPUID idCpuCaller)
643{
644#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
645 PVMCPUCC const pVCpuCaller = idCpuCaller >= pVM->cCpus ? VMMGetCpu(pVM) : VMMGetCpuById(pVM, idCpuCaller);
646 if (pVCpuCaller)
647 VMCPU_ASSERT_EMT(pVCpuCaller);
648 Log10(("IEMTlbInvalidateAllPhysicalAllCpus\n"));
649
650 VMCC_FOR_EACH_VMCPU(pVM)
651 {
652# ifdef IEM_WITH_CODE_TLB
653 if (pVCpuCaller == pVCpu)
654 pVCpu->iem.s.cbInstrBufTotal = 0;
655# endif
656
657 uint64_t const uTlbPhysRevPrev = ASMAtomicUoReadU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev);
658 uint64_t uTlbPhysRevNew = uTlbPhysRevPrev + IEMTLB_PHYS_REV_INCR;
659 if (RT_LIKELY(uTlbPhysRevNew > IEMTLB_PHYS_REV_INCR * 2))
660 { /* likely */}
661 else if (pVCpuCaller == pVCpu)
662 uTlbPhysRevNew = IEMTLB_PHYS_REV_INCR;
663 else
664 {
665 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
666 continue;
667 }
668 ASMAtomicCmpXchgU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev);
669 ASMAtomicCmpXchgU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev);
670 }
671 VMCC_FOR_EACH_VMCPU_END(pVM);
672
673#else
674 RT_NOREF(pVM, idCpuCaller);
675#endif
676}
677
678#ifdef IEM_WITH_CODE_TLB
679
680/**
 681 * Tries to fetch @a cbDst opcode bytes, raises the appropriate exception on
682 * failure and jumps.
683 *
684 * We end up here for a number of reasons:
685 * - pbInstrBuf isn't yet initialized.
 686 * - Advancing beyond the buffer boundary (e.g. cross page).
687 * - Advancing beyond the CS segment limit.
688 * - Fetching from non-mappable page (e.g. MMIO).
689 *
690 * @param pVCpu The cross context virtual CPU structure of the
691 * calling thread.
692 * @param pvDst Where to return the bytes.
693 * @param cbDst Number of bytes to read.
694 *
695 * @todo Make cbDst = 0 a way of initializing pbInstrBuf?
696 */
697void iemOpcodeFetchBytesJmp(PVMCPUCC pVCpu, size_t cbDst, void *pvDst) RT_NOEXCEPT
698{
699#ifdef IN_RING3
700 for (;;)
701 {
702 Assert(cbDst <= 8);
703 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
704
705 /*
706 * We might have a partial buffer match, deal with that first to make the
707 * rest simpler. This is the first part of the cross page/buffer case.
708 */
709 if (pVCpu->iem.s.pbInstrBuf != NULL)
710 {
711 if (offBuf < pVCpu->iem.s.cbInstrBuf)
712 {
713 Assert(offBuf + cbDst > pVCpu->iem.s.cbInstrBuf);
714 uint32_t const cbCopy = pVCpu->iem.s.cbInstrBuf - pVCpu->iem.s.offInstrNextByte;
715 memcpy(pvDst, &pVCpu->iem.s.pbInstrBuf[offBuf], cbCopy);
716
717 cbDst -= cbCopy;
718 pvDst = (uint8_t *)pvDst + cbCopy;
719 offBuf += cbCopy;
720 pVCpu->iem.s.offInstrNextByte += offBuf;
721 }
722 }
723
724 /*
725 * Check segment limit, figuring how much we're allowed to access at this point.
726 *
727 * We will fault immediately if RIP is past the segment limit / in non-canonical
728 * territory. If we do continue, there are one or more bytes to read before we
729 * end up in trouble and we need to do that first before faulting.
730 */
731 RTGCPTR GCPtrFirst;
732 uint32_t cbMaxRead;
733 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
734 {
735 GCPtrFirst = pVCpu->cpum.GstCtx.rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
736 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
737 { /* likely */ }
738 else
739 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
740 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
741 }
742 else
743 {
744 GCPtrFirst = pVCpu->cpum.GstCtx.eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
745 Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
746 if (RT_LIKELY((uint32_t)GCPtrFirst <= pVCpu->cpum.GstCtx.cs.u32Limit))
747 { /* likely */ }
748 else
749 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
750 cbMaxRead = pVCpu->cpum.GstCtx.cs.u32Limit - (uint32_t)GCPtrFirst + 1;
751 if (cbMaxRead != 0)
752 { /* likely */ }
753 else
754 {
755 /* Overflowed because address is 0 and limit is max. */
756 Assert(GCPtrFirst == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
757 cbMaxRead = X86_PAGE_SIZE;
758 }
759 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base;
760 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
761 if (cbMaxRead2 < cbMaxRead)
762 cbMaxRead = cbMaxRead2;
763 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
764 }
765
766 /*
767 * Get the TLB entry for this piece of code.
768 */
769 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.CodeTlb, GCPtrFirst);
770 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.CodeTlb, uTag);
771 if (pTlbe->uTag == uTag)
772 {
773 /* likely when executing lots of code, otherwise unlikely */
774# ifdef VBOX_WITH_STATISTICS
775 pVCpu->iem.s.CodeTlb.cTlbHits++;
776# endif
777 }
778 else
779 {
780 pVCpu->iem.s.CodeTlb.cTlbMisses++;
781 PGMPTWALK Walk;
782 int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &Walk);
783 if (RT_FAILURE(rc))
784 {
785#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
786 /** @todo Nested VMX: Need to handle EPT violation/misconfig here? */
787 Assert(!(Walk.fFailed & PGM_WALKFAIL_EPT));
788#endif
789 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
790 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, rc);
791 }
792
793 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
794 Assert(Walk.fSucceeded);
795 pTlbe->uTag = uTag;
796 pTlbe->fFlagsAndPhysRev = (~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A))
797 | (Walk.fEffective >> X86_PTE_PAE_BIT_NX);
798 pTlbe->GCPhys = Walk.GCPhys;
799 pTlbe->pbMappingR3 = NULL;
800 }
801
802 /*
803 * Check TLB page table level access flags.
804 */
805 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
806 {
807 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && pVCpu->iem.s.uCpl == 3)
808 {
809 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
810 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
811 }
812 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
813 {
814 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrFirst));
815 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
816 }
817 }
818
819 /*
820 * Look up the physical page info if necessary.
821 */
822 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
823 { /* not necessary */ }
824 else
825 {
826 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
827 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
828 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
829 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
830 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
831 { /* likely */ }
832 else
833 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
834 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
835 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_UNASSIGNED);
836 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
837 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
838 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
839 }
840
841# if defined(IN_RING3) || defined(IN_RING0) /** @todo fixme */
842 /*
843 * Try do a direct read using the pbMappingR3 pointer.
844 */
845 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
846 == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
847 {
848 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
849 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
850 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
851 {
852 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
853 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
854 }
855 else
856 {
857 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
858 Assert(cbInstr < cbMaxRead);
859 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
860 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
861 }
862 if (cbDst <= cbMaxRead)
863 {
864 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
865 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
866 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
867 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
868 return;
869 }
870 pVCpu->iem.s.pbInstrBuf = NULL;
871
872 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
873 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
874 }
875 else
876# endif
877#if 0
878 /*
 879 * If there is no special read handling, we can read a bit more and
880 * put it in the prefetch buffer.
881 */
882 if ( cbDst < cbMaxRead
883 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
884 {
885 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
886 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
887 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
888 { /* likely */ }
889 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
890 {
891 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
892 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
893 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
 894 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
895 }
896 else
897 {
898 Log((RT_SUCCESS(rcStrict)
899 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
900 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
901 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
902 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
903 }
904 }
905 /*
906 * Special read handling, so only read exactly what's needed.
907 * This is a highly unlikely scenario.
908 */
909 else
910#endif
911 {
912 pVCpu->iem.s.CodeTlb.cTlbSlowReadPath++;
913 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
914 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
915 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
916 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
917 { /* likely */ }
918 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
919 {
920 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
921 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), VBOXSTRICTRC_VAL(rcStrict), cbToRead));
922 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
923 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
924 }
925 else
926 {
927 Log((RT_SUCCESS(rcStrict)
928 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
929 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
930 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), VBOXSTRICTRC_VAL(rcStrict), cbToRead));
931 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
932 }
933 pVCpu->iem.s.offInstrNextByte = offBuf + cbToRead;
934 if (cbToRead == cbDst)
935 return;
936 }
937
938 /*
939 * More to read, loop.
940 */
941 cbDst -= cbMaxRead;
942 pvDst = (uint8_t *)pvDst + cbMaxRead;
943 }
944#else
945 RT_NOREF(pvDst, cbDst);
946 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_INTERNAL_ERROR);
947#endif
948}
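/*
 * Illustrative sketch, not part of the upstream file: the kind of inline fast
 * path (see IEMInline.h) that the function above backs up.  Fetches are
 * normally served straight out of pbInstrBuf; only when the buffer is unset or
 * has been consumed past cbInstrBuf do we drop into iemOpcodeFetchBytesJmp to
 * refill it (or raise a fault).  Simplified, not the exact upstream code.
 */
#if 0
DECLINLINE(uint8_t) iemSketchFetchU8Jmp(PVMCPUCC pVCpu)
{
    uint32_t const offBuf = pVCpu->iem.s.offInstrNextByte;
    if (RT_LIKELY(   pVCpu->iem.s.pbInstrBuf != NULL
                  && offBuf < pVCpu->iem.s.cbInstrBuf))
    {
        pVCpu->iem.s.offInstrNextByte = offBuf + 1;
        return pVCpu->iem.s.pbInstrBuf[offBuf];
    }
    uint8_t bOpcode;
    iemOpcodeFetchBytesJmp(pVCpu, sizeof(bOpcode), &bOpcode); /* slow path: refill or longjmp on fault */
    return bOpcode;
}
#endif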
949
950#else
951
952/**
 953 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
954 * exception if it fails.
955 *
956 * @returns Strict VBox status code.
957 * @param pVCpu The cross context virtual CPU structure of the
958 * calling thread.
 959 * @param cbMin The minimum number of bytes relative to offOpcode
960 * that must be read.
961 */
962VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPUCC pVCpu, size_t cbMin) RT_NOEXCEPT
963{
964 /*
965 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
966 *
967 * First translate CS:rIP to a physical address.
968 */
969 uint8_t cbLeft = pVCpu->iem.s.cbOpcode - pVCpu->iem.s.offOpcode; Assert(cbLeft < cbMin);
970 uint32_t cbToTryRead;
971 RTGCPTR GCPtrNext;
972 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
973 {
974 cbToTryRead = GUEST_PAGE_SIZE;
975 GCPtrNext = pVCpu->cpum.GstCtx.rip + pVCpu->iem.s.cbOpcode;
976 if (!IEM_IS_CANONICAL(GCPtrNext))
977 return iemRaiseGeneralProtectionFault0(pVCpu);
978 }
979 else
980 {
981 uint32_t GCPtrNext32 = pVCpu->cpum.GstCtx.eip;
982 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
983 GCPtrNext32 += pVCpu->iem.s.cbOpcode;
984 if (GCPtrNext32 > pVCpu->cpum.GstCtx.cs.u32Limit)
985 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
986 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrNext32 + 1;
987 if (!cbToTryRead) /* overflowed */
988 {
989 Assert(GCPtrNext32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
990 cbToTryRead = UINT32_MAX;
991 /** @todo check out wrapping around the code segment. */
992 }
993 if (cbToTryRead < cbMin - cbLeft)
994 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
995 GCPtrNext = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrNext32;
996 }
997
998 /* Only read up to the end of the page, and make sure we don't read more
999 than the opcode buffer can hold. */
1000 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1001 if (cbToTryRead > cbLeftOnPage)
1002 cbToTryRead = cbLeftOnPage;
1003 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode)
1004 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode;
1005/** @todo r=bird: Convert assertion into undefined opcode exception? */
1006 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
1007
1008 PGMPTWALK Walk;
1009 int rc = PGMGstGetPage(pVCpu, GCPtrNext, &Walk);
1010 if (RT_FAILURE(rc))
1011 {
1012 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
1013#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1014 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1015 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
1016#endif
1017 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
1018 }
1019 if (!(Walk.fEffective & X86_PTE_US) && pVCpu->iem.s.uCpl == 3)
1020 {
1021 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
1022#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1023 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1024 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
1025#endif
1026 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1027 }
1028 if ((Walk.fEffective & X86_PTE_PAE_NX) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
1029 {
1030 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
1031#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1032 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1033 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
1034#endif
1035 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1036 }
1037 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1038 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pVCpu->iem.s.cbOpcode));
1039 /** @todo Check reserved bits and such stuff. PGM is better at doing
1040 * that, so do it when implementing the guest virtual address
1041 * TLB... */
1042
1043 /*
1044 * Read the bytes at this address.
1045 *
1046 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
1047 * and since PATM should only patch the start of an instruction there
1048 * should be no need to check again here.
1049 */
1050 if (!pVCpu->iem.s.fBypassHandlers)
1051 {
1052 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode],
1053 cbToTryRead, PGMACCESSORIGIN_IEM);
1054 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1055 { /* likely */ }
1056 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1057 {
1058 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1059 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1060 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1061 }
1062 else
1063 {
1064 Log((RT_SUCCESS(rcStrict)
1065 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1066 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1067 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1068 return rcStrict;
1069 }
1070 }
1071 else
1072 {
1073 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode], GCPhys, cbToTryRead);
1074 if (RT_SUCCESS(rc))
1075 { /* likely */ }
1076 else
1077 {
1078 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
1079 return rc;
1080 }
1081 }
1082 pVCpu->iem.s.cbOpcode += cbToTryRead;
1083 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
1084
1085 return VINF_SUCCESS;
1086}
1087
1088#endif /* !IEM_WITH_CODE_TLB */
1089#ifndef IEM_WITH_SETJMP
1090
1091/**
1092 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
1093 *
1094 * @returns Strict VBox status code.
1095 * @param pVCpu The cross context virtual CPU structure of the
1096 * calling thread.
1097 * @param pb Where to return the opcode byte.
1098 */
1099VBOXSTRICTRC iemOpcodeGetNextU8Slow(PVMCPUCC pVCpu, uint8_t *pb) RT_NOEXCEPT
1100{
1101 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1102 if (rcStrict == VINF_SUCCESS)
1103 {
1104 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1105 *pb = pVCpu->iem.s.abOpcode[offOpcode];
1106 pVCpu->iem.s.offOpcode = offOpcode + 1;
1107 }
1108 else
1109 *pb = 0;
1110 return rcStrict;
1111}
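/*
 * Illustrative sketch, not part of the upstream file: the inline fetcher this
 * "Slow" worker backs up (the real one lives in IEMInline.h).  When enough
 * bytes are already buffered in abOpcode it simply hands them out; only a
 * buffer underrun ends up in iemOpcodeGetNextU8Slow above.
 */
#if 0
DECLINLINE(VBOXSTRICTRC) iemSketchGetNextU8(PVMCPUCC pVCpu, uint8_t *pb)
{
    uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
    if (RT_LIKELY(offOpcode < pVCpu->iem.s.cbOpcode))
    {
        *pb = pVCpu->iem.s.abOpcode[offOpcode];
        pVCpu->iem.s.offOpcode = offOpcode + 1;
        return VINF_SUCCESS;
    }
    return iemOpcodeGetNextU8Slow(pVCpu, pb);
}
#endif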
1112
1113#else /* IEM_WITH_SETJMP */
1114
1115/**
1116 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
1117 *
1118 * @returns The opcode byte.
1119 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1120 */
1121uint8_t iemOpcodeGetNextU8SlowJmp(PVMCPUCC pVCpu) RT_NOEXCEPT
1122{
1123# ifdef IEM_WITH_CODE_TLB
1124 uint8_t u8;
1125 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
1126 return u8;
1127# else
1128 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1129 if (rcStrict == VINF_SUCCESS)
1130 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
1131 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1132# endif
1133}
1134
1135#endif /* IEM_WITH_SETJMP */
1136
1137#ifndef IEM_WITH_SETJMP
1138
1139/**
1140 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
1141 *
1142 * @returns Strict VBox status code.
1143 * @param pVCpu The cross context virtual CPU structure of the calling thread.
 1144 * @param pu16 Where to return the opcode word.
1145 */
1146VBOXSTRICTRC iemOpcodeGetNextS8SxU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
1147{
1148 uint8_t u8;
1149 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1150 if (rcStrict == VINF_SUCCESS)
1151 *pu16 = (int8_t)u8;
1152 return rcStrict;
1153}
1154
1155
1156/**
1157 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
1158 *
1159 * @returns Strict VBox status code.
1160 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1161 * @param pu32 Where to return the opcode dword.
1162 */
1163VBOXSTRICTRC iemOpcodeGetNextS8SxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1164{
1165 uint8_t u8;
1166 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1167 if (rcStrict == VINF_SUCCESS)
1168 *pu32 = (int8_t)u8;
1169 return rcStrict;
1170}
1171
1172
1173/**
1174 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
1175 *
1176 * @returns Strict VBox status code.
1177 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1178 * @param pu64 Where to return the opcode qword.
1179 */
1180VBOXSTRICTRC iemOpcodeGetNextS8SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1181{
1182 uint8_t u8;
1183 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1184 if (rcStrict == VINF_SUCCESS)
1185 *pu64 = (int8_t)u8;
1186 return rcStrict;
1187}
1188
1189#endif /* !IEM_WITH_SETJMP */
1190
1191
1192#ifndef IEM_WITH_SETJMP
1193
1194/**
1195 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
1196 *
1197 * @returns Strict VBox status code.
1198 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1199 * @param pu16 Where to return the opcode word.
1200 */
1201VBOXSTRICTRC iemOpcodeGetNextU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
1202{
1203 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1204 if (rcStrict == VINF_SUCCESS)
1205 {
1206 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1207# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1208 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1209# else
1210 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1211# endif
1212 pVCpu->iem.s.offOpcode = offOpcode + 2;
1213 }
1214 else
1215 *pu16 = 0;
1216 return rcStrict;
1217}
1218
1219#else /* IEM_WITH_SETJMP */
1220
1221/**
1222 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error
1223 *
1224 * @returns The opcode word.
1225 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1226 */
1227uint16_t iemOpcodeGetNextU16SlowJmp(PVMCPUCC pVCpu) RT_NOEXCEPT
1228{
1229# ifdef IEM_WITH_CODE_TLB
1230 uint16_t u16;
1231 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
1232 return u16;
1233# else
1234 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1235 if (rcStrict == VINF_SUCCESS)
1236 {
1237 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1238 pVCpu->iem.s.offOpcode += 2;
1239# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1240 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1241# else
1242 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1243# endif
1244 }
1245 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1246# endif
1247}
1248
1249#endif /* IEM_WITH_SETJMP */
1250
1251#ifndef IEM_WITH_SETJMP
1252
1253/**
1254 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
1255 *
1256 * @returns Strict VBox status code.
1257 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1258 * @param pu32 Where to return the opcode double word.
1259 */
1260VBOXSTRICTRC iemOpcodeGetNextU16ZxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1261{
1262 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1263 if (rcStrict == VINF_SUCCESS)
1264 {
1265 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1266 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1267 pVCpu->iem.s.offOpcode = offOpcode + 2;
1268 }
1269 else
1270 *pu32 = 0;
1271 return rcStrict;
1272}
1273
1274
1275/**
1276 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
1277 *
1278 * @returns Strict VBox status code.
1279 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1280 * @param pu64 Where to return the opcode quad word.
1281 */
1282VBOXSTRICTRC iemOpcodeGetNextU16ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1283{
1284 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1285 if (rcStrict == VINF_SUCCESS)
1286 {
1287 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1288 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1289 pVCpu->iem.s.offOpcode = offOpcode + 2;
1290 }
1291 else
1292 *pu64 = 0;
1293 return rcStrict;
1294}
1295
1296#endif /* !IEM_WITH_SETJMP */
1297
1298#ifndef IEM_WITH_SETJMP
1299
1300/**
1301 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
1302 *
1303 * @returns Strict VBox status code.
1304 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1305 * @param pu32 Where to return the opcode dword.
1306 */
1307VBOXSTRICTRC iemOpcodeGetNextU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1308{
1309 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1310 if (rcStrict == VINF_SUCCESS)
1311 {
1312 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1313# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1314 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1315# else
1316 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1317 pVCpu->iem.s.abOpcode[offOpcode + 1],
1318 pVCpu->iem.s.abOpcode[offOpcode + 2],
1319 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1320# endif
1321 pVCpu->iem.s.offOpcode = offOpcode + 4;
1322 }
1323 else
1324 *pu32 = 0;
1325 return rcStrict;
1326}
1327
1328#else /* IEM_WITH_SETJMP */
1329
1330/**
1331 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
1332 *
1333 * @returns The opcode dword.
1334 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1335 */
1336uint32_t iemOpcodeGetNextU32SlowJmp(PVMCPUCC pVCpu) RT_NOEXCEPT
1337{
1338# ifdef IEM_WITH_CODE_TLB
1339 uint32_t u32;
1340 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
1341 return u32;
1342# else
1343 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1344 if (rcStrict == VINF_SUCCESS)
1345 {
1346 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1347 pVCpu->iem.s.offOpcode = offOpcode + 4;
1348# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1349 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1350# else
1351 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1352 pVCpu->iem.s.abOpcode[offOpcode + 1],
1353 pVCpu->iem.s.abOpcode[offOpcode + 2],
1354 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1355# endif
1356 }
1357 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1358# endif
1359}
1360
1361#endif /* IEM_WITH_SETJMP */
1362
1363#ifndef IEM_WITH_SETJMP
1364
1365/**
1366 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
1367 *
1368 * @returns Strict VBox status code.
1369 * @param pVCpu The cross context virtual CPU structure of the calling thread.
 1370 * @param pu64 Where to return the opcode dword, zero extended to a qword.
1371 */
1372VBOXSTRICTRC iemOpcodeGetNextU32ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1373{
1374 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1375 if (rcStrict == VINF_SUCCESS)
1376 {
1377 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1378 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1379 pVCpu->iem.s.abOpcode[offOpcode + 1],
1380 pVCpu->iem.s.abOpcode[offOpcode + 2],
1381 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1382 pVCpu->iem.s.offOpcode = offOpcode + 4;
1383 }
1384 else
1385 *pu64 = 0;
1386 return rcStrict;
1387}
1388
1389
1390/**
1391 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
1392 *
1393 * @returns Strict VBox status code.
1394 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1395 * @param pu64 Where to return the opcode qword.
1396 */
1397VBOXSTRICTRC iemOpcodeGetNextS32SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1398{
1399 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1400 if (rcStrict == VINF_SUCCESS)
1401 {
1402 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1403 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1404 pVCpu->iem.s.abOpcode[offOpcode + 1],
1405 pVCpu->iem.s.abOpcode[offOpcode + 2],
1406 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1407 pVCpu->iem.s.offOpcode = offOpcode + 4;
1408 }
1409 else
1410 *pu64 = 0;
1411 return rcStrict;
1412}
1413
1414#endif /* !IEM_WITH_SETJMP */
1415
1416#ifndef IEM_WITH_SETJMP
1417
1418/**
1419 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
1420 *
1421 * @returns Strict VBox status code.
1422 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1423 * @param pu64 Where to return the opcode qword.
1424 */
1425VBOXSTRICTRC iemOpcodeGetNextU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1426{
1427 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
1428 if (rcStrict == VINF_SUCCESS)
1429 {
1430 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1431# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1432 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1433# else
1434 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1435 pVCpu->iem.s.abOpcode[offOpcode + 1],
1436 pVCpu->iem.s.abOpcode[offOpcode + 2],
1437 pVCpu->iem.s.abOpcode[offOpcode + 3],
1438 pVCpu->iem.s.abOpcode[offOpcode + 4],
1439 pVCpu->iem.s.abOpcode[offOpcode + 5],
1440 pVCpu->iem.s.abOpcode[offOpcode + 6],
1441 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1442# endif
1443 pVCpu->iem.s.offOpcode = offOpcode + 8;
1444 }
1445 else
1446 *pu64 = 0;
1447 return rcStrict;
1448}
1449
1450#else /* IEM_WITH_SETJMP */
1451
1452/**
1453 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
1454 *
1455 * @returns The opcode qword.
1456 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1457 */
1458uint64_t iemOpcodeGetNextU64SlowJmp(PVMCPUCC pVCpu) RT_NOEXCEPT
1459{
1460# ifdef IEM_WITH_CODE_TLB
1461 uint64_t u64;
1462 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
1463 return u64;
1464# else
1465 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
1466 if (rcStrict == VINF_SUCCESS)
1467 {
1468 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1469 pVCpu->iem.s.offOpcode = offOpcode + 8;
1470# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1471 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1472# else
1473 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1474 pVCpu->iem.s.abOpcode[offOpcode + 1],
1475 pVCpu->iem.s.abOpcode[offOpcode + 2],
1476 pVCpu->iem.s.abOpcode[offOpcode + 3],
1477 pVCpu->iem.s.abOpcode[offOpcode + 4],
1478 pVCpu->iem.s.abOpcode[offOpcode + 5],
1479 pVCpu->iem.s.abOpcode[offOpcode + 6],
1480 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1481# endif
1482 }
1483 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1484# endif
1485}
1486
1487#endif /* IEM_WITH_SETJMP */
1488
1489
1490
1491/** @name Misc Worker Functions.
1492 * @{
1493 */
1494
1495/**
1496 * Gets the exception class for the specified exception vector.
1497 *
1498 * @returns The class of the specified exception.
1499 * @param uVector The exception vector.
1500 */
1501static IEMXCPTCLASS iemGetXcptClass(uint8_t uVector) RT_NOEXCEPT
1502{
1503 Assert(uVector <= X86_XCPT_LAST);
1504 switch (uVector)
1505 {
1506 case X86_XCPT_DE:
1507 case X86_XCPT_TS:
1508 case X86_XCPT_NP:
1509 case X86_XCPT_SS:
1510 case X86_XCPT_GP:
1511 case X86_XCPT_SX: /* AMD only */
1512 return IEMXCPTCLASS_CONTRIBUTORY;
1513
1514 case X86_XCPT_PF:
1515 case X86_XCPT_VE: /* Intel only */
1516 return IEMXCPTCLASS_PAGE_FAULT;
1517
1518 case X86_XCPT_DF:
1519 return IEMXCPTCLASS_DOUBLE_FAULT;
1520 }
1521 return IEMXCPTCLASS_BENIGN;
1522}
1523
1524
1525/**
1526 * Evaluates how to handle an exception caused during delivery of another event
1527 * (exception / interrupt).
1528 *
1529 * @returns How to handle the recursive exception.
1530 * @param pVCpu The cross context virtual CPU structure of the
1531 * calling thread.
1532 * @param fPrevFlags The flags of the previous event.
1533 * @param uPrevVector The vector of the previous event.
1534 * @param fCurFlags The flags of the current exception.
1535 * @param uCurVector The vector of the current exception.
1536 * @param pfXcptRaiseInfo Where to store additional information about the
1537 * exception condition. Optional.
1538 */
1539VMM_INT_DECL(IEMXCPTRAISE) IEMEvaluateRecursiveXcpt(PVMCPUCC pVCpu, uint32_t fPrevFlags, uint8_t uPrevVector, uint32_t fCurFlags,
1540 uint8_t uCurVector, PIEMXCPTRAISEINFO pfXcptRaiseInfo)
1541{
1542 /*
 1543 * Only CPU exceptions can be raised while delivering other events; software interrupt
1544 * (INTn/INT3/INTO/ICEBP) generated exceptions cannot occur as the current (second) exception.
1545 */
1546 AssertReturn(fCurFlags & IEM_XCPT_FLAGS_T_CPU_XCPT, IEMXCPTRAISE_INVALID);
1547 Assert(pVCpu); RT_NOREF(pVCpu);
1548 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x\n", uPrevVector, uCurVector));
1549
1550 IEMXCPTRAISE enmRaise = IEMXCPTRAISE_CURRENT_XCPT;
1551 IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
1552 if (fPrevFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
1553 {
1554 IEMXCPTCLASS enmPrevXcptClass = iemGetXcptClass(uPrevVector);
1555 if (enmPrevXcptClass != IEMXCPTCLASS_BENIGN)
1556 {
1557 IEMXCPTCLASS enmCurXcptClass = iemGetXcptClass(uCurVector);
1558 if ( enmPrevXcptClass == IEMXCPTCLASS_PAGE_FAULT
1559 && ( enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT
1560 || enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY))
1561 {
1562 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
1563 fRaiseInfo = enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT ? IEMXCPTRAISEINFO_PF_PF
1564 : IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT;
1565 Log2(("IEMEvaluateRecursiveXcpt: Vectoring page fault. uPrevVector=%#x uCurVector=%#x uCr2=%#RX64\n", uPrevVector,
1566 uCurVector, pVCpu->cpum.GstCtx.cr2));
1567 }
1568 else if ( enmPrevXcptClass == IEMXCPTCLASS_CONTRIBUTORY
1569 && enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY)
1570 {
1571 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
1572 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x -> #DF\n", uPrevVector, uCurVector));
1573 }
1574 else if ( enmPrevXcptClass == IEMXCPTCLASS_DOUBLE_FAULT
1575 && ( enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY
1576 || enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT))
1577 {
1578 enmRaise = IEMXCPTRAISE_TRIPLE_FAULT;
1579 Log2(("IEMEvaluateRecursiveXcpt: #DF handler raised a %#x exception -> triple fault\n", uCurVector));
1580 }
1581 }
1582 else
1583 {
1584 if (uPrevVector == X86_XCPT_NMI)
1585 {
1586 fRaiseInfo = IEMXCPTRAISEINFO_NMI_XCPT;
1587 if (uCurVector == X86_XCPT_PF)
1588 {
1589 fRaiseInfo |= IEMXCPTRAISEINFO_NMI_PF;
1590 Log2(("IEMEvaluateRecursiveXcpt: NMI delivery caused a page fault\n"));
1591 }
1592 }
1593 else if ( uPrevVector == X86_XCPT_AC
1594 && uCurVector == X86_XCPT_AC)
1595 {
1596 enmRaise = IEMXCPTRAISE_CPU_HANG;
1597 fRaiseInfo = IEMXCPTRAISEINFO_AC_AC;
1598 Log2(("IEMEvaluateRecursiveXcpt: Recursive #AC - Bad guest\n"));
1599 }
1600 }
1601 }
1602 else if (fPrevFlags & IEM_XCPT_FLAGS_T_EXT_INT)
1603 {
1604 fRaiseInfo = IEMXCPTRAISEINFO_EXT_INT_XCPT;
1605 if (uCurVector == X86_XCPT_PF)
1606 fRaiseInfo |= IEMXCPTRAISEINFO_EXT_INT_PF;
1607 }
1608 else
1609 {
1610 Assert(fPrevFlags & IEM_XCPT_FLAGS_T_SOFT_INT);
1611 fRaiseInfo = IEMXCPTRAISEINFO_SOFT_INT_XCPT;
1612 }
1613
1614 if (pfXcptRaiseInfo)
1615 *pfXcptRaiseInfo = fRaiseInfo;
1616 return enmRaise;
1617}
1618
1619
1620/**
1621 * Enters the CPU shutdown state initiated by a triple fault or other
1622 * unrecoverable conditions.
1623 *
1624 * @returns Strict VBox status code.
1625 * @param pVCpu The cross context virtual CPU structure of the
1626 * calling thread.
1627 */
1628static VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPUCC pVCpu) RT_NOEXCEPT
1629{
1630 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
1631 IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(pVCpu, VMX_EXIT_TRIPLE_FAULT, 0 /* u64ExitQual */);
1632
1633 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
1634 {
1635 Log2(("shutdown: Guest intercept -> #VMEXIT\n"));
1636 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
1637 }
1638
1639 RT_NOREF(pVCpu);
1640 return VINF_EM_TRIPLE_FAULT;
1641}
1642
1643
1644/**
1645 * Validates a new SS segment.
1646 *
1647 * @returns VBox strict status code.
1648 * @param pVCpu The cross context virtual CPU structure of the
1649 * calling thread.
1650 * @param   NewSS           The new SS selector.
1651 * @param uCpl The CPL to load the stack for.
1652 * @param pDesc Where to return the descriptor.
1653 */
1654static VBOXSTRICTRC iemMiscValidateNewSS(PVMCPUCC pVCpu, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc) RT_NOEXCEPT
1655{
1656 /* Null selectors are not allowed (we're not called for dispatching
1657 interrupts with SS=0 in long mode). */
1658 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
1659 {
1660        Log(("iemMiscValidateNewSS: %#x - null selector -> #TS(0)\n", NewSS));
1661 return iemRaiseTaskSwitchFault0(pVCpu);
1662 }
1663
1664 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
1665 if ((NewSS & X86_SEL_RPL) != uCpl)
1666 {
1667        Log(("iemMiscValidateNewSS: %#x - RPL and CPL (%d) differ -> #TS\n", NewSS, uCpl));
1668 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1669 }
1670
1671 /*
1672 * Read the descriptor.
1673 */
1674 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
1675 if (rcStrict != VINF_SUCCESS)
1676 return rcStrict;
1677
1678 /*
1679 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
1680 */
1681 if (!pDesc->Legacy.Gen.u1DescType)
1682 {
1683        Log(("iemMiscValidateNewSS: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
1684 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1685 }
1686
1687 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
1688 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
1689 {
1690        Log(("iemMiscValidateNewSS: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
1691 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1692 }
1693 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
1694 {
1695        Log(("iemMiscValidateNewSS: %#x - DPL (%d) and CPL (%d) differ -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
1696 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1697 }
1698
1699 /* Is it there? */
1700 /** @todo testcase: Is this checked before the canonical / limit check below? */
1701 if (!pDesc->Legacy.Gen.u1Present)
1702 {
1703        Log(("iemMiscValidateNewSS: %#x - segment not present -> #NP\n", NewSS));
1704 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
1705 }
1706
1707 return VINF_SUCCESS;
1708}
1709
1710/** @} */
1711
1712
1713/** @name Raising Exceptions.
1714 *
1715 * @{
1716 */
1717
1718
1719/**
1720 * Loads the specified stack far pointer from the TSS.
1721 *
1722 * @returns VBox strict status code.
1723 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1724 * @param uCpl The CPL to load the stack for.
1725 * @param pSelSS Where to return the new stack segment.
1726 * @param puEsp Where to return the new stack pointer.
1727 */
1728static VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPUCC pVCpu, uint8_t uCpl, PRTSEL pSelSS, uint32_t *puEsp) RT_NOEXCEPT
1729{
1730 VBOXSTRICTRC rcStrict;
1731 Assert(uCpl < 4);
1732
1733 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
1734 switch (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type)
1735 {
1736 /*
1737 * 16-bit TSS (X86TSS16).
1738 */
1739 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
1740 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
1741 {
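            /* The 16-bit TSS keeps an {SP, SS} pair for each of rings 0-2 starting at offset 2,
               4 bytes per ring, so the pair to load is at uCpl * 4 + 2. */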
1742 uint32_t off = uCpl * 4 + 2;
1743 if (off + 4 <= pVCpu->cpum.GstCtx.tr.u32Limit)
1744 {
1745 /** @todo check actual access pattern here. */
1746 uint32_t u32Tmp = 0; /* gcc maybe... */
1747 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
1748 if (rcStrict == VINF_SUCCESS)
1749 {
1750 *puEsp = RT_LOWORD(u32Tmp);
1751 *pSelSS = RT_HIWORD(u32Tmp);
1752 return VINF_SUCCESS;
1753 }
1754 }
1755 else
1756 {
1757 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
1758 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
1759 }
1760 break;
1761 }
1762
1763 /*
1764 * 32-bit TSS (X86TSS32).
1765 */
1766 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
1767 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
1768 {
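            /* The 32-bit TSS keeps an {ESP, SS} pair for each of rings 0-2 starting at offset 4,
               8 bytes per ring, so the pair to load is at uCpl * 8 + 4. */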
1769 uint32_t off = uCpl * 8 + 4;
1770 if (off + 7 <= pVCpu->cpum.GstCtx.tr.u32Limit)
1771 {
1772/** @todo check actual access pattern here. */
1773 uint64_t u64Tmp;
1774 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
1775 if (rcStrict == VINF_SUCCESS)
1776 {
1777 *puEsp = u64Tmp & UINT32_MAX;
1778 *pSelSS = (RTSEL)(u64Tmp >> 32);
1779 return VINF_SUCCESS;
1780 }
1781 }
1782 else
1783 {
1784                Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
1785 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
1786 }
1787 break;
1788 }
1789
1790 default:
1791 AssertFailed();
1792 rcStrict = VERR_IEM_IPE_4;
1793 break;
1794 }
1795
1796 *puEsp = 0; /* make gcc happy */
1797 *pSelSS = 0; /* make gcc happy */
1798 return rcStrict;
1799}
1800
1801
1802/**
1803 * Loads the specified stack pointer from the 64-bit TSS.
1804 *
1805 * @returns VBox strict status code.
1806 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1807 * @param uCpl The CPL to load the stack for.
1808 * @param uIst The interrupt stack table index, 0 if to use uCpl.
1809 * @param puRsp Where to return the new stack pointer.
1810 */
1811static VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPUCC pVCpu, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp) RT_NOEXCEPT
1812{
1813 Assert(uCpl < 4);
1814 Assert(uIst < 8);
1815 *puRsp = 0; /* make gcc happy */
1816
1817 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
1818 AssertReturn(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
1819
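    /* The 64-bit TSS stores RSP0..RSP2 starting at offset 4 and IST1..IST7 starting at
       offset 0x24, each entry being 8 bytes wide. */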
1820 uint32_t off;
1821 if (uIst)
1822 off = (uIst - 1) * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, ist1);
1823 else
1824 off = uCpl * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, rsp0);
1825 if (off + sizeof(uint64_t) > pVCpu->cpum.GstCtx.tr.u32Limit)
1826 {
1827 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pVCpu->cpum.GstCtx.tr.u32Limit));
1828 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
1829 }
1830
1831 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
1832}
1833
1834
1835/**
1836 * Adjust the CPU state according to the exception being raised.
1837 *
1838 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1839 * @param u8Vector The exception that has been raised.
1840 */
1841DECLINLINE(void) iemRaiseXcptAdjustState(PVMCPUCC pVCpu, uint8_t u8Vector)
1842{
1843 switch (u8Vector)
1844 {
1845 case X86_XCPT_DB:
1846 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);
1847 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
1848 break;
1849 /** @todo Read the AMD and Intel exception reference... */
1850 }
1851}
1852
1853
1854/**
1855 * Implements exceptions and interrupts for real mode.
1856 *
1857 * @returns VBox strict status code.
1858 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1859 * @param cbInstr The number of bytes to offset rIP by in the return
1860 * address.
1861 * @param u8Vector The interrupt / exception vector number.
1862 * @param fFlags The flags.
1863 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
1864 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
1865 */
1866static VBOXSTRICTRC
1867iemRaiseXcptOrIntInRealMode(PVMCPUCC pVCpu,
1868 uint8_t cbInstr,
1869 uint8_t u8Vector,
1870 uint32_t fFlags,
1871 uint16_t uErr,
1872 uint64_t uCr2) RT_NOEXCEPT
1873{
1874 NOREF(uErr); NOREF(uCr2);
1875 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
1876
1877 /*
1878 * Read the IDT entry.
1879 */
1880 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
1881 {
1882 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
1883 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
1884 }
1885 RTFAR16 Idte;
1886 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(4) * u8Vector);
1887 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
1888 {
1889 Log(("iemRaiseXcptOrIntInRealMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
1890 return rcStrict;
1891 }
1892
1893 /*
1894 * Push the stack frame.
1895 */
1896 uint16_t *pu16Frame;
1897 uint64_t uNewRsp;
1898 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, 3, (void **)&pu16Frame, &uNewRsp);
1899 if (rcStrict != VINF_SUCCESS)
1900 return rcStrict;
1901
1902 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
1903#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
1904 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
1905 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
1906 fEfl |= UINT16_C(0xf000);
1907#endif
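    /* The frame layout matches what IRET pops: IP at the lowest address, then CS, then FLAGS. */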
1908 pu16Frame[2] = (uint16_t)fEfl;
1909 pu16Frame[1] = (uint16_t)pVCpu->cpum.GstCtx.cs.Sel;
1910 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
1911 rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu16Frame, uNewRsp);
1912 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
1913 return rcStrict;
1914
1915 /*
1916 * Load the vector address into cs:ip and make exception specific state
1917 * adjustments.
1918 */
1919 pVCpu->cpum.GstCtx.cs.Sel = Idte.sel;
1920 pVCpu->cpum.GstCtx.cs.ValidSel = Idte.sel;
1921 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
1922 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)Idte.sel << 4;
1923 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
1924 pVCpu->cpum.GstCtx.rip = Idte.off;
1925 fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC);
1926 IEMMISC_SET_EFL(pVCpu, fEfl);
1927
1928 /** @todo do we actually do this in real mode? */
1929 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
1930 iemRaiseXcptAdjustState(pVCpu, u8Vector);
1931
1932 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
1933}
1934
1935
1936/**
1937 * Loads a NULL data selector into a segment register when coming from V8086 mode.
1938 *
1939 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1940 * @param pSReg Pointer to the segment register.
1941 */
1942DECLINLINE(void) iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPUCC pVCpu, PCPUMSELREG pSReg)
1943{
1944 pSReg->Sel = 0;
1945 pSReg->ValidSel = 0;
1946 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
1947 {
1948        /* VT-x (Intel 3960x) doesn't change the base and limit; it clears and sets the following attributes. */
1949 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
1950 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
1951 }
1952 else
1953 {
1954 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
1955 /** @todo check this on AMD-V */
1956 pSReg->u64Base = 0;
1957 pSReg->u32Limit = 0;
1958 }
1959}
1960
1961
1962/**
1963 * Loads a segment selector during a task switch in V8086 mode.
1964 *
1965 * @param pSReg Pointer to the segment register.
1966 * @param uSel The selector value to load.
1967 */
1968DECLINLINE(void) iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
1969{
1970 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
1971 pSReg->Sel = uSel;
1972 pSReg->ValidSel = uSel;
1973 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
1974 pSReg->u64Base = uSel << 4;
1975 pSReg->u32Limit = 0xffff;
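    /* 0xf3 = present, DPL=3, non-system, read/write accessed data segment. */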
1976 pSReg->Attr.u = 0xf3;
1977}
1978
1979
1980/**
1981 * Loads a segment selector during a task switch in protected mode.
1982 *
1983 * In this task switch scenario, we would throw \#TS exceptions rather than
1984 * \#GPs.
1985 *
1986 * @returns VBox strict status code.
1987 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1988 * @param pSReg Pointer to the segment register.
1989 * @param uSel The new selector value.
1990 *
1991 * @remarks This does _not_ handle CS or SS.
1992 * @remarks This expects pVCpu->iem.s.uCpl to be up to date.
1993 */
1994static VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPUCC pVCpu, PCPUMSELREG pSReg, uint16_t uSel) RT_NOEXCEPT
1995{
1996 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
1997
1998 /* Null data selector. */
1999 if (!(uSel & X86_SEL_MASK_OFF_RPL))
2000 {
2001 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
2002 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
2003 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2004 return VINF_SUCCESS;
2005 }
2006
2007 /* Fetch the descriptor. */
2008 IEMSELDESC Desc;
2009 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
2010 if (rcStrict != VINF_SUCCESS)
2011 {
2012 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
2013 VBOXSTRICTRC_VAL(rcStrict)));
2014 return rcStrict;
2015 }
2016
2017 /* Must be a data segment or readable code segment. */
2018 if ( !Desc.Legacy.Gen.u1DescType
2019 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
2020 {
2021 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
2022 Desc.Legacy.Gen.u4Type));
2023 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2024 }
2025
2026 /* Check privileges for data segments and non-conforming code segments. */
2027 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2028 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2029 {
2030 /* The RPL and the new CPL must be less than or equal to the DPL. */
2031 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
2032 || (pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl))
2033 {
2034 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
2035 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
2036 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2037 }
2038 }
2039
2040 /* Is it there? */
2041 if (!Desc.Legacy.Gen.u1Present)
2042 {
2043 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
2044 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2045 }
2046
2047 /* The base and limit. */
2048 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
2049 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
2050
2051 /*
2052 * Ok, everything checked out fine. Now set the accessed bit before
2053 * committing the result into the registers.
2054 */
2055 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2056 {
2057 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
2058 if (rcStrict != VINF_SUCCESS)
2059 return rcStrict;
2060 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2061 }
2062
2063 /* Commit */
2064 pSReg->Sel = uSel;
2065 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
2066 pSReg->u32Limit = cbLimit;
2067 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
2068 pSReg->ValidSel = uSel;
2069 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2070 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2071 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
2072
2073 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
2074 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2075 return VINF_SUCCESS;
2076}
2077
2078
2079/**
2080 * Performs a task switch.
2081 *
2082 * If the task switch is the result of a JMP, CALL or IRET instruction, the
2083 * caller is responsible for performing the necessary checks (like DPL, TSS
2084 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
2085 * reference for JMP, CALL, IRET.
2086 *
2087 * If the task switch is due to a software interrupt or hardware exception, the
2088 * the caller is responsible for validating the TSS selector and descriptor. See
2089 * Intel Instruction reference for INT n.
2090 *
2091 * @returns VBox strict status code.
2092 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2093 * @param enmTaskSwitch The cause of the task switch.
2094 * @param uNextEip The EIP effective after the task switch.
2095 * @param fFlags The flags, see IEM_XCPT_FLAGS_XXX.
2096 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2097 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2098 * @param SelTSS The TSS selector of the new task.
2099 * @param pNewDescTSS Pointer to the new TSS descriptor.
2100 */
2101VBOXSTRICTRC
2102iemTaskSwitch(PVMCPUCC pVCpu,
2103 IEMTASKSWITCH enmTaskSwitch,
2104 uint32_t uNextEip,
2105 uint32_t fFlags,
2106 uint16_t uErr,
2107 uint64_t uCr2,
2108 RTSEL SelTSS,
2109 PIEMSELDESC pNewDescTSS) RT_NOEXCEPT
2110{
2111 Assert(!IEM_IS_REAL_MODE(pVCpu));
2112 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
2113 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2114
2115 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
2116 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2117 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2118 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2119 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2120
2121 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2122 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2123
2124 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTSS,
2125 fIsNewTSS386, pVCpu->cpum.GstCtx.eip, uNextEip));
2126
2127 /* Update CR2 in case it's a page-fault. */
2128 /** @todo This should probably be done much earlier in IEM/PGM. See
2129 * @bugref{5653#c49}. */
2130 if (fFlags & IEM_XCPT_FLAGS_CR2)
2131 pVCpu->cpum.GstCtx.cr2 = uCr2;
2132
2133 /*
2134 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
2135 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
2136 */
2137 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
2138 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
2139 if (uNewTSSLimit < uNewTSSLimitMin)
2140 {
2141 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
2142 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
2143 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
2144 }
2145
2146 /*
2147     * Task switches in VMX non-root mode always cause a task-switch VM-exit.
2148 * The new TSS must have been read and validated (DPL, limits etc.) before a
2149 * task-switch VM-exit commences.
2150 *
2151 * See Intel spec. 25.4.2 "Treatment of Task Switches".
2152 */
2153 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
2154 {
2155 Log(("iemTaskSwitch: Guest intercept (source=%u, sel=%#x) -> VM-exit.\n", enmTaskSwitch, SelTSS));
2156 IEM_VMX_VMEXIT_TASK_SWITCH_RET(pVCpu, enmTaskSwitch, SelTSS, uNextEip - pVCpu->cpum.GstCtx.eip);
2157 }
2158
2159 /*
2160 * The SVM nested-guest intercept for task-switch takes priority over all exceptions
2161 * after validating the incoming (new) TSS, see AMD spec. 15.14.1 "Task Switch Intercept".
2162 */
2163 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TASK_SWITCH))
2164 {
2165 uint32_t const uExitInfo1 = SelTSS;
2166 uint32_t uExitInfo2 = uErr;
2167 switch (enmTaskSwitch)
2168 {
2169 case IEMTASKSWITCH_JUMP: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_JUMP; break;
2170 case IEMTASKSWITCH_IRET: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_IRET; break;
2171 default: break;
2172 }
2173 if (fFlags & IEM_XCPT_FLAGS_ERR)
2174 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_HAS_ERROR_CODE;
2175 if (pVCpu->cpum.GstCtx.eflags.Bits.u1RF)
2176 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_EFLAGS_RF;
2177
2178 Log(("iemTaskSwitch: Guest intercept -> #VMEXIT. uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uExitInfo1, uExitInfo2));
2179 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_TASK_SWITCH, uExitInfo1, uExitInfo2);
2180 RT_NOREF2(uExitInfo1, uExitInfo2);
2181 }
2182
2183 /*
2184     * Check the current TSS limit. The last write to the current TSS during the task
2185     * switch will be 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
2186 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2187 *
2188     * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
2189     * end up with smaller than "legal" TSS limits.
2190 */
2191 uint32_t const uCurTSSLimit = pVCpu->cpum.GstCtx.tr.u32Limit;
2192 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
2193 if (uCurTSSLimit < uCurTSSLimitMin)
2194 {
2195 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
2196 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
2197 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
2198 }
2199
2200 /*
2201 * Verify that the new TSS can be accessed and map it. Map only the required contents
2202 * and not the entire TSS.
2203 */
2204 void *pvNewTSS;
2205 uint32_t const cbNewTSS = uNewTSSLimitMin + 1;
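    /* With the limit minimums above this maps 0x2C bytes for a 16-bit TSS or 0x68 bytes for a
       32-bit one; the optional I/O permission bitmap is not needed for the switch itself. */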
2206 RTGCPTR const GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
2207 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
2208 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
2209 * not perform correct translation if this happens. See Intel spec. 7.2.1
2210 * "Task-State Segment". */
2211 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW, 0);
2212 if (rcStrict != VINF_SUCCESS)
2213 {
2214 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
2215 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
2216 return rcStrict;
2217 }
2218
2219 /*
2220 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
2221 */
2222 uint32_t u32EFlags = pVCpu->cpum.GstCtx.eflags.u32;
2223 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
2224 || enmTaskSwitch == IEMTASKSWITCH_IRET)
2225 {
2226 PX86DESC pDescCurTSS;
2227 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
2228 pVCpu->cpum.GstCtx.gdtr.pGdt + (pVCpu->cpum.GstCtx.tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0);
2229 if (rcStrict != VINF_SUCCESS)
2230 {
2231 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2232 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2233 return rcStrict;
2234 }
2235
2236 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2237 rcStrict = iemMemCommitAndUnmap(pVCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
2238 if (rcStrict != VINF_SUCCESS)
2239 {
2240 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2241 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2242 return rcStrict;
2243 }
2244
2245 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
2246 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
2247 {
2248 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2249 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2250 u32EFlags &= ~X86_EFL_NT;
2251 }
2252 }
2253
2254 /*
2255 * Save the CPU state into the current TSS.
2256 */
2257 RTGCPTR const GCPtrCurTSS = pVCpu->cpum.GstCtx.tr.u64Base;
2258 if (GCPtrNewTSS == GCPtrCurTSS)
2259 {
2260 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
2261 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
2262 pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.u32, pVCpu->cpum.GstCtx.eax,
2263 pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel,
2264 pVCpu->cpum.GstCtx.ldtr.Sel));
2265 }
2266 if (fIsNewTSS386)
2267 {
2268 /*
2269 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
2270 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2271 */
2272 void *pvCurTSS32;
2273 uint32_t const offCurTSS = RT_UOFFSETOF(X86TSS32, eip);
2274 uint32_t const cbCurTSS = RT_UOFFSETOF(X86TSS32, selLdt) - RT_UOFFSETOF(X86TSS32, eip);
2275 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
2276 rcStrict = iemMemMap(pVCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW, 0);
2277 if (rcStrict != VINF_SUCCESS)
2278 {
2279 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
2280 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
2281 return rcStrict;
2282 }
2283
2284        /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..offCurTSS + cbCurTSS). */
2285 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
2286 pCurTSS32->eip = uNextEip;
2287 pCurTSS32->eflags = u32EFlags;
2288 pCurTSS32->eax = pVCpu->cpum.GstCtx.eax;
2289 pCurTSS32->ecx = pVCpu->cpum.GstCtx.ecx;
2290 pCurTSS32->edx = pVCpu->cpum.GstCtx.edx;
2291 pCurTSS32->ebx = pVCpu->cpum.GstCtx.ebx;
2292 pCurTSS32->esp = pVCpu->cpum.GstCtx.esp;
2293 pCurTSS32->ebp = pVCpu->cpum.GstCtx.ebp;
2294 pCurTSS32->esi = pVCpu->cpum.GstCtx.esi;
2295 pCurTSS32->edi = pVCpu->cpum.GstCtx.edi;
2296 pCurTSS32->es = pVCpu->cpum.GstCtx.es.Sel;
2297 pCurTSS32->cs = pVCpu->cpum.GstCtx.cs.Sel;
2298 pCurTSS32->ss = pVCpu->cpum.GstCtx.ss.Sel;
2299 pCurTSS32->ds = pVCpu->cpum.GstCtx.ds.Sel;
2300 pCurTSS32->fs = pVCpu->cpum.GstCtx.fs.Sel;
2301 pCurTSS32->gs = pVCpu->cpum.GstCtx.gs.Sel;
2302
2303 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
2304 if (rcStrict != VINF_SUCCESS)
2305 {
2306 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2307 VBOXSTRICTRC_VAL(rcStrict)));
2308 return rcStrict;
2309 }
2310 }
2311 else
2312 {
2313 /*
2314 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
2315 */
2316 void *pvCurTSS16;
2317 uint32_t const offCurTSS = RT_UOFFSETOF(X86TSS16, ip);
2318 uint32_t const cbCurTSS = RT_UOFFSETOF(X86TSS16, selLdt) - RT_UOFFSETOF(X86TSS16, ip);
2319 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
2320 rcStrict = iemMemMap(pVCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW, 0);
2321 if (rcStrict != VINF_SUCCESS)
2322 {
2323 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
2324 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
2325 return rcStrict;
2326 }
2327
2328        /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..offCurTSS + cbCurTSS). */
2329 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
2330 pCurTSS16->ip = uNextEip;
2331 pCurTSS16->flags = u32EFlags;
2332 pCurTSS16->ax = pVCpu->cpum.GstCtx.ax;
2333 pCurTSS16->cx = pVCpu->cpum.GstCtx.cx;
2334 pCurTSS16->dx = pVCpu->cpum.GstCtx.dx;
2335 pCurTSS16->bx = pVCpu->cpum.GstCtx.bx;
2336 pCurTSS16->sp = pVCpu->cpum.GstCtx.sp;
2337 pCurTSS16->bp = pVCpu->cpum.GstCtx.bp;
2338 pCurTSS16->si = pVCpu->cpum.GstCtx.si;
2339 pCurTSS16->di = pVCpu->cpum.GstCtx.di;
2340 pCurTSS16->es = pVCpu->cpum.GstCtx.es.Sel;
2341 pCurTSS16->cs = pVCpu->cpum.GstCtx.cs.Sel;
2342 pCurTSS16->ss = pVCpu->cpum.GstCtx.ss.Sel;
2343 pCurTSS16->ds = pVCpu->cpum.GstCtx.ds.Sel;
2344
2345 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
2346 if (rcStrict != VINF_SUCCESS)
2347 {
2348 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2349 VBOXSTRICTRC_VAL(rcStrict)));
2350 return rcStrict;
2351 }
2352 }
2353
2354 /*
2355 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
2356 */
2357 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2358 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2359 {
2360 /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */
2361 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
2362 pNewTSS->selPrev = pVCpu->cpum.GstCtx.tr.Sel;
2363 }
2364
2365 /*
2366 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky,
2367 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
2368 */
2369 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
2370 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
2371 bool fNewDebugTrap;
2372 if (fIsNewTSS386)
2373 {
2374 PCX86TSS32 pNewTSS32 = (PCX86TSS32)pvNewTSS;
2375 uNewCr3 = (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
2376 uNewEip = pNewTSS32->eip;
2377 uNewEflags = pNewTSS32->eflags;
2378 uNewEax = pNewTSS32->eax;
2379 uNewEcx = pNewTSS32->ecx;
2380 uNewEdx = pNewTSS32->edx;
2381 uNewEbx = pNewTSS32->ebx;
2382 uNewEsp = pNewTSS32->esp;
2383 uNewEbp = pNewTSS32->ebp;
2384 uNewEsi = pNewTSS32->esi;
2385 uNewEdi = pNewTSS32->edi;
2386 uNewES = pNewTSS32->es;
2387 uNewCS = pNewTSS32->cs;
2388 uNewSS = pNewTSS32->ss;
2389 uNewDS = pNewTSS32->ds;
2390 uNewFS = pNewTSS32->fs;
2391 uNewGS = pNewTSS32->gs;
2392 uNewLdt = pNewTSS32->selLdt;
2393 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
2394 }
2395 else
2396 {
2397 PCX86TSS16 pNewTSS16 = (PCX86TSS16)pvNewTSS;
2398 uNewCr3 = 0;
2399 uNewEip = pNewTSS16->ip;
2400 uNewEflags = pNewTSS16->flags;
2401 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax;
2402 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx;
2403 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx;
2404 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx;
2405 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp;
2406 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp;
2407 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si;
2408 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di;
2409 uNewES = pNewTSS16->es;
2410 uNewCS = pNewTSS16->cs;
2411 uNewSS = pNewTSS16->ss;
2412 uNewDS = pNewTSS16->ds;
2413 uNewFS = 0;
2414 uNewGS = 0;
2415 uNewLdt = pNewTSS16->selLdt;
2416 fNewDebugTrap = false;
2417 }
2418
2419 if (GCPtrNewTSS == GCPtrCurTSS)
2420 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
2421 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
2422
2423 /*
2424 * We're done accessing the new TSS.
2425 */
2426 rcStrict = iemMemCommitAndUnmap(pVCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
2427 if (rcStrict != VINF_SUCCESS)
2428 {
2429 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
2430 return rcStrict;
2431 }
2432
2433 /*
2434 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
2435 */
2436 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
2437 {
2438 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
2439 pVCpu->cpum.GstCtx.gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0);
2440 if (rcStrict != VINF_SUCCESS)
2441 {
2442 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2443 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2444 return rcStrict;
2445 }
2446
2447 /* Check that the descriptor indicates the new TSS is available (not busy). */
2448 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2449 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
2450 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
2451
2452 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2453 rcStrict = iemMemCommitAndUnmap(pVCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
2454 if (rcStrict != VINF_SUCCESS)
2455 {
2456 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2457 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2458 return rcStrict;
2459 }
2460 }
2461
2462 /*
2463 * From this point on, we're technically in the new task. We will defer exceptions
2464 * until the completion of the task switch but before executing any instructions in the new task.
2465 */
2466 pVCpu->cpum.GstCtx.tr.Sel = SelTSS;
2467 pVCpu->cpum.GstCtx.tr.ValidSel = SelTSS;
2468 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
2469 pVCpu->cpum.GstCtx.tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
2470 pVCpu->cpum.GstCtx.tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
2471 pVCpu->cpum.GstCtx.tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy);
2472 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
2473
2474 /* Set the busy bit in TR. */
2475 pVCpu->cpum.GstCtx.tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2476
2477 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
2478 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2479 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2480 {
2481 uNewEflags |= X86_EFL_NT;
2482 }
2483
2484 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
2485 pVCpu->cpum.GstCtx.cr0 |= X86_CR0_TS;
2486 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
2487
2488 pVCpu->cpum.GstCtx.eip = uNewEip;
2489 pVCpu->cpum.GstCtx.eax = uNewEax;
2490 pVCpu->cpum.GstCtx.ecx = uNewEcx;
2491 pVCpu->cpum.GstCtx.edx = uNewEdx;
2492 pVCpu->cpum.GstCtx.ebx = uNewEbx;
2493 pVCpu->cpum.GstCtx.esp = uNewEsp;
2494 pVCpu->cpum.GstCtx.ebp = uNewEbp;
2495 pVCpu->cpum.GstCtx.esi = uNewEsi;
2496 pVCpu->cpum.GstCtx.edi = uNewEdi;
2497
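    /* Keep only the architecturally defined flags and force bit 1, which always reads as one. */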
2498 uNewEflags &= X86_EFL_LIVE_MASK;
2499 uNewEflags |= X86_EFL_RA1_MASK;
2500 IEMMISC_SET_EFL(pVCpu, uNewEflags);
2501
2502 /*
2503 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
2504 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
2505 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
2506 */
2507 pVCpu->cpum.GstCtx.es.Sel = uNewES;
2508 pVCpu->cpum.GstCtx.es.Attr.u &= ~X86DESCATTR_P;
2509
2510 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
2511 pVCpu->cpum.GstCtx.cs.Attr.u &= ~X86DESCATTR_P;
2512
2513 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
2514 pVCpu->cpum.GstCtx.ss.Attr.u &= ~X86DESCATTR_P;
2515
2516 pVCpu->cpum.GstCtx.ds.Sel = uNewDS;
2517 pVCpu->cpum.GstCtx.ds.Attr.u &= ~X86DESCATTR_P;
2518
2519 pVCpu->cpum.GstCtx.fs.Sel = uNewFS;
2520 pVCpu->cpum.GstCtx.fs.Attr.u &= ~X86DESCATTR_P;
2521
2522 pVCpu->cpum.GstCtx.gs.Sel = uNewGS;
2523 pVCpu->cpum.GstCtx.gs.Attr.u &= ~X86DESCATTR_P;
2524 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2525
2526 pVCpu->cpum.GstCtx.ldtr.Sel = uNewLdt;
2527 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
2528 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_P;
2529 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
2530
2531 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2532 {
2533 pVCpu->cpum.GstCtx.es.Attr.u |= X86DESCATTR_UNUSABLE;
2534 pVCpu->cpum.GstCtx.cs.Attr.u |= X86DESCATTR_UNUSABLE;
2535 pVCpu->cpum.GstCtx.ss.Attr.u |= X86DESCATTR_UNUSABLE;
2536 pVCpu->cpum.GstCtx.ds.Attr.u |= X86DESCATTR_UNUSABLE;
2537 pVCpu->cpum.GstCtx.fs.Attr.u |= X86DESCATTR_UNUSABLE;
2538 pVCpu->cpum.GstCtx.gs.Attr.u |= X86DESCATTR_UNUSABLE;
2539 pVCpu->cpum.GstCtx.ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
2540 }
2541
2542 /*
2543 * Switch CR3 for the new task.
2544 */
2545 if ( fIsNewTSS386
2546 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG))
2547 {
2548 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
2549 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
2550 AssertRCSuccessReturn(rc, rc);
2551
2552 /* Inform PGM. */
2553 /** @todo Should we raise \#GP(0) here when PAE PDPEs are invalid? */
2554 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PGE));
2555 AssertRCReturn(rc, rc);
2556 /* ignore informational status codes */
2557
2558 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
2559 }
2560
2561 /*
2562 * Switch LDTR for the new task.
2563 */
2564 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
2565 iemHlpLoadNullDataSelectorProt(pVCpu, &pVCpu->cpum.GstCtx.ldtr, uNewLdt);
2566 else
2567 {
2568 Assert(!pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
2569
2570 IEMSELDESC DescNewLdt;
2571 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
2572 if (rcStrict != VINF_SUCCESS)
2573 {
2574 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
2575 uNewLdt, pVCpu->cpum.GstCtx.gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
2576 return rcStrict;
2577 }
2578 if ( !DescNewLdt.Legacy.Gen.u1Present
2579 || DescNewLdt.Legacy.Gen.u1DescType
2580 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
2581 {
2582 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
2583 uNewLdt, DescNewLdt.Legacy.u));
2584 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
2585 }
2586
2587 pVCpu->cpum.GstCtx.ldtr.ValidSel = uNewLdt;
2588 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
2589 pVCpu->cpum.GstCtx.ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
2590 pVCpu->cpum.GstCtx.ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
2591 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
2592 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2593 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
2594 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
2595 }
2596
2597 IEMSELDESC DescSS;
2598 if (IEM_IS_V86_MODE(pVCpu))
2599 {
2600 pVCpu->iem.s.uCpl = 3;
2601 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.es, uNewES);
2602 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.cs, uNewCS);
2603 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ss, uNewSS);
2604 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ds, uNewDS);
2605 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.fs, uNewFS);
2606 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.gs, uNewGS);
2607
2608 /* Quick fix: fake DescSS. */ /** @todo fix the code further down? */
2609 DescSS.Legacy.u = 0;
2610 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u32Limit;
2611 DescSS.Legacy.Gen.u4LimitHigh = pVCpu->cpum.GstCtx.ss.u32Limit >> 16;
2612 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u64Base;
2613 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 16);
2614 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 24);
2615 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
2616 DescSS.Legacy.Gen.u2Dpl = 3;
2617 }
2618 else
2619 {
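        /* The new CPL is taken from the RPL of the CS selector loaded from the new TSS;
           it is checked against SS and CS below. */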
2620 uint8_t const uNewCpl = (uNewCS & X86_SEL_RPL);
2621
2622 /*
2623 * Load the stack segment for the new task.
2624 */
2625 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
2626 {
2627 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
2628 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2629 }
2630
2631 /* Fetch the descriptor. */
2632 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
2633 if (rcStrict != VINF_SUCCESS)
2634 {
2635 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
2636 VBOXSTRICTRC_VAL(rcStrict)));
2637 return rcStrict;
2638 }
2639
2640 /* SS must be a data segment and writable. */
2641 if ( !DescSS.Legacy.Gen.u1DescType
2642 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
2643 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
2644 {
2645 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
2646 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
2647 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2648 }
2649
2650 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
2651 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
2652 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
2653 {
2654 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
2655 uNewCpl));
2656 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2657 }
2658
2659 /* Is it there? */
2660 if (!DescSS.Legacy.Gen.u1Present)
2661 {
2662 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
2663 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2664 }
2665
2666 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
2667 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
2668
2669 /* Set the accessed bit before committing the result into SS. */
2670 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2671 {
2672 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
2673 if (rcStrict != VINF_SUCCESS)
2674 return rcStrict;
2675 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2676 }
2677
2678 /* Commit SS. */
2679 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
2680 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSS;
2681 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
2682 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimit;
2683 pVCpu->cpum.GstCtx.ss.u64Base = u64Base;
2684 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
2685 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
2686
2687 /* CPL has changed, update IEM before loading rest of segments. */
2688 pVCpu->iem.s.uCpl = uNewCpl;
2689
2690 /*
2691 * Load the data segments for the new task.
2692 */
2693 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.es, uNewES);
2694 if (rcStrict != VINF_SUCCESS)
2695 return rcStrict;
2696 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.ds, uNewDS);
2697 if (rcStrict != VINF_SUCCESS)
2698 return rcStrict;
2699 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.fs, uNewFS);
2700 if (rcStrict != VINF_SUCCESS)
2701 return rcStrict;
2702 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.gs, uNewGS);
2703 if (rcStrict != VINF_SUCCESS)
2704 return rcStrict;
2705
2706 /*
2707 * Load the code segment for the new task.
2708 */
2709 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
2710 {
2711 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
2712 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2713 }
2714
2715 /* Fetch the descriptor. */
2716 IEMSELDESC DescCS;
2717 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
2718 if (rcStrict != VINF_SUCCESS)
2719 {
2720 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
2721 return rcStrict;
2722 }
2723
2724 /* CS must be a code segment. */
2725 if ( !DescCS.Legacy.Gen.u1DescType
2726 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
2727 {
2728 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
2729 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
2730 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2731 }
2732
2733 /* For conforming CS, DPL must be less than or equal to the RPL. */
2734 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2735 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
2736 {
2737            Log(("iemTaskSwitch: conforming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
2738 DescCS.Legacy.Gen.u2Dpl));
2739 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2740 }
2741
2742 /* For non-conforming CS, DPL must match RPL. */
2743 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2744 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
2745 {
2746            Log(("iemTaskSwitch: non-conforming CS DPL RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
2747 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
2748 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2749 }
2750
2751 /* Is it there? */
2752 if (!DescCS.Legacy.Gen.u1Present)
2753 {
2754 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
2755 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2756 }
2757
2758 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
2759 u64Base = X86DESC_BASE(&DescCS.Legacy);
2760
2761 /* Set the accessed bit before committing the result into CS. */
2762 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2763 {
2764 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
2765 if (rcStrict != VINF_SUCCESS)
2766 return rcStrict;
2767 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2768 }
2769
2770 /* Commit CS. */
2771 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
2772 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCS;
2773 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
2774 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
2775 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
2776 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
2777 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
2778 }
2779
2780 /** @todo Debug trap. */
2781 if (fIsNewTSS386 && fNewDebugTrap)
2782 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
2783
2784 /*
2785 * Construct the error code masks based on what caused this task switch.
2786 * See Intel Instruction reference for INT.
2787 */
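    /* uExt becomes the EXT bit (bit 0) of the error code for any fault raised below (#SS/#GP):
       set when the original event was external to the program (hardware interrupt, CPU exception
       or ICEBP), clear for software INTn/INT3/INTO. */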
2788 uint16_t uExt;
2789 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
2790 && ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
2791 || (fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)))
2792 {
2793 uExt = 1;
2794 }
2795 else
2796 uExt = 0;
2797
2798 /*
2799 * Push any error code on to the new stack.
2800 */
2801 if (fFlags & IEM_XCPT_FLAGS_ERR)
2802 {
2803 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
2804 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
2805 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
2806
2807 /* Check that there is sufficient space on the stack. */
2808 /** @todo Factor out segment limit checking for normal/expand down segments
2809 * into a separate function. */
2810 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
2811 {
2812 if ( pVCpu->cpum.GstCtx.esp - 1 > cbLimitSS
2813 || pVCpu->cpum.GstCtx.esp < cbStackFrame)
2814 {
2815 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
2816 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
2817 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
2818 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
2819 }
2820 }
2821 else
2822 {
2823 if ( pVCpu->cpum.GstCtx.esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
2824 || pVCpu->cpum.GstCtx.esp - cbStackFrame < cbLimitSS + UINT32_C(1))
2825 {
2826 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
2827 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
2828 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
2829 }
2830 }
2831
2832
2833 if (fIsNewTSS386)
2834 rcStrict = iemMemStackPushU32(pVCpu, uErr);
2835 else
2836 rcStrict = iemMemStackPushU16(pVCpu, uErr);
2837 if (rcStrict != VINF_SUCCESS)
2838 {
2839 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
2840 fIsNewTSS386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
2841 return rcStrict;
2842 }
2843 }
2844
2845 /* Check the new EIP against the new CS limit. */
2846 if (pVCpu->cpum.GstCtx.eip > pVCpu->cpum.GstCtx.cs.u32Limit)
2847 {
2848        Log(("iemTaskSwitch: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
2849 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.cs.u32Limit));
2850 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
2851 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
2852 }
2853
2854 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip,
2855 pVCpu->cpum.GstCtx.ss.Sel));
2856 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
2857}
2858
2859
2860/**
2861 * Implements exceptions and interrupts for protected mode.
2862 *
2863 * @returns VBox strict status code.
2864 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2865 * @param cbInstr The number of bytes to offset rIP by in the return
2866 * address.
2867 * @param u8Vector The interrupt / exception vector number.
2868 * @param fFlags The flags.
2869 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2870 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2871 */
2872static VBOXSTRICTRC
2873iemRaiseXcptOrIntInProtMode(PVMCPUCC pVCpu,
2874 uint8_t cbInstr,
2875 uint8_t u8Vector,
2876 uint32_t fFlags,
2877 uint16_t uErr,
2878 uint64_t uCr2) RT_NOEXCEPT
2879{
2880 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2881
2882 /*
2883 * Read the IDT entry.
2884 */
2885 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
2886 {
2887 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
2888 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2889 }
2890 X86DESC Idte;
2891 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
2892 pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(8) * u8Vector);
2893 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2894 {
2895 Log(("iemRaiseXcptOrIntInProtMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
2896 return rcStrict;
2897 }
2898 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
2899 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
2900 Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
2901
2902 /*
2903 * Check the descriptor type, DPL and such.
2904 * ASSUMES this is done in the same order as described for call-gate calls.
2905 */
2906 if (Idte.Gate.u1DescType)
2907 {
2908 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
2909 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2910 }
2911 bool fTaskGate = false;
2912 uint8_t f32BitGate = true;
2913 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
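    /* The gate size selects between a 16-bit and a 32-bit stack frame; interrupt gates
       additionally clear IF on entry while trap gates leave it unchanged. TF, NT, RF and VM
       are always cleared. */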
2914 switch (Idte.Gate.u4Type)
2915 {
2916 case X86_SEL_TYPE_SYS_UNDEFINED:
2917 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
2918 case X86_SEL_TYPE_SYS_LDT:
2919 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
2920 case X86_SEL_TYPE_SYS_286_CALL_GATE:
2921 case X86_SEL_TYPE_SYS_UNDEFINED2:
2922 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
2923 case X86_SEL_TYPE_SYS_UNDEFINED3:
2924 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
2925 case X86_SEL_TYPE_SYS_386_CALL_GATE:
2926 case X86_SEL_TYPE_SYS_UNDEFINED4:
2927 {
2928 /** @todo check what actually happens when the type is wrong...
2929 * esp. call gates. */
2930 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
2931 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2932 }
2933
2934 case X86_SEL_TYPE_SYS_286_INT_GATE:
2935 f32BitGate = false;
2936 RT_FALL_THRU();
2937 case X86_SEL_TYPE_SYS_386_INT_GATE:
2938 fEflToClear |= X86_EFL_IF;
2939 break;
2940
2941 case X86_SEL_TYPE_SYS_TASK_GATE:
2942 fTaskGate = true;
2943#ifndef IEM_IMPLEMENTS_TASKSWITCH
2944 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
2945#endif
2946 break;
2947
2948 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
2949            f32BitGate = false; RT_FALL_THRU();
2950 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
2951 break;
2952
2953 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2954 }
2955
2956 /* Check DPL against CPL if applicable. */
2957 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
2958 {
2959 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
2960 {
2961 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
2962 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2963 }
2964 }
2965
2966 /* Is it there? */
2967 if (!Idte.Gate.u1Present)
2968 {
2969 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
2970 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2971 }
2972
2973 /* Is it a task-gate? */
2974 if (fTaskGate)
2975 {
2976 /*
2977 * Construct the error code masks based on what caused this task switch.
2978 * See Intel Instruction reference for INT.
2979 */
2980 uint16_t const uExt = ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
2981 && !(fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)) ? 0 : 1;
2982 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
2983 RTSEL SelTSS = Idte.Gate.u16Sel;
2984
2985 /*
2986 * Fetch the TSS descriptor in the GDT.
2987 */
2988 IEMSELDESC DescTSS;
2989 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
2990 if (rcStrict != VINF_SUCCESS)
2991 {
2992 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
2993 VBOXSTRICTRC_VAL(rcStrict)));
2994 return rcStrict;
2995 }
2996
2997 /* The TSS descriptor must be a system segment and be available (not busy). */
2998 if ( DescTSS.Legacy.Gen.u1DescType
2999 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
3000 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
3001 {
3002 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
3003 u8Vector, SelTSS, DescTSS.Legacy.au64));
3004 return iemRaiseGeneralProtectionFault(pVCpu, (SelTSS & uSelMask) | uExt);
3005 }
3006
3007 /* The TSS must be present. */
3008 if (!DescTSS.Legacy.Gen.u1Present)
3009 {
3010 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
3011 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTSS & uSelMask) | uExt);
3012 }
3013
3014 /* Do the actual task switch. */
3015 return iemTaskSwitch(pVCpu, IEMTASKSWITCH_INT_XCPT,
3016 (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip,
3017 fFlags, uErr, uCr2, SelTSS, &DescTSS);
3018 }
3019
3020 /* A null CS is bad. */
3021 RTSEL NewCS = Idte.Gate.u16Sel;
3022 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3023 {
3024 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3025 return iemRaiseGeneralProtectionFault0(pVCpu);
3026 }
3027
3028 /* Fetch the descriptor for the new CS. */
3029 IEMSELDESC DescCS;
3030 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
3031 if (rcStrict != VINF_SUCCESS)
3032 {
3033 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3034 return rcStrict;
3035 }
3036
3037 /* Must be a code segment. */
3038 if (!DescCS.Legacy.Gen.u1DescType)
3039 {
3040 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3041 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3042 }
3043 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3044 {
3045 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3046 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3047 }
3048
3049 /* Don't allow lowering the privilege level. */
3050 /** @todo Does the lowering of privileges apply to software interrupts
3051 * only? This has bearings on the more-privileged or
3052 * same-privilege stack behavior further down. A testcase would
3053 * be nice. */
3054 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
3055 {
3056 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3057 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
3058 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3059 }
3060
3061 /* Make sure the selector is present. */
3062 if (!DescCS.Legacy.Gen.u1Present)
3063 {
3064 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3065 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
3066 }
3067
3068 /* Check the new EIP against the new CS limit. */
3069 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
3070 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
3071 ? Idte.Gate.u16OffsetLow
3072 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
3073 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
3074 if (uNewEip > cbLimitCS)
3075 {
3076 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
3077 u8Vector, uNewEip, cbLimitCS, NewCS));
3078 return iemRaiseGeneralProtectionFault(pVCpu, 0);
3079 }
3080 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
3081
3082 /* Calc the flag image to push. */
3083 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3084 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3085 fEfl &= ~X86_EFL_RF;
3086 else
3087 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3088
3089 /* From V8086 mode only go to CPL 0. */
3090 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3091 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
3092 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
3093 {
3094 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
3095 return iemRaiseGeneralProtectionFault(pVCpu, 0);
3096 }
3097
3098 /*
3099 * If the privilege level changes, we need to get a new stack from the TSS.
3100     * This in turn means validating the new SS and ESP...
3101 */
3102 if (uNewCpl != pVCpu->iem.s.uCpl)
3103 {
3104 RTSEL NewSS;
3105 uint32_t uNewEsp;
3106 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, uNewCpl, &NewSS, &uNewEsp);
3107 if (rcStrict != VINF_SUCCESS)
3108 return rcStrict;
3109
3110 IEMSELDESC DescSS;
3111 rcStrict = iemMiscValidateNewSS(pVCpu, NewSS, uNewCpl, &DescSS);
3112 if (rcStrict != VINF_SUCCESS)
3113 return rcStrict;
3114 /* If the new SS is 16-bit, we are only going to use SP, not ESP. */
3115 if (!DescSS.Legacy.Gen.u1DefBig)
3116 {
3117 Log(("iemRaiseXcptOrIntInProtMode: Forcing ESP=%#x to 16 bits\n", uNewEsp));
3118 uNewEsp = (uint16_t)uNewEsp;
3119 }
3120
3121 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
3122
3123 /* Check that there is sufficient space for the stack frame. */
3124 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
3125 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
3126 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
3127 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
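        /* Worked example (sketch): a 32-bit gate (f32BitGate=1) with an error code
           and no V8086 state yields (12 << 1) = 24 bytes, i.e. the six dwords pushed
           below (uErr, eip, cs, eflags, esp, ss); the V8086 variant adds es, ds, fs
           and gs for (20 << 1) = 40 bytes. */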
3128
3129 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3130 {
3131 if ( uNewEsp - 1 > cbLimitSS
3132 || uNewEsp < cbStackFrame)
3133 {
3134 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
3135 u8Vector, NewSS, uNewEsp, cbStackFrame));
3136 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
3137 }
3138 }
3139 else
3140 {
3141 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT16_MAX)
3142 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
3143 {
3144 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
3145 u8Vector, NewSS, uNewEsp, cbStackFrame));
3146 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
3147 }
3148 }
3149
3150 /*
3151 * Start making changes.
3152 */
3153
3154 /* Set the new CPL so that stack accesses use it. */
3155 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
3156 pVCpu->iem.s.uCpl = uNewCpl;
3157
3158 /* Create the stack frame. */
3159 RTPTRUNION uStackFrame;
3160 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
3161 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy),
3162 IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */
3163 if (rcStrict != VINF_SUCCESS)
3164 return rcStrict;
3165 void * const pvStackFrame = uStackFrame.pv;
3166 if (f32BitGate)
3167 {
3168 if (fFlags & IEM_XCPT_FLAGS_ERR)
3169 *uStackFrame.pu32++ = uErr;
3170 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3171 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
3172 uStackFrame.pu32[2] = fEfl;
3173 uStackFrame.pu32[3] = pVCpu->cpum.GstCtx.esp;
3174 uStackFrame.pu32[4] = pVCpu->cpum.GstCtx.ss.Sel;
3175 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
3176 if (fEfl & X86_EFL_VM)
3177 {
3178 uStackFrame.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel;
3179 uStackFrame.pu32[5] = pVCpu->cpum.GstCtx.es.Sel;
3180 uStackFrame.pu32[6] = pVCpu->cpum.GstCtx.ds.Sel;
3181 uStackFrame.pu32[7] = pVCpu->cpum.GstCtx.fs.Sel;
3182 uStackFrame.pu32[8] = pVCpu->cpum.GstCtx.gs.Sel;
3183 }
3184 }
3185 else
3186 {
3187 if (fFlags & IEM_XCPT_FLAGS_ERR)
3188 *uStackFrame.pu16++ = uErr;
3189 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
3190 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
3191 uStackFrame.pu16[2] = fEfl;
3192 uStackFrame.pu16[3] = pVCpu->cpum.GstCtx.sp;
3193 uStackFrame.pu16[4] = pVCpu->cpum.GstCtx.ss.Sel;
3194 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.sp));
3195 if (fEfl & X86_EFL_VM)
3196 {
3197 uStackFrame.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
3198 uStackFrame.pu16[5] = pVCpu->cpum.GstCtx.es.Sel;
3199 uStackFrame.pu16[6] = pVCpu->cpum.GstCtx.ds.Sel;
3200 uStackFrame.pu16[7] = pVCpu->cpum.GstCtx.fs.Sel;
3201 uStackFrame.pu16[8] = pVCpu->cpum.GstCtx.gs.Sel;
3202 }
3203 }
3204 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
3205 if (rcStrict != VINF_SUCCESS)
3206 return rcStrict;
3207
3208 /* Mark the selectors 'accessed' (hope this is the correct time). */
3209    /** @todo testcase: exactly _when_ are the accessed bits set - before or
3210 * after pushing the stack frame? (Write protect the gdt + stack to
3211 * find out.) */
3212 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3213 {
3214 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3215 if (rcStrict != VINF_SUCCESS)
3216 return rcStrict;
3217 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3218 }
3219
3220 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3221 {
3222 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
3223 if (rcStrict != VINF_SUCCESS)
3224 return rcStrict;
3225 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3226 }
3227
3228 /*
3229     * Start committing the register changes (joins with the DPL=CPL branch).
3230 */
3231 pVCpu->cpum.GstCtx.ss.Sel = NewSS;
3232 pVCpu->cpum.GstCtx.ss.ValidSel = NewSS;
3233 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3234 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimitSS;
3235 pVCpu->cpum.GstCtx.ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
3236 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3237 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
3238 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
3239 * SP is loaded).
3240 * Need to check the other combinations too:
3241 * - 16-bit TSS, 32-bit handler
3242 * - 32-bit TSS, 16-bit handler */
3243 if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
3244 pVCpu->cpum.GstCtx.sp = (uint16_t)(uNewEsp - cbStackFrame);
3245 else
3246 pVCpu->cpum.GstCtx.rsp = uNewEsp - cbStackFrame;
3247
3248 if (fEfl & X86_EFL_VM)
3249 {
3250 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.gs);
3251 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.fs);
3252 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.es);
3253 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.ds);
3254 }
3255 }
3256 /*
3257 * Same privilege, no stack change and smaller stack frame.
3258 */
3259 else
3260 {
3261 uint64_t uNewRsp;
3262 RTPTRUNION uStackFrame;
3263 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
3264 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, f32BitGate ? 3 : 1, &uStackFrame.pv, &uNewRsp);
3265 if (rcStrict != VINF_SUCCESS)
3266 return rcStrict;
3267 void * const pvStackFrame = uStackFrame.pv;
3268
3269 if (f32BitGate)
3270 {
3271 if (fFlags & IEM_XCPT_FLAGS_ERR)
3272 *uStackFrame.pu32++ = uErr;
3273 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3274 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
3275 uStackFrame.pu32[2] = fEfl;
3276 }
3277 else
3278 {
3279 if (fFlags & IEM_XCPT_FLAGS_ERR)
3280 *uStackFrame.pu16++ = uErr;
3281 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3282 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
3283 uStackFrame.pu16[2] = fEfl;
3284 }
3285 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
3286 if (rcStrict != VINF_SUCCESS)
3287 return rcStrict;
3288
3289 /* Mark the CS selector as 'accessed'. */
3290 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3291 {
3292 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3293 if (rcStrict != VINF_SUCCESS)
3294 return rcStrict;
3295 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3296 }
3297
3298 /*
3299 * Start committing the register changes (joins with the other branch).
3300 */
3301 pVCpu->cpum.GstCtx.rsp = uNewRsp;
3302 }
3303
3304 /* ... register committing continues. */
3305 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3306 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3307 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3308 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCS;
3309 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3310 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3311
3312 pVCpu->cpum.GstCtx.rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
3313 fEfl &= ~fEflToClear;
3314 IEMMISC_SET_EFL(pVCpu, fEfl);
3315
3316 if (fFlags & IEM_XCPT_FLAGS_CR2)
3317 pVCpu->cpum.GstCtx.cr2 = uCr2;
3318
3319 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3320 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3321
3322 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3323}
3324
3325
3326/**
3327 * Implements exceptions and interrupts for long mode.
3328 *
3329 * @returns VBox strict status code.
3330 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3331 * @param cbInstr The number of bytes to offset rIP by in the return
3332 * address.
3333 * @param u8Vector The interrupt / exception vector number.
3334 * @param fFlags The flags.
3335 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3336 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3337 */
3338static VBOXSTRICTRC
3339iemRaiseXcptOrIntInLongMode(PVMCPUCC pVCpu,
3340 uint8_t cbInstr,
3341 uint8_t u8Vector,
3342 uint32_t fFlags,
3343 uint16_t uErr,
3344 uint64_t uCr2) RT_NOEXCEPT
3345{
3346 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3347
3348 /*
3349 * Read the IDT entry.
3350 */
3351 uint16_t offIdt = (uint16_t)u8Vector << 4;
3352 if (pVCpu->cpum.GstCtx.idtr.cbIdt < offIdt + 7)
3353 {
3354 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3355 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3356 }
3357 X86DESC64 Idte;
3358#ifdef _MSC_VER /* Shut up silly compiler warning. */
3359 Idte.au64[0] = 0;
3360 Idte.au64[1] = 0;
3361#endif
3362 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt);
3363 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3364 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt + 8);
3365 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3366 {
3367 Log(("iemRaiseXcptOrIntInLongMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3368 return rcStrict;
3369 }
3370 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
3371 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3372 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
3373
3374 /*
3375 * Check the descriptor type, DPL and such.
3376 * ASSUMES this is done in the same order as described for call-gate calls.
3377 */
3378 if (Idte.Gate.u1DescType)
3379 {
3380 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3381 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3382 }
3383 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
3384 switch (Idte.Gate.u4Type)
3385 {
3386 case AMD64_SEL_TYPE_SYS_INT_GATE:
3387 fEflToClear |= X86_EFL_IF;
3388 break;
3389 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
3390 break;
3391
3392 default:
3393 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3394 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3395 }
3396
3397 /* Check DPL against CPL if applicable. */
3398 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
3399 {
3400 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
3401 {
3402 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
3403 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3404 }
3405 }
3406
3407 /* Is it there? */
3408 if (!Idte.Gate.u1Present)
3409 {
3410 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
3411 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3412 }
3413
3414 /* A null CS is bad. */
3415 RTSEL NewCS = Idte.Gate.u16Sel;
3416 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3417 {
3418 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3419 return iemRaiseGeneralProtectionFault0(pVCpu);
3420 }
3421
3422 /* Fetch the descriptor for the new CS. */
3423 IEMSELDESC DescCS;
3424 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
3425 if (rcStrict != VINF_SUCCESS)
3426 {
3427 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3428 return rcStrict;
3429 }
3430
3431 /* Must be a 64-bit code segment. */
3432 if (!DescCS.Long.Gen.u1DescType)
3433 {
3434 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3435 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3436 }
3437 if ( !DescCS.Long.Gen.u1Long
3438 || DescCS.Long.Gen.u1DefBig
3439 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
3440 {
3441 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
3442 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
3443 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3444 }
3445
3446 /* Don't allow lowering the privilege level. For non-conforming CS
3447 selectors, the CS.DPL sets the privilege level the trap/interrupt
3448 handler runs at. For conforming CS selectors, the CPL remains
3449 unchanged, but the CS.DPL must be <= CPL. */
3450 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
3451 * when CPU in Ring-0. Result \#GP? */
3452 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
3453 {
3454 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3455 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
3456 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3457 }
3458
3459
3460 /* Make sure the selector is present. */
3461 if (!DescCS.Legacy.Gen.u1Present)
3462 {
3463 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3464 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
3465 }
3466
3467 /* Check that the new RIP is canonical. */
3468 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
3469 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
3470 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
3471 if (!IEM_IS_CANONICAL(uNewRip))
3472 {
3473 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
3474 return iemRaiseGeneralProtectionFault0(pVCpu);
3475 }
3476
3477 /*
3478 * If the privilege level changes or if the IST isn't zero, we need to get
3479 * a new stack from the TSS.
3480 */
3481 uint64_t uNewRsp;
3482 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3483 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
3484 if ( uNewCpl != pVCpu->iem.s.uCpl
3485 || Idte.Gate.u3IST != 0)
3486 {
3487 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
3488 if (rcStrict != VINF_SUCCESS)
3489 return rcStrict;
3490 }
3491 else
3492 uNewRsp = pVCpu->cpum.GstCtx.rsp;
3493 uNewRsp &= ~(uint64_t)0xf;
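    /* Note: the frame pushed below is 8 * 5 = 40 bytes, or 48 bytes with an error
       code, and it goes onto the 16-byte aligned RSP established above. */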
3494
3495 /*
3496 * Calc the flag image to push.
3497 */
3498 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3499 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3500 fEfl &= ~X86_EFL_RF;
3501 else
3502 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3503
3504 /*
3505 * Start making changes.
3506 */
3507 /* Set the new CPL so that stack accesses use it. */
3508 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
3509 pVCpu->iem.s.uCpl = uNewCpl;
3510
3511 /* Create the stack frame. */
3512 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
3513 RTPTRUNION uStackFrame;
3514 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
3515 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */
3516 if (rcStrict != VINF_SUCCESS)
3517 return rcStrict;
3518 void * const pvStackFrame = uStackFrame.pv;
3519
3520 if (fFlags & IEM_XCPT_FLAGS_ERR)
3521 *uStackFrame.pu64++ = uErr;
3522 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.rip + cbInstr : pVCpu->cpum.GstCtx.rip;
3523 uStackFrame.pu64[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
3524 uStackFrame.pu64[2] = fEfl;
3525 uStackFrame.pu64[3] = pVCpu->cpum.GstCtx.rsp;
3526 uStackFrame.pu64[4] = pVCpu->cpum.GstCtx.ss.Sel;
3527 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
3528 if (rcStrict != VINF_SUCCESS)
3529 return rcStrict;
3530
3531    /* Mark the CS selector 'accessed' (hope this is the correct time). */
3532    /** @todo testcase: exactly _when_ are the accessed bits set - before or
3533 * after pushing the stack frame? (Write protect the gdt + stack to
3534 * find out.) */
3535 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3536 {
3537 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3538 if (rcStrict != VINF_SUCCESS)
3539 return rcStrict;
3540 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3541 }
3542
3543 /*
3544     * Start committing the register changes.
3545 */
3546    /** @todo research/testcase: Figure out what VT-x and AMD-V load into the
3547 * hidden registers when interrupting 32-bit or 16-bit code! */
3548 if (uNewCpl != uOldCpl)
3549 {
3550 pVCpu->cpum.GstCtx.ss.Sel = 0 | uNewCpl;
3551 pVCpu->cpum.GstCtx.ss.ValidSel = 0 | uNewCpl;
3552 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3553 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
3554 pVCpu->cpum.GstCtx.ss.u64Base = 0;
3555 pVCpu->cpum.GstCtx.ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
3556 }
3557 pVCpu->cpum.GstCtx.rsp = uNewRsp - cbStackFrame;
3558 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3559 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3560 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3561 pVCpu->cpum.GstCtx.cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
3562 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3563 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3564 pVCpu->cpum.GstCtx.rip = uNewRip;
3565
3566 fEfl &= ~fEflToClear;
3567 IEMMISC_SET_EFL(pVCpu, fEfl);
3568
3569 if (fFlags & IEM_XCPT_FLAGS_CR2)
3570 pVCpu->cpum.GstCtx.cr2 = uCr2;
3571
3572 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3573 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3574
3575 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3576}
3577
3578
3579/**
3580 * Implements exceptions and interrupts.
3581 *
3582 * All exceptions and interrupts go through this function!
3583 *
3584 * @returns VBox strict status code.
3585 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3586 * @param cbInstr The number of bytes to offset rIP by in the return
3587 * address.
3588 * @param u8Vector The interrupt / exception vector number.
3589 * @param fFlags The flags.
3590 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3591 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3592 */
3593VBOXSTRICTRC
3594iemRaiseXcptOrInt(PVMCPUCC pVCpu,
3595 uint8_t cbInstr,
3596 uint8_t u8Vector,
3597 uint32_t fFlags,
3598 uint16_t uErr,
3599 uint64_t uCr2) RT_NOEXCEPT
3600{
3601 /*
3602 * Get all the state that we might need here.
3603 */
3604 IEM_CTX_IMPORT_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3605 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3606
3607#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
3608 /*
3609 * Flush prefetch buffer
3610 */
3611 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
3612#endif
3613
3614 /*
3615 * Perform the V8086 IOPL check and upgrade the fault without nesting.
3616 */
3617 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1VM
3618 && pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL != 3
3619 && (fFlags & ( IEM_XCPT_FLAGS_T_SOFT_INT
3620 | IEM_XCPT_FLAGS_BP_INSTR
3621 | IEM_XCPT_FLAGS_ICEBP_INSTR
3622 | IEM_XCPT_FLAGS_OF_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
3623 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) )
3624 {
3625 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
3626 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
3627 u8Vector = X86_XCPT_GP;
3628 uErr = 0;
3629 }
3630#ifdef DBGFTRACE_ENABLED
3631 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
3632 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
3633 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp);
3634#endif
3635
3636 /*
3637 * Evaluate whether NMI blocking should be in effect.
3638 * Normally, NMI blocking is in effect whenever we inject an NMI.
3639 */
3640 bool fBlockNmi;
3641 if ( u8Vector == X86_XCPT_NMI
3642 && (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT))
3643 fBlockNmi = true;
3644 else
3645 fBlockNmi = false;
3646
3647#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3648 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3649 {
3650 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEvent(pVCpu, u8Vector, fFlags, uErr, uCr2, cbInstr);
3651 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
3652 return rcStrict0;
3653
3654 /* If virtual-NMI blocking is in effect for the nested-guest, guest NMIs are not blocked. */
3655 if (pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking)
3656 {
3657 Assert(CPUMIsGuestVmxPinCtlsSet(&pVCpu->cpum.GstCtx, VMX_PIN_CTLS_VIRT_NMI));
3658 fBlockNmi = false;
3659 }
3660 }
3661#endif
3662
3663#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
3664 if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
3665 {
3666 /*
3667 * If the event is being injected as part of VMRUN, it isn't subject to event
3668 * intercepts in the nested-guest. However, secondary exceptions that occur
3669 * during injection of any event -are- subject to exception intercepts.
3670 *
3671 * See AMD spec. 15.20 "Event Injection".
3672 */
3673 if (!pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents)
3674 pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents = true;
3675 else
3676 {
3677 /*
3678 * Check and handle if the event being raised is intercepted.
3679 */
3680 VBOXSTRICTRC rcStrict0 = iemHandleSvmEventIntercept(pVCpu, u8Vector, fFlags, uErr, uCr2);
3681 if (rcStrict0 != VINF_SVM_INTERCEPT_NOT_ACTIVE)
3682 return rcStrict0;
3683 }
3684 }
3685#endif
3686
3687 /*
3688 * Set NMI blocking if necessary.
3689 */
3690 if ( fBlockNmi
3691 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
3692 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
3693
3694 /*
3695 * Do recursion accounting.
3696 */
3697 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
3698 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
3699 if (pVCpu->iem.s.cXcptRecursions == 0)
3700 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
3701 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2));
3702 else
3703 {
3704 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
3705 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt,
3706 pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
3707
3708 if (pVCpu->iem.s.cXcptRecursions >= 4)
3709 {
3710#ifdef DEBUG_bird
3711 AssertFailed();
3712#endif
3713 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
3714 }
3715
3716 /*
3717 * Evaluate the sequence of recurring events.
3718 */
3719 IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevXcpt, uPrevXcpt, fFlags, u8Vector,
3720 NULL /* pXcptRaiseInfo */);
3721 if (enmRaise == IEMXCPTRAISE_CURRENT_XCPT)
3722 { /* likely */ }
3723 else if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
3724 {
3725 Log2(("iemRaiseXcptOrInt: Raising double fault. uPrevXcpt=%#x\n", uPrevXcpt));
3726 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
3727 u8Vector = X86_XCPT_DF;
3728 uErr = 0;
3729#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3730 /* VMX nested-guest #DF intercept needs to be checked here. */
3731 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3732 {
3733 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEventDoubleFault(pVCpu);
3734 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
3735 return rcStrict0;
3736 }
3737#endif
3738 /* SVM nested-guest #DF intercepts need to be checked now. See AMD spec. 15.12 "Exception Intercepts". */
3739 if (IEM_SVM_IS_XCPT_INTERCEPT_SET(pVCpu, X86_XCPT_DF))
3740 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
3741 }
3742 else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)
3743 {
3744 Log2(("iemRaiseXcptOrInt: Raising triple fault. uPrevXcpt=%#x\n", uPrevXcpt));
3745 return iemInitiateCpuShutdown(pVCpu);
3746 }
3747 else if (enmRaise == IEMXCPTRAISE_CPU_HANG)
3748 {
3749 /* If a nested-guest enters an endless CPU loop condition, we'll emulate it; otherwise guru. */
3750 Log2(("iemRaiseXcptOrInt: CPU hang condition detected\n"));
3751 if ( !CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu))
3752 && !CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
3753 return VERR_EM_GUEST_CPU_HANG;
3754 }
3755 else
3756 {
3757 AssertMsgFailed(("Unexpected condition! enmRaise=%#x uPrevXcpt=%#x fPrevXcpt=%#x, u8Vector=%#x fFlags=%#x\n",
3758 enmRaise, uPrevXcpt, fPrevXcpt, u8Vector, fFlags));
3759 return VERR_IEM_IPE_9;
3760 }
3761
3762 /*
3763         * The 'EXT' bit is set when an exception occurs during delivery of an external
3764         * event (such as an interrupt or an earlier exception)[1]. The privileged software
3765         * exception (INT1) also sets the EXT bit[2]. For exceptions generated by software
3766         * interrupts and the INTO and INT3 instructions, the 'EXT' bit is not set[3].
3767 *
3768 * [1] - Intel spec. 6.13 "Error Code"
3769 * [2] - Intel spec. 26.5.1.1 "Details of Vectored-Event Injection".
3770 * [3] - Intel Instruction reference for INT n.
3771 */
3772 if ( (fPrevXcpt & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR))
3773 && (fFlags & IEM_XCPT_FLAGS_ERR)
3774 && u8Vector != X86_XCPT_PF
3775 && u8Vector != X86_XCPT_DF)
3776 {
3777 uErr |= X86_TRAP_ERR_EXTERNAL;
3778 }
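        /* Worked example (sketch): a #GP raised while delivering an external
           interrupt thus gets X86_TRAP_ERR_EXTERNAL (bit 0) set in its error code,
           whereas one raised while delivering a software INT n does not. */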
3779 }
3780
3781 pVCpu->iem.s.cXcptRecursions++;
3782 pVCpu->iem.s.uCurXcpt = u8Vector;
3783 pVCpu->iem.s.fCurXcpt = fFlags;
3784 pVCpu->iem.s.uCurXcptErr = uErr;
3785 pVCpu->iem.s.uCurXcptCr2 = uCr2;
3786
3787 /*
3788 * Extensive logging.
3789 */
3790#if defined(LOG_ENABLED) && defined(IN_RING3)
3791 if (LogIs3Enabled())
3792 {
3793 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR_MASK);
3794 PVM pVM = pVCpu->CTX_SUFF(pVM);
3795 char szRegs[4096];
3796 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
3797 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
3798 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
3799 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
3800 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
3801 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
3802 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
3803 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
3804 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
3805 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
3806 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
3807 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
3808 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
3809 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
3810 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
3811 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
3812 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
3813 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
3814 " efer=%016VR{efer}\n"
3815 " pat=%016VR{pat}\n"
3816 " sf_mask=%016VR{sf_mask}\n"
3817 "krnl_gs_base=%016VR{krnl_gs_base}\n"
3818 " lstar=%016VR{lstar}\n"
3819 " star=%016VR{star} cstar=%016VR{cstar}\n"
3820 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
3821 );
3822
3823 char szInstr[256];
3824 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
3825 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
3826 szInstr, sizeof(szInstr), NULL);
3827 Log3(("%s%s\n", szRegs, szInstr));
3828 }
3829#endif /* LOG_ENABLED */
3830
3831 /*
3832 * Call the mode specific worker function.
3833 */
3834 VBOXSTRICTRC rcStrict;
3835 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
3836 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
3837 else if (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LMA)
3838 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
3839 else
3840 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
3841
3842 /* Flush the prefetch buffer. */
3843#ifdef IEM_WITH_CODE_TLB
3844 pVCpu->iem.s.pbInstrBuf = NULL;
3845#else
3846 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
3847#endif
3848
3849 /*
3850 * Unwind.
3851 */
3852 pVCpu->iem.s.cXcptRecursions--;
3853 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
3854 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
3855 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u depth=%d\n",
3856 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, pVCpu->iem.s.uCpl,
3857 pVCpu->iem.s.cXcptRecursions + 1));
3858 return rcStrict;
3859}
3860
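/*
 * Illustrative sketch (hypothetical caller, not part of the build): raising a
 * #GP(0) from instruction code funnels through iemRaiseXcptOrInt like this,
 * which is precisely what the iemRaiseGeneralProtectionFault0 helper below does:
 *
 *      return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP,
 *                               IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
 *                               0, 0);
 */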
3861#ifdef IEM_WITH_SETJMP
3862/**
3863 * See iemRaiseXcptOrInt. Will not return.
3864 */
3865DECL_NO_RETURN(void)
3866iemRaiseXcptOrIntJmp(PVMCPUCC pVCpu,
3867 uint8_t cbInstr,
3868 uint8_t u8Vector,
3869 uint32_t fFlags,
3870 uint16_t uErr,
3871 uint64_t uCr2) RT_NOEXCEPT
3872{
3873 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
3874 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
3875}
3876#endif
3877
3878
3879/** \#DE - 00. */
3880VBOXSTRICTRC iemRaiseDivideError(PVMCPUCC pVCpu) RT_NOEXCEPT
3881{
3882 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
3883}
3884
3885
3886/** \#DB - 01.
3887 * @note This automatically clears DR7.GD. */
3888VBOXSTRICTRC iemRaiseDebugException(PVMCPUCC pVCpu) RT_NOEXCEPT
3889{
3890 /** @todo set/clear RF. */
3891 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
3892 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
3893}
3894
3895
3896/** \#BR - 05. */
3897VBOXSTRICTRC iemRaiseBoundRangeExceeded(PVMCPUCC pVCpu) RT_NOEXCEPT
3898{
3899 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_BR, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
3900}
3901
3902
3903/** \#UD - 06. */
3904VBOXSTRICTRC iemRaiseUndefinedOpcode(PVMCPUCC pVCpu) RT_NOEXCEPT
3905{
3906 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
3907}
3908
3909
3910/** \#NM - 07. */
3911VBOXSTRICTRC iemRaiseDeviceNotAvailable(PVMCPUCC pVCpu) RT_NOEXCEPT
3912{
3913 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
3914}
3915
3916
3917/** \#TS(err) - 0a. */
3918VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
3919{
3920 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
3921}
3922
3923
3924/** \#TS(tr) - 0a. */
3925VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPUCC pVCpu) RT_NOEXCEPT
3926{
3927 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
3928 pVCpu->cpum.GstCtx.tr.Sel, 0);
3929}
3930
3931
3932/** \#TS(0) - 0a. */
3933VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPUCC pVCpu) RT_NOEXCEPT
3934{
3935 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
3936 0, 0);
3937}
3938
3939
3940/** \#TS(err) - 0a. */
3941VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
3942{
3943 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
3944 uSel & X86_SEL_MASK_OFF_RPL, 0);
3945}
3946
3947
3948/** \#NP(err) - 0b. */
3949VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
3950{
3951 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
3952}
3953
3954
3955/** \#NP(sel) - 0b. */
3956VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
3957{
3958 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
3959 uSel & ~X86_SEL_RPL, 0);
3960}
3961
3962
3963/** \#SS(seg) - 0c. */
3964VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
3965{
3966 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
3967 uSel & ~X86_SEL_RPL, 0);
3968}
3969
3970
3971/** \#SS(err) - 0c. */
3972VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
3973{
3974 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
3975}
3976
3977
3978/** \#GP(n) - 0d. */
3979VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
3980{
3981 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
3982}
3983
3984
3985/** \#GP(0) - 0d. */
3986VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPUCC pVCpu) RT_NOEXCEPT
3987{
3988 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
3989}
3990
3991#ifdef IEM_WITH_SETJMP
3992/** \#GP(0) - 0d. */
3993DECL_NO_RETURN(void) iemRaiseGeneralProtectionFault0Jmp(PVMCPUCC pVCpu) RT_NOEXCEPT
3994{
3995 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
3996}
3997#endif
3998
3999
4000/** \#GP(sel) - 0d. */
4001VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
4002{
4003 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4004 Sel & ~X86_SEL_RPL, 0);
4005}
4006
4007
4008/** \#GP(0) - 0d. */
4009VBOXSTRICTRC iemRaiseNotCanonical(PVMCPUCC pVCpu) RT_NOEXCEPT
4010{
4011 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4012}
4013
4014
4015/** \#GP(sel) - 0d. */
4016VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4017{
4018 NOREF(iSegReg); NOREF(fAccess);
4019 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4020 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4021}
4022
4023#ifdef IEM_WITH_SETJMP
4024/** \#GP(sel) - 0d, longjmp. */
4025DECL_NO_RETURN(void) iemRaiseSelectorBoundsJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4026{
4027 NOREF(iSegReg); NOREF(fAccess);
4028 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4029 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4030}
4031#endif
4032
4033/** \#GP(sel) - 0d. */
4034VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
4035{
4036 NOREF(Sel);
4037 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4038}
4039
4040#ifdef IEM_WITH_SETJMP
4041/** \#GP(sel) - 0d, longjmp. */
4042DECL_NO_RETURN(void) iemRaiseSelectorBoundsBySelectorJmp(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
4043{
4044 NOREF(Sel);
4045 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4046}
4047#endif
4048
4049
4050/** \#GP(sel) - 0d. */
4051VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4052{
4053 NOREF(iSegReg); NOREF(fAccess);
4054 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4055}
4056
4057#ifdef IEM_WITH_SETJMP
4058/** \#GP(sel) - 0d, longjmp. */
4059DECL_NO_RETURN(void) iemRaiseSelectorInvalidAccessJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4060{
4061 NOREF(iSegReg); NOREF(fAccess);
4062 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4063}
4064#endif
4065
4066
4067/** \#PF(n) - 0e. */
4068VBOXSTRICTRC iemRaisePageFault(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc) RT_NOEXCEPT
4069{
4070 uint16_t uErr;
4071 switch (rc)
4072 {
4073 case VERR_PAGE_NOT_PRESENT:
4074 case VERR_PAGE_TABLE_NOT_PRESENT:
4075 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
4076 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
4077 uErr = 0;
4078 break;
4079
4080 default:
4081 AssertMsgFailed(("%Rrc\n", rc));
4082 RT_FALL_THRU();
4083 case VERR_ACCESS_DENIED:
4084 uErr = X86_TRAP_PF_P;
4085 break;
4086
4087 /** @todo reserved */
4088 }
4089
4090 if (pVCpu->iem.s.uCpl == 3)
4091 uErr |= X86_TRAP_PF_US;
4092
4093 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
4094 && ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
4095 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) ) )
4096 uErr |= X86_TRAP_PF_ID;
4097
4098#if 0 /* This is so much non-sense, really. Why was it done like that? */
4099 /* Note! RW access callers reporting a WRITE protection fault, will clear
4100 the READ flag before calling. So, read-modify-write accesses (RW)
4101 can safely be reported as READ faults. */
4102 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
4103 uErr |= X86_TRAP_PF_RW;
4104#else
4105 if (fAccess & IEM_ACCESS_TYPE_WRITE)
4106 {
4107 /// @todo r=bird: bs3-cpu-basic-2 wants X86_TRAP_PF_RW for xchg and cmpxchg
4108 /// (regardless of outcome of the comparison in the latter case).
4109 //if (!(fAccess & IEM_ACCESS_TYPE_READ))
4110 uErr |= X86_TRAP_PF_RW;
4111 }
4112#endif
4113
4114 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
4115 uErr, GCPtrWhere);
4116}
4117
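/*
 * Worked example (sketch): a ring-3 write hitting a protection violation on a
 * present page (VERR_ACCESS_DENIED) yields uErr = X86_TRAP_PF_P | X86_TRAP_PF_RW
 * | X86_TRAP_PF_US in the logic above, and GCPtrWhere is delivered as CR2 via
 * IEM_XCPT_FLAGS_CR2.
 */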
4118#ifdef IEM_WITH_SETJMP
4119/** \#PF(n) - 0e, longjmp. */
4120DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc) RT_NOEXCEPT
4121{
4122 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, fAccess, rc)));
4123}
4124#endif
4125
4126
4127/** \#MF(0) - 10. */
4128VBOXSTRICTRC iemRaiseMathFault(PVMCPUCC pVCpu) RT_NOEXCEPT
4129{
4130 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4131}
4132
4133
4134/** \#AC(0) - 11. */
4135VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPUCC pVCpu) RT_NOEXCEPT
4136{
4137 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4138}
4139
4140#ifdef IEM_WITH_SETJMP
4141/** \#AC(0) - 11, longjmp. */
4142DECL_NO_RETURN(void) iemRaiseAlignmentCheckExceptionJmp(PVMCPUCC pVCpu) RT_NOEXCEPT
4143{
4144 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(iemRaiseAlignmentCheckException(pVCpu)));
4145}
4146#endif
4147
4148
4149/** Accessed via IEMOP_RAISE_DIVIDE_ERROR. */
4150IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
4151{
4152 NOREF(cbInstr);
4153 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4154}
4155
4156
4157/** Accessed via IEMOP_RAISE_INVALID_LOCK_PREFIX. */
4158IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
4159{
4160 NOREF(cbInstr);
4161 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4162}
4163
4164
4165/** Accessed via IEMOP_RAISE_INVALID_OPCODE. */
4166IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
4167{
4168 NOREF(cbInstr);
4169 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4170}
4171
4172
4173/** @} */
4174
4175/** @name Common opcode decoders.
4176 * @{
4177 */
4178//#include <iprt/mem.h>
4179
4180/**
4181 * Used to add extra details about a stub case.
4182 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4183 */
4184void iemOpStubMsg2(PVMCPUCC pVCpu) RT_NOEXCEPT
4185{
4186#if defined(LOG_ENABLED) && defined(IN_RING3)
4187 PVM pVM = pVCpu->CTX_SUFF(pVM);
4188 char szRegs[4096];
4189 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
4190 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
4191 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
4192 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
4193 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
4194 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
4195 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
4196 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
4197 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
4198 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
4199 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
4200 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4201 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4202 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4203 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4204 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4205 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4206 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4207 " efer=%016VR{efer}\n"
4208 " pat=%016VR{pat}\n"
4209 " sf_mask=%016VR{sf_mask}\n"
4210 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4211 " lstar=%016VR{lstar}\n"
4212 " star=%016VR{star} cstar=%016VR{cstar}\n"
4213 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4214 );
4215
4216 char szInstr[256];
4217 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4218 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4219 szInstr, sizeof(szInstr), NULL);
4220
4221 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
4222#else
4223    RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip);
4224#endif
4225}
4226
4227/** @} */
4228
4229
4230
4231/** @name Register Access.
4232 * @{
4233 */
4234
4235/**
4236 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
4237 *
4238 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4239 * segment limit.
4240 *
4241 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4242 * @param offNextInstr The offset of the next instruction.
4243 */
4244VBOXSTRICTRC iemRegRipRelativeJumpS8(PVMCPUCC pVCpu, int8_t offNextInstr) RT_NOEXCEPT
4245{
4246 switch (pVCpu->iem.s.enmEffOpSize)
4247 {
4248 case IEMMODE_16BIT:
4249 {
4250 uint16_t uNewIp = pVCpu->cpum.GstCtx.ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
4251 if ( uNewIp > pVCpu->cpum.GstCtx.cs.u32Limit
4252 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
4253 return iemRaiseGeneralProtectionFault0(pVCpu);
4254 pVCpu->cpum.GstCtx.rip = uNewIp;
4255 break;
4256 }
4257
4258 case IEMMODE_32BIT:
4259 {
4260 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
4261 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
4262
4263 uint32_t uNewEip = pVCpu->cpum.GstCtx.eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
4264 if (uNewEip > pVCpu->cpum.GstCtx.cs.u32Limit)
4265 return iemRaiseGeneralProtectionFault0(pVCpu);
4266 pVCpu->cpum.GstCtx.rip = uNewEip;
4267 break;
4268 }
4269
4270 case IEMMODE_64BIT:
4271 {
4272 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
4273
4274 uint64_t uNewRip = pVCpu->cpum.GstCtx.rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
4275 if (!IEM_IS_CANONICAL(uNewRip))
4276 return iemRaiseGeneralProtectionFault0(pVCpu);
4277 pVCpu->cpum.GstCtx.rip = uNewRip;
4278 break;
4279 }
4280
4281 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4282 }
4283
4284 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
4285
4286#ifndef IEM_WITH_CODE_TLB
4287 /* Flush the prefetch buffer. */
4288 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4289#endif
4290
4291 return VINF_SUCCESS;
4292}
4293
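/*
 * Illustrative usage sketch (hypothetical, not part of the build), assuming the
 * IEM_OPCODE_GET_NEXT_S8 decoder macro from the instruction tables: a JMP rel8
 * style implementation would fetch the signed displacement and defer the RIP
 * update plus the limit/canonical checks to the helper above:
 *
 *      int8_t i8Imm;
 *      IEM_OPCODE_GET_NEXT_S8(&i8Imm);
 *      return iemRegRipRelativeJumpS8(pVCpu, i8Imm);
 */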
4294
4295/**
4296 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
4297 *
4298 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4299 * segment limit.
4300 *
4301 * @returns Strict VBox status code.
4302 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4303 * @param offNextInstr The offset of the next instruction.
4304 */
4305VBOXSTRICTRC iemRegRipRelativeJumpS16(PVMCPUCC pVCpu, int16_t offNextInstr) RT_NOEXCEPT
4306{
4307 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
4308
4309 uint16_t uNewIp = pVCpu->cpum.GstCtx.ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
4310 if ( uNewIp > pVCpu->cpum.GstCtx.cs.u32Limit
4311 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
4312 return iemRaiseGeneralProtectionFault0(pVCpu);
4313 /** @todo Test 16-bit jump in 64-bit mode. possible? */
4314 pVCpu->cpum.GstCtx.rip = uNewIp;
4315 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
4316
4317#ifndef IEM_WITH_CODE_TLB
4318 /* Flush the prefetch buffer. */
4319 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4320#endif
4321
4322 return VINF_SUCCESS;
4323}
4324
4325
4326/**
4327 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
4328 *
4329 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4330 * segment limit.
4331 *
4332 * @returns Strict VBox status code.
4333 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4334 * @param offNextInstr The offset of the next instruction.
4335 */
4336VBOXSTRICTRC iemRegRipRelativeJumpS32(PVMCPUCC pVCpu, int32_t offNextInstr) RT_NOEXCEPT
4337{
4338 Assert(pVCpu->iem.s.enmEffOpSize != IEMMODE_16BIT);
4339
4340 if (pVCpu->iem.s.enmEffOpSize == IEMMODE_32BIT)
4341 {
4342 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX); Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
4343
4344 uint32_t uNewEip = pVCpu->cpum.GstCtx.eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
4345 if (uNewEip > pVCpu->cpum.GstCtx.cs.u32Limit)
4346 return iemRaiseGeneralProtectionFault0(pVCpu);
4347 pVCpu->cpum.GstCtx.rip = uNewEip;
4348 }
4349 else
4350 {
4351 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
4352
4353 uint64_t uNewRip = pVCpu->cpum.GstCtx.rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
4354 if (!IEM_IS_CANONICAL(uNewRip))
4355 return iemRaiseGeneralProtectionFault0(pVCpu);
4356 pVCpu->cpum.GstCtx.rip = uNewRip;
4357 }
4358 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
4359
4360#ifndef IEM_WITH_CODE_TLB
4361 /* Flush the prefetch buffer. */
4362 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4363#endif
4364
4365 return VINF_SUCCESS;
4366}
4367
4368
4369/**
4370 * Performs a near jump to the specified address.
4371 *
4372 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4373 * segment limit.
4374 *
4375 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4376 * @param uNewRip The new RIP value.
4377 */
4378VBOXSTRICTRC iemRegRipJump(PVMCPUCC pVCpu, uint64_t uNewRip) RT_NOEXCEPT
4379{
4380 switch (pVCpu->iem.s.enmEffOpSize)
4381 {
4382 case IEMMODE_16BIT:
4383 {
4384 Assert(uNewRip <= UINT16_MAX);
4385 if ( uNewRip > pVCpu->cpum.GstCtx.cs.u32Limit
4386 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
4387 return iemRaiseGeneralProtectionFault0(pVCpu);
4388 /** @todo Test 16-bit jump in 64-bit mode. */
4389 pVCpu->cpum.GstCtx.rip = uNewRip;
4390 break;
4391 }
4392
4393 case IEMMODE_32BIT:
4394 {
4395 Assert(uNewRip <= UINT32_MAX);
4396 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
4397 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
4398
4399 if (uNewRip > pVCpu->cpum.GstCtx.cs.u32Limit)
4400 return iemRaiseGeneralProtectionFault0(pVCpu);
4401 pVCpu->cpum.GstCtx.rip = uNewRip;
4402 break;
4403 }
4404
4405 case IEMMODE_64BIT:
4406 {
4407 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
4408
4409 if (!IEM_IS_CANONICAL(uNewRip))
4410 return iemRaiseGeneralProtectionFault0(pVCpu);
4411 pVCpu->cpum.GstCtx.rip = uNewRip;
4412 break;
4413 }
4414
4415 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4416 }
4417
4418 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
4419
4420#ifndef IEM_WITH_CODE_TLB
4421 /* Flush the prefetch buffer. */
4422 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4423#endif
4424
4425 return VINF_SUCCESS;
4426}
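/*
 * Illustrative usage sketch (hypothetical, not part of the build): a register
 * indirect near jump would, operand-size handling aside, simply hand the new
 * value to the helper above and let it do the limit/canonical checking:
 *
 *      return iemRegRipJump(pVCpu, pVCpu->cpum.GstCtx.rax);
 */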
4427
4428/** @} */
4429
4430
4431/** @name FPU access and helpers.
4432 *
4433 * @{
4434 */
4435
4436/**
4437 * Updates the x87.DS and FPUDP registers.
4438 *
4439 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4440 * @param pFpuCtx The FPU context.
4441 * @param iEffSeg The effective segment register.
4442 * @param GCPtrEff The effective address relative to @a iEffSeg.
4443 */
4444DECLINLINE(void) iemFpuUpdateDP(PVMCPUCC pVCpu, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
4445{
4446 RTSEL sel;
4447 switch (iEffSeg)
4448 {
4449 case X86_SREG_DS: sel = pVCpu->cpum.GstCtx.ds.Sel; break;
4450 case X86_SREG_SS: sel = pVCpu->cpum.GstCtx.ss.Sel; break;
4451 case X86_SREG_CS: sel = pVCpu->cpum.GstCtx.cs.Sel; break;
4452 case X86_SREG_ES: sel = pVCpu->cpum.GstCtx.es.Sel; break;
4453 case X86_SREG_FS: sel = pVCpu->cpum.GstCtx.fs.Sel; break;
4454 case X86_SREG_GS: sel = pVCpu->cpum.GstCtx.gs.Sel; break;
4455 default:
4456 AssertMsgFailed(("%d\n", iEffSeg));
4457 sel = pVCpu->cpum.GstCtx.ds.Sel;
4458 }
4459    /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
4460 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
4461 {
4462 pFpuCtx->DS = 0;
4463 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
4464 }
4465 else if (!IEM_IS_LONG_MODE(pVCpu))
4466 {
4467 pFpuCtx->DS = sel;
4468 pFpuCtx->FPUDP = GCPtrEff;
4469 }
4470 else
4471 *(uint64_t *)&pFpuCtx->FPUDP = GCPtrEff;
4472}
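/*
 * Worked example (sketch): in real or V86 mode with sel=0x1234 and GCPtrEff=0x10,
 * the helper above stores FPUDP = 0x10 + (0x1234 << 4) = 0x12350 and zeroes the
 * DS field; in protected mode the selector and offset are stored separately.
 */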
4473
4474
4475/**
4476 * Rotates the stack registers in the push direction.
4477 *
4478 * @param pFpuCtx The FPU context.
4479 * @remarks This is a complete waste of time, but fxsave stores the registers in
4480 * stack order.
4481 */
4482DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
4483{
4484 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
4485 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
4486 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
4487 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
4488 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
4489 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
4490 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
4491 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
4492 pFpuCtx->aRegs[0].r80 = r80Tmp;
4493}
4494
4495
4496/**
4497 * Rotates the stack registers in the pop direction.
4498 *
4499 * @param pFpuCtx The FPU context.
4500 * @remarks This is a complete waste of time, but fxsave stores the registers in
4501 * stack order.
4502 */
4503DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
4504{
4505 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
4506 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
4507 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
4508 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
4509 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
4510 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
4511 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
4512 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
4513 pFpuCtx->aRegs[7].r80 = r80Tmp;
4514}
4515
4516
4517/**
4518 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
4519 * exception prevents it.
4520 *
4521 * @param pResult The FPU operation result to push.
4522 * @param pFpuCtx The FPU context.
4523 */
4524static void iemFpuMaybePushResult(PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx) RT_NOEXCEPT
4525{
4526 /* Update FSW and bail if there are pending exceptions afterwards. */
4527 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
4528 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
4529 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4530 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4531 {
4532 pFpuCtx->FSW = fFsw;
4533 return;
4534 }
4535
4536 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
4537 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
4538 {
4539 /* All is fine, push the actual value. */
4540 pFpuCtx->FTW |= RT_BIT(iNewTop);
4541 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
4542 }
4543 else if (pFpuCtx->FCW & X86_FCW_IM)
4544 {
4545 /* Masked stack overflow, push QNaN. */
4546 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
4547 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
4548 }
4549 else
4550 {
4551 /* Raise stack overflow, don't push anything. */
4552 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
4553 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
4554 return;
4555 }
4556
4557 fFsw &= ~X86_FSW_TOP_MASK;
4558 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
4559 pFpuCtx->FSW = fFsw;
4560
4561 iemFpuRotateStackPush(pFpuCtx);
4562}
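
/*
 * Illustrative sketch only (not compiled): the pending-exception test used by
 * iemFpuMaybePushResult above.  The push is suppressed when any of the IE, ZE
 * or DE status bits is set while the corresponding FCW mask bit is clear (the
 * mask bits sit at the same bit positions).  The helper name is hypothetical.
 */
#if 0
static bool exampleHasUnmaskedIeZeDe(uint16_t fFsw, uint16_t fFcw)
{
    /* Status bits with their FCW mask bits cleared => unmasked pending exception. */
    return RT_BOOL(  (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
                   & ~(fFcw & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)));
}
#endif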
4563
4564
4565/**
4566 * Stores a result in a FPU register and updates the FSW and FTW.
4567 *
4568 * @param pFpuCtx The FPU context.
4569 * @param pResult The result to store.
4570 * @param iStReg Which FPU register to store it in.
4571 */
4572static void iemFpuStoreResultOnly(PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg) RT_NOEXCEPT
4573{
4574 Assert(iStReg < 8);
4575 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
4576 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
4577 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_TOP_MASK;
4578 pFpuCtx->FTW |= RT_BIT(iReg);
4579 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
4580}
4581
4582
4583/**
4584 * Only updates the FPU status word (FSW) with the result of the current
4585 * instruction.
4586 *
4587 * @param pFpuCtx The FPU context.
4588 * @param u16FSW The FSW output of the current instruction.
4589 */
4590static void iemFpuUpdateFSWOnly(PX86FXSTATE pFpuCtx, uint16_t u16FSW) RT_NOEXCEPT
4591{
4592 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
4593 pFpuCtx->FSW |= u16FSW & ~X86_FSW_TOP_MASK;
4594}
4595
4596
4597/**
4598 * Pops one item off the FPU stack if no pending exception prevents it.
4599 *
4600 * @param pFpuCtx The FPU context.
4601 */
4602static void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx) RT_NOEXCEPT
4603{
4604 /* Check pending exceptions. */
4605 uint16_t uFSW = pFpuCtx->FSW;
4606 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4607 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4608 return;
4609
4610 /* Advance TOP one step in the pop direction: TOP = (TOP + 1) & 7. */
4611 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
4612 uFSW &= ~X86_FSW_TOP_MASK;
4613 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
4614 pFpuCtx->FSW = uFSW;
4615
4616 /* Mark the previous ST0 as empty. */
4617 iOldTop >>= X86_FSW_TOP_SHIFT;
4618 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
4619
4620 /* Rotate the registers. */
4621 iemFpuRotateStackPop(pFpuCtx);
4622}
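
/*
 * Illustrative sketch only (not compiled): the modulo-8 TOP arithmetic used by
 * the push and pop helpers above.  Pushing decrements TOP (adding 7 modulo 8),
 * popping increments it (adding 1 modulo 8, which the code above spells as
 * adding 9 before masking).  The helper names are hypothetical.
 */
#if 0
static uint16_t exampleTopAfterPush(uint16_t fFsw)
{
    return (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK; /* TOP - 1 (mod 8) */
}

static uint16_t exampleTopAfterPop(uint16_t fFsw)
{
    return (X86_FSW_TOP_GET(fFsw) + 1) & X86_FSW_TOP_SMASK; /* TOP + 1 (mod 8) */
}
#endif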
4623
4624
4625/**
4626 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
4627 *
4628 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4629 * @param pResult The FPU operation result to push.
4630 */
4631void iemFpuPushResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult) RT_NOEXCEPT
4632{
4633 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4634 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4635 iemFpuMaybePushResult(pResult, pFpuCtx);
4636}
4637
4638
4639/**
4640 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
4641 * and sets FPUDP and FPUDS.
4642 *
4643 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4644 * @param pResult The FPU operation result to push.
4645 * @param iEffSeg The effective segment register.
4646 * @param GCPtrEff The effective address relative to @a iEffSeg.
4647 */
4648void iemFpuPushResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
4649{
4650 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4651 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
4652 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4653 iemFpuMaybePushResult(pResult, pFpuCtx);
4654}
4655
4656
4657/**
4658 * Replace ST0 with the first value and push the second onto the FPU stack,
4659 * unless a pending exception prevents it.
4660 *
4661 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4662 * @param pResult The FPU operation result to store and push.
4663 */
4664void iemFpuPushResultTwo(PVMCPUCC pVCpu, PIEMFPURESULTTWO pResult) RT_NOEXCEPT
4665{
4666 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4667 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4668
4669 /* Update FSW and bail if there are pending exceptions afterwards. */
4670 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
4671 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
4672 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4673 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4674 {
4675 pFpuCtx->FSW = fFsw;
4676 return;
4677 }
4678
4679 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
4680 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
4681 {
4682 /* All is fine, push the actual value. */
4683 pFpuCtx->FTW |= RT_BIT(iNewTop);
4684 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
4685 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
4686 }
4687 else if (pFpuCtx->FCW & X86_FCW_IM)
4688 {
4689 /* Masked stack overflow, push QNaN. */
4690 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
4691 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
4692 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
4693 }
4694 else
4695 {
4696 /* Raise stack overflow, don't push anything. */
4697 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
4698 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
4699 return;
4700 }
4701
4702 fFsw &= ~X86_FSW_TOP_MASK;
4703 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
4704 pFpuCtx->FSW = fFsw;
4705
4706 iemFpuRotateStackPush(pFpuCtx);
4707}
4708
4709
4710/**
4711 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
4712 * FOP.
4713 *
4714 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4715 * @param pResult The result to store.
4716 * @param iStReg Which FPU register to store it in.
4717 */
4718void iemFpuStoreResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg) RT_NOEXCEPT
4719{
4720 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4721 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4722 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
4723}
4724
4725
4726/**
4727 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
4728 * FOP, and then pops the stack.
4729 *
4730 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4731 * @param pResult The result to store.
4732 * @param iStReg Which FPU register to store it in.
4733 */
4734void iemFpuStoreResultThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg) RT_NOEXCEPT
4735{
4736 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4737 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4738 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
4739 iemFpuMaybePopOne(pFpuCtx);
4740}
4741
4742
4743/**
4744 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
4745 * FPUDP, and FPUDS.
4746 *
4747 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4748 * @param pResult The result to store.
4749 * @param iStReg Which FPU register to store it in.
4750 * @param iEffSeg The effective memory operand selector register.
4751 * @param GCPtrEff The effective memory operand offset.
4752 */
4753void iemFpuStoreResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
4754 uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
4755{
4756 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4757 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
4758 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4759 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
4760}
4761
4762
4763/**
4764 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
4765 * FPUDP, and FPUDS, and then pops the stack.
4766 *
4767 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4768 * @param pResult The result to store.
4769 * @param iStReg Which FPU register to store it in.
4770 * @param iEffSeg The effective memory operand selector register.
4771 * @param GCPtrEff The effective memory operand offset.
4772 */
4773void iemFpuStoreResultWithMemOpThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult,
4774 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
4775{
4776 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4777 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
4778 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4779 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
4780 iemFpuMaybePopOne(pFpuCtx);
4781}
4782
4783
4784/**
4785 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
4786 *
4787 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4788 */
4789void iemFpuUpdateOpcodeAndIp(PVMCPUCC pVCpu) RT_NOEXCEPT
4790{
4791 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4792 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4793}
4794
4795
4796/**
4797 * Updates the FSW, FOP, FPUIP, and FPUCS.
4798 *
4799 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4800 * @param u16FSW The FSW from the current instruction.
4801 */
4802void iemFpuUpdateFSW(PVMCPUCC pVCpu, uint16_t u16FSW) RT_NOEXCEPT
4803{
4804 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4805 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4806 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
4807}
4808
4809
4810/**
4811 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
4812 *
4813 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4814 * @param u16FSW The FSW from the current instruction.
4815 */
4816void iemFpuUpdateFSWThenPop(PVMCPUCC pVCpu, uint16_t u16FSW) RT_NOEXCEPT
4817{
4818 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4819 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4820 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
4821 iemFpuMaybePopOne(pFpuCtx);
4822}
4823
4824
4825/**
4826 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
4827 *
4828 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4829 * @param u16FSW The FSW from the current instruction.
4830 * @param iEffSeg The effective memory operand selector register.
4831 * @param GCPtrEff The effective memory operand offset.
4832 */
4833void iemFpuUpdateFSWWithMemOp(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
4834{
4835 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4836 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
4837 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4838 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
4839}
4840
4841
4842/**
4843 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
4844 *
4845 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4846 * @param u16FSW The FSW from the current instruction.
4847 */
4848void iemFpuUpdateFSWThenPopPop(PVMCPUCC pVCpu, uint16_t u16FSW) RT_NOEXCEPT
4849{
4850 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4851 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4852 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
4853 iemFpuMaybePopOne(pFpuCtx);
4854 iemFpuMaybePopOne(pFpuCtx);
4855}
4856
4857
4858/**
4859 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
4860 *
4861 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4862 * @param u16FSW The FSW from the current instruction.
4863 * @param iEffSeg The effective memory operand selector register.
4864 * @param GCPtrEff The effective memory operand offset.
4865 */
4866void iemFpuUpdateFSWWithMemOpThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
4867{
4868 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4869 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
4870 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4871 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
4872 iemFpuMaybePopOne(pFpuCtx);
4873}
4874
4875
4876/**
4877 * Worker routine for raising an FPU stack underflow exception.
4878 *
4879 * @param pFpuCtx The FPU context.
4880 * @param iStReg The stack register being accessed.
4881 */
4882static void iemFpuStackUnderflowOnly(PX86FXSTATE pFpuCtx, uint8_t iStReg)
4883{
4884 Assert(iStReg < 8 || iStReg == UINT8_MAX);
4885 if (pFpuCtx->FCW & X86_FCW_IM)
4886 {
4887 /* Masked underflow. */
4888 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
4889 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
4890 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
4891 if (iStReg != UINT8_MAX)
4892 {
4893 pFpuCtx->FTW |= RT_BIT(iReg);
4894 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
4895 }
4896 }
4897 else
4898 {
4899 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
4900 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
4901 }
4902}
4903
4904
4905/**
4906 * Raises a FPU stack underflow exception.
4907 *
4908 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4909 * @param iStReg The destination register that should be loaded
4910 * with QNaN if \#IS is not masked. Specify
4911 * UINT8_MAX if none (like for fcom).
4912 */
4913void iemFpuStackUnderflow(PVMCPUCC pVCpu, uint8_t iStReg) RT_NOEXCEPT
4914{
4915 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4916 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4917 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
4918}
4919
4920
4921void iemFpuStackUnderflowWithMemOp(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
4922{
4923 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4924 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
4925 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4926 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
4927}
4928
4929
4930void iemFpuStackUnderflowThenPop(PVMCPUCC pVCpu, uint8_t iStReg) RT_NOEXCEPT
4931{
4932 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4933 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4934 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
4935 iemFpuMaybePopOne(pFpuCtx);
4936}
4937
4938
4939void iemFpuStackUnderflowWithMemOpThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
4940{
4941 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4942 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
4943 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4944 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
4945 iemFpuMaybePopOne(pFpuCtx);
4946}
4947
4948
4949void iemFpuStackUnderflowThenPopPop(PVMCPUCC pVCpu) RT_NOEXCEPT
4950{
4951 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4952 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4953 iemFpuStackUnderflowOnly(pFpuCtx, UINT8_MAX);
4954 iemFpuMaybePopOne(pFpuCtx);
4955 iemFpuMaybePopOne(pFpuCtx);
4956}
4957
4958
4959void iemFpuStackPushUnderflow(PVMCPUCC pVCpu) RT_NOEXCEPT
4960{
4961 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4962 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4963
4964 if (pFpuCtx->FCW & X86_FCW_IM)
4965 {
4966 /* Masked stack underflow - push QNaN. */
4967 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
4968 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
4969 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
4970 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
4971 pFpuCtx->FTW |= RT_BIT(iNewTop);
4972 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
4973 iemFpuRotateStackPush(pFpuCtx);
4974 }
4975 else
4976 {
4977 /* Exception pending - don't change TOP or the register stack. */
4978 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
4979 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
4980 }
4981}
4982
4983
4984void iemFpuStackPushUnderflowTwo(PVMCPUCC pVCpu) RT_NOEXCEPT
4985{
4986 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4987 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4988
4989 if (pFpuCtx->FCW & X86_FCW_IM)
4990 {
4991 /* Masked stack underflow - push QNaN. */
4992 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
4993 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
4994 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
4995 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
4996 pFpuCtx->FTW |= RT_BIT(iNewTop);
4997 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
4998 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
4999 iemFpuRotateStackPush(pFpuCtx);
5000 }
5001 else
5002 {
5003 /* Exception pending - don't change TOP or the register stack. */
5004 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5005 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5006 }
5007}
5008
5009
5010/**
5011 * Worker routine for raising an FPU stack overflow exception on a push.
5012 *
5013 * @param pFpuCtx The FPU context.
5014 */
5015static void iemFpuStackPushOverflowOnly(PX86FXSTATE pFpuCtx) RT_NOEXCEPT
5016{
5017 if (pFpuCtx->FCW & X86_FCW_IM)
5018 {
5019 /* Masked overflow. */
5020 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5021 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5022 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
5023 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5024 pFpuCtx->FTW |= RT_BIT(iNewTop);
5025 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5026 iemFpuRotateStackPush(pFpuCtx);
5027 }
5028 else
5029 {
5030 /* Exception pending - don't change TOP or the register stack. */
5031 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5032 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5033 }
5034}
5035
5036
5037/**
5038 * Raises a FPU stack overflow exception on a push.
5039 *
5040 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5041 */
5042void iemFpuStackPushOverflow(PVMCPUCC pVCpu) RT_NOEXCEPT
5043{
5044 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5045 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5046 iemFpuStackPushOverflowOnly(pFpuCtx);
5047}
5048
5049
5050/**
5051 * Raises a FPU stack overflow exception on a push with a memory operand.
5052 *
5053 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5054 * @param iEffSeg The effective memory operand selector register.
5055 * @param GCPtrEff The effective memory operand offset.
5056 */
5057void iemFpuStackPushOverflowWithMemOp(PVMCPUCC pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
5058{
5059 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5060 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5061 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5062 iemFpuStackPushOverflowOnly(pFpuCtx);
5063}
5064
5065/** @} */
5066
5067
5068/** @name Memory access.
5069 *
5070 * @{
5071 */
5072
5073
5074/**
5075 * Updates the IEMCPU::cbWritten counter if applicable.
5076 *
5077 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5078 * @param fAccess The access being accounted for.
5079 * @param cbMem The access size.
5080 */
5081DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPUCC pVCpu, uint32_t fAccess, size_t cbMem)
5082{
5083 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
5084 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
5085 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
5086}
5087
5088
5089/**
5090 * Applies the segment limit, base and attributes.
5091 *
5092 * This may raise a \#GP or \#SS.
5093 *
5094 * @returns VBox strict status code.
5095 *
5096 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5097 * @param fAccess The kind of access which is being performed.
5098 * @param iSegReg The index of the segment register to apply.
5099 * This is UINT8_MAX if none (for IDT, GDT, LDT,
5100 * TSS, ++).
5101 * @param cbMem The access size.
5102 * @param pGCPtrMem Pointer to the guest memory address to apply
5103 * segmentation to. Input and output parameter.
5104 */
5105VBOXSTRICTRC iemMemApplySegment(PVMCPUCC pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem) RT_NOEXCEPT
5106{
5107 if (iSegReg == UINT8_MAX)
5108 return VINF_SUCCESS;
5109
5110 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
5111 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
5112 switch (pVCpu->iem.s.enmCpuMode)
5113 {
5114 case IEMMODE_16BIT:
5115 case IEMMODE_32BIT:
5116 {
5117 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
5118 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
5119
5120 if ( pSel->Attr.n.u1Present
5121 && !pSel->Attr.n.u1Unusable)
5122 {
5123 Assert(pSel->Attr.n.u1DescType);
5124 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
5125 {
5126 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5127 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
5128 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
5129
5130 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
5131 {
5132 /** @todo CPL check. */
5133 }
5134
5135 /*
5136 * There are two kinds of data selectors, normal and expand down.
5137 */
5138 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
5139 {
5140 if ( GCPtrFirst32 > pSel->u32Limit
5141 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
5142 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5143 }
5144 else
5145 {
5146 /*
5147 * The upper boundary is defined by the B bit, not the G bit!
5148 */
5149 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
5150 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
5151 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5152 }
5153 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
5154 }
5155 else
5156 {
5157 /*
5158 * Code selectors can usually be used to read through; writing is
5159 * only permitted in real and V8086 mode.
5160 */
5161 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5162 || ( (fAccess & IEM_ACCESS_TYPE_READ)
5163 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
5164 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
5165 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
5166
5167 if ( GCPtrFirst32 > pSel->u32Limit
5168 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
5169 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5170
5171 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
5172 {
5173 /** @todo CPL check. */
5174 }
5175
5176 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
5177 }
5178 }
5179 else
5180 return iemRaiseGeneralProtectionFault0(pVCpu);
5181 return VINF_SUCCESS;
5182 }
5183
5184 case IEMMODE_64BIT:
5185 {
5186 RTGCPTR GCPtrMem = *pGCPtrMem;
5187 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
5188 *pGCPtrMem = GCPtrMem + pSel->u64Base;
5189
5190 Assert(cbMem >= 1);
5191 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
5192 return VINF_SUCCESS;
5193 /** @todo We should probably raise \#SS(0) here if segment is SS; see AMD spec.
5194 * 4.12.2 "Data Limit Checks in 64-bit Mode". */
5195 return iemRaiseGeneralProtectionFault0(pVCpu);
5196 }
5197
5198 default:
5199 AssertFailedReturn(VERR_IEM_IPE_7);
5200 }
5201}
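
/*
 * Illustrative sketch only (not compiled): the valid offset range for an
 * expand-down data segment as checked by iemMemApplySegment above.  Offsets
 * must lie strictly above the limit and at or below 0xffffffff (B=1) or
 * 0xffff (B=0).  The helper name is hypothetical.
 */
#if 0
static bool exampleIsWithinExpandDown(uint32_t offFirst, uint32_t offLast, uint32_t uLimit, bool fDefBig)
{
    uint32_t const uUpperBound = fDefBig ? UINT32_MAX : UINT32_C(0xffff);
    return offFirst > uLimit && offLast <= uUpperBound;
}
#endif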
5202
5203
5204/**
5205 * Translates a virtual address to a physical address and checks if we
5206 * can access the page as specified.
5207 *
5208 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5209 * @param GCPtrMem The virtual address.
5210 * @param fAccess The intended access.
5211 * @param pGCPhysMem Where to return the physical address.
5212 */
5213VBOXSTRICTRC iemMemPageTranslateAndCheckAccess(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint32_t fAccess, PRTGCPHYS pGCPhysMem) RT_NOEXCEPT
5214{
5215 /** @todo Need a different PGM interface here. We're currently using
5216 * generic / REM interfaces. This won't cut it for R0. */
5217 /** @todo If/when PGM handles paged real-mode, we can remove the hack in
5218 * iemSvmWorldSwitch/iemVmxWorldSwitch to work around raising a page-fault
5219 * here. */
5220 PGMPTWALK Walk;
5221 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
5222 if (RT_FAILURE(rc))
5223 {
5224 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
5225 /** @todo Check unassigned memory in unpaged mode. */
5226 /** @todo Reserved bits in page tables. Requires new PGM interface. */
5227#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5228 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5229 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
5230#endif
5231 *pGCPhysMem = NIL_RTGCPHYS;
5232 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, rc);
5233 }
5234
5235 /* If the page is writable and does not have the no-exec bit set, all
5236 access is allowed. Otherwise we'll have to check more carefully... */
5237 if ((Walk.fEffective & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
5238 {
5239 /* Write to read only memory? */
5240 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5241 && !(Walk.fEffective & X86_PTE_RW)
5242 && ( ( pVCpu->iem.s.uCpl == 3
5243 && !(fAccess & IEM_ACCESS_WHAT_SYS))
5244 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
5245 {
5246 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
5247 *pGCPhysMem = NIL_RTGCPHYS;
5248#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5249 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5250 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5251#endif
5252 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
5253 }
5254
5255 /* Kernel memory accessed by userland? */
5256 if ( !(Walk.fEffective & X86_PTE_US)
5257 && pVCpu->iem.s.uCpl == 3
5258 && !(fAccess & IEM_ACCESS_WHAT_SYS))
5259 {
5260 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
5261 *pGCPhysMem = NIL_RTGCPHYS;
5262#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5263 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5264 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5265#endif
5266 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
5267 }
5268
5269 /* Executing non-executable memory? */
5270 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
5271 && (Walk.fEffective & X86_PTE_PAE_NX)
5272 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) )
5273 {
5274 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
5275 *pGCPhysMem = NIL_RTGCPHYS;
5276#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5277 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5278 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5279#endif
5280 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
5281 VERR_ACCESS_DENIED);
5282 }
5283 }
5284
5285 /*
5286 * Set the dirty / access flags.
5287 * ASSUMES this is set when the address is translated rather than on commit...
5288 */
5289 /** @todo testcase: check when A and D bits are actually set by the CPU. */
5290 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
5291 if ((Walk.fEffective & fAccessedDirty) != fAccessedDirty)
5292 {
5293 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
5294 AssertRC(rc2);
5295 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
5296 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
5297 }
5298
5299 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
5300 *pGCPhysMem = GCPhys;
5301 return VINF_SUCCESS;
5302}
5303
5304
5305/**
5306 * Looks up a memory mapping entry.
5307 *
5308 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
5309 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5310 * @param pvMem The memory address.
5311 * @param fAccess The access type and origin (IEM_ACCESS_TYPE_XXX and IEM_ACCESS_WHAT_XXX) of the mapping to look up.
5312 */
5313DECLINLINE(int) iemMapLookup(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess)
5314{
5315 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
5316 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
5317 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
5318 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5319 return 0;
5320 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
5321 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5322 return 1;
5323 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
5324 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5325 return 2;
5326 return VERR_NOT_FOUND;
5327}
5328
5329
5330/**
5331 * Finds a free memmap entry when using iNextMapping doesn't work.
5332 *
5333 * @returns Memory mapping index, 1024 on failure.
5334 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5335 */
5336static unsigned iemMemMapFindFree(PVMCPUCC pVCpu)
5337{
5338 /*
5339 * The easy case.
5340 */
5341 if (pVCpu->iem.s.cActiveMappings == 0)
5342 {
5343 pVCpu->iem.s.iNextMapping = 1;
5344 return 0;
5345 }
5346
5347 /* There should be enough mappings for all instructions. */
5348 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
5349
5350 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
5351 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
5352 return i;
5353
5354 AssertFailedReturn(1024);
5355}
5356
5357
5358/**
5359 * Commits a bounce buffer that needs writing back and unmaps it.
5360 *
5361 * @returns Strict VBox status code.
5362 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5363 * @param iMemMap The index of the buffer to commit.
5364 * @param fPostponeFail Whether we can postpone write failures to ring-3.
5365 * Always false in ring-3, obviously.
5366 */
5367static VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPUCC pVCpu, unsigned iMemMap, bool fPostponeFail)
5368{
5369 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
5370 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
5371#ifdef IN_RING3
5372 Assert(!fPostponeFail);
5373 RT_NOREF_PV(fPostponeFail);
5374#endif
5375
5376 /*
5377 * Do the writing.
5378 */
5379 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
5380 if (!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned)
5381 {
5382 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
5383 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
5384 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
5385 if (!pVCpu->iem.s.fBypassHandlers)
5386 {
5387 /*
5388 * Carefully and efficiently dealing with access handler return
5389 * codes makes this a little bloated.
5390 */
5391 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
5392 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
5393 pbBuf,
5394 cbFirst,
5395 PGMACCESSORIGIN_IEM);
5396 if (rcStrict == VINF_SUCCESS)
5397 {
5398 if (cbSecond)
5399 {
5400 rcStrict = PGMPhysWrite(pVM,
5401 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5402 pbBuf + cbFirst,
5403 cbSecond,
5404 PGMACCESSORIGIN_IEM);
5405 if (rcStrict == VINF_SUCCESS)
5406 { /* nothing */ }
5407 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5408 {
5409 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
5410 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5411 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5412 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5413 }
5414#ifndef IN_RING3
5415 else if (fPostponeFail)
5416 {
5417 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5418 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5419 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5420 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
5421 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5422 return iemSetPassUpStatus(pVCpu, rcStrict);
5423 }
5424#endif
5425 else
5426 {
5427 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5428 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5429 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5430 return rcStrict;
5431 }
5432 }
5433 }
5434 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5435 {
5436 if (!cbSecond)
5437 {
5438 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
5439 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
5440 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5441 }
5442 else
5443 {
5444 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
5445 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5446 pbBuf + cbFirst,
5447 cbSecond,
5448 PGMACCESSORIGIN_IEM);
5449 if (rcStrict2 == VINF_SUCCESS)
5450 {
5451 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
5452 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5453 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5454 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5455 }
5456 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
5457 {
5458 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
5459 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5460 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
5461 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
5462 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5463 }
5464#ifndef IN_RING3
5465 else if (fPostponeFail)
5466 {
5467 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5468 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5469 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5470 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
5471 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5472 return iemSetPassUpStatus(pVCpu, rcStrict);
5473 }
5474#endif
5475 else
5476 {
5477 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5478 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5479 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
5480 return rcStrict2;
5481 }
5482 }
5483 }
5484#ifndef IN_RING3
5485 else if (fPostponeFail)
5486 {
5487 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5488 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5489 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5490 if (!cbSecond)
5491 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
5492 else
5493 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
5494 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5495 return iemSetPassUpStatus(pVCpu, rcStrict);
5496 }
5497#endif
5498 else
5499 {
5500 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
5501 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5502 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5503 return rcStrict;
5504 }
5505 }
5506 else
5507 {
5508 /*
5509 * No access handlers, much simpler.
5510 */
5511 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
5512 if (RT_SUCCESS(rc))
5513 {
5514 if (cbSecond)
5515 {
5516 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
5517 if (RT_SUCCESS(rc))
5518 { /* likely */ }
5519 else
5520 {
5521 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5522 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5523 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
5524 return rc;
5525 }
5526 }
5527 }
5528 else
5529 {
5530 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
5531 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
5532 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5533 return rc;
5534 }
5535 }
5536 }
5537
5538#if defined(IEM_LOG_MEMORY_WRITES)
5539 Log(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
5540 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
5541 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
5542 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5543 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
5544 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
5545
5546 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
5547 g_cbIemWrote = cbWrote;
5548 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
5549#endif
5550
5551 /*
5552 * Free the mapping entry.
5553 */
5554 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
5555 Assert(pVCpu->iem.s.cActiveMappings != 0);
5556 pVCpu->iem.s.cActiveMappings--;
5557 return VINF_SUCCESS;
5558}
5559
5560
5561/**
5562 * iemMemMap worker that deals with a request crossing pages.
5563 */
5564static VBOXSTRICTRC
5565iemMemBounceBufferMapCrossPage(PVMCPUCC pVCpu, int iMemMap, void **ppvMem, size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
5566{
5567 /*
5568 * Do the address translations.
5569 */
5570 RTGCPHYS GCPhysFirst;
5571 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, fAccess, &GCPhysFirst);
5572 if (rcStrict != VINF_SUCCESS)
5573 return rcStrict;
5574
5575 RTGCPHYS GCPhysSecond;
5576 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK,
5577 fAccess, &GCPhysSecond);
5578 if (rcStrict != VINF_SUCCESS)
5579 return rcStrict;
5580 GCPhysSecond &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
5581
5582 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
5583
5584 /*
5585 * Read in the current memory content if it's a read, execute or partial
5586 * write access.
5587 */
5588 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
5589 uint32_t const cbFirstPage = GUEST_PAGE_SIZE - (GCPhysFirst & GUEST_PAGE_OFFSET_MASK);
5590 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
5591
5592 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
5593 {
5594 if (!pVCpu->iem.s.fBypassHandlers)
5595 {
5596 /*
5597 * Must carefully deal with access handler status codes here,
5598 * which makes the code a bit bloated.
5599 */
5600 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
5601 if (rcStrict == VINF_SUCCESS)
5602 {
5603 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
5604 if (rcStrict == VINF_SUCCESS)
5605 { /*likely */ }
5606 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5607 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5608 else
5609 {
5610 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (!!)\n",
5611 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5612 return rcStrict;
5613 }
5614 }
5615 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5616 {
5617 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
5618 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
5619 {
5620 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
5621 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5622 }
5623 else
5624 {
5625 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
5626 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
5627 return rcStrict2;
5628 }
5629 }
5630 else
5631 {
5632 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
5633 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
5634 return rcStrict;
5635 }
5636 }
5637 else
5638 {
5639 /*
5640 * No informational status codes here, much more straightforward.
5641 */
5642 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
5643 if (RT_SUCCESS(rc))
5644 {
5645 Assert(rc == VINF_SUCCESS);
5646 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
5647 if (RT_SUCCESS(rc))
5648 Assert(rc == VINF_SUCCESS);
5649 else
5650 {
5651 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
5652 return rc;
5653 }
5654 }
5655 else
5656 {
5657 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
5658 return rc;
5659 }
5660 }
5661 }
5662#ifdef VBOX_STRICT
5663 else
5664 memset(pbBuf, 0xcc, cbMem);
5665 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
5666 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
5667#endif
5668
5669 /*
5670 * Commit the bounce buffer entry.
5671 */
5672 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
5673 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
5674 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
5675 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
5676 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
5677 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
5678 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
5679 pVCpu->iem.s.iNextMapping = iMemMap + 1;
5680 pVCpu->iem.s.cActiveMappings++;
5681
5682 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
5683 *ppvMem = pbBuf;
5684 return VINF_SUCCESS;
5685}
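
/*
 * Illustrative sketch only (not compiled): how iemMemBounceBufferMapCrossPage
 * above splits a straddling access into a first-page chunk and a second-page
 * remainder.  The helper name is hypothetical.
 */
#if 0
static void exampleSplitAcrossPages(RTGCPHYS GCPhysFirst, size_t cbMem, uint32_t *pcbFirst, uint32_t *pcbSecond)
{
    /* Bytes up to the end of the first guest page, then whatever remains. */
    *pcbFirst  = GUEST_PAGE_SIZE - (uint32_t)(GCPhysFirst & GUEST_PAGE_OFFSET_MASK);
    *pcbSecond = (uint32_t)(cbMem - *pcbFirst);
}
#endif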
5686
5687
5688/**
5689 * iemMemMap worker that deals with iemMemPageMap failures.
5690 */
5691static VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPUCC pVCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
5692 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
5693{
5694 /*
5695 * Filter out the conditions we cannot handle here and the ones which shouldn't happen.
5696 */
5697 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
5698 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
5699 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
5700 {
5701 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
5702 return rcMap;
5703 }
5704 pVCpu->iem.s.cPotentialExits++;
5705
5706 /*
5707 * Read in the current memory content if it's a read, execute or partial
5708 * write access.
5709 */
5710 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
5711 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
5712 {
5713 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
5714 memset(pbBuf, 0xff, cbMem);
5715 else
5716 {
5717 int rc;
5718 if (!pVCpu->iem.s.fBypassHandlers)
5719 {
5720 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
5721 if (rcStrict == VINF_SUCCESS)
5722 { /* nothing */ }
5723 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5724 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5725 else
5726 {
5727 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
5728 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
5729 return rcStrict;
5730 }
5731 }
5732 else
5733 {
5734 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
5735 if (RT_SUCCESS(rc))
5736 { /* likely */ }
5737 else
5738 {
5739 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
5740 GCPhysFirst, rc));
5741 return rc;
5742 }
5743 }
5744 }
5745 }
5746#ifdef VBOX_STRICT
5747 else
5748 memset(pbBuf, 0xcc, cbMem);
5749#endif
5750#ifdef VBOX_STRICT
5751 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
5752 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
5753#endif
5754
5755 /*
5756 * Commit the bounce buffer entry.
5757 */
5758 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
5759 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
5760 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
5761 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
5762 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
5763 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
5764 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
5765 pVCpu->iem.s.iNextMapping = iMemMap + 1;
5766 pVCpu->iem.s.cActiveMappings++;
5767
5768 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
5769 *ppvMem = pbBuf;
5770 return VINF_SUCCESS;
5771}
5772
5773
5774
5775/**
5776 * Maps the specified guest memory for the given kind of access.
5777 *
5778 * This may be using bounce buffering of the memory if it's crossing a page
5779 * boundary or if there is an access handler installed for any of it. Because
5780 * of lock prefix guarantees, we're in for some extra clutter when this
5781 * happens.
5782 *
5783 * This may raise a \#GP, \#SS, \#PF or \#AC.
5784 *
5785 * @returns VBox strict status code.
5786 *
5787 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5788 * @param ppvMem Where to return the pointer to the mapped memory.
5789 * @param cbMem The number of bytes to map. This is usually 1, 2, 4, 6,
5790 * 8, 12, 16, 32 or 512. When used by string operations
5791 * it can be up to a page.
5792 * @param iSegReg The index of the segment register to use for this
5793 * access. The base and limits are checked. Use UINT8_MAX
5794 * to indicate that no segmentation is required (for IDT,
5795 * GDT and LDT accesses).
5796 * @param GCPtrMem The address of the guest memory.
5797 * @param fAccess How the memory is being accessed. The
5798 * IEM_ACCESS_TYPE_XXX bit is used to figure out how to map
5799 * the memory, while the IEM_ACCESS_WHAT_XXX bit is used
5800 * when raising exceptions.
5801 * @param uAlignCtl Alignment control:
5802 * - Bits 15:0 is the alignment mask.
5803 * - Bits 31:16 for flags like IEM_MEMMAP_F_ALIGN_GP,
5804 * IEM_MEMMAP_F_ALIGN_SSE, and
5805 * IEM_MEMMAP_F_ALIGN_GP_OR_AC.
5806 * Pass zero to skip alignment.
5807 */
5808VBOXSTRICTRC iemMemMap(PVMCPUCC pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem,
5809 uint32_t fAccess, uint32_t uAlignCtl) RT_NOEXCEPT
5810{
5811 /*
5812 * Check the input and figure out which mapping entry to use.
5813 */
5814 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 256 || cbMem == 108 || cbMem == 104 || cbMem == 102 || cbMem == 94); /* 512 is the max! */
5815 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
5816 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
5817
5818 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
5819 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
5820 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
5821 {
5822 iMemMap = iemMemMapFindFree(pVCpu);
5823 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
5824 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
5825 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
5826 pVCpu->iem.s.aMemMappings[2].fAccess),
5827 VERR_IEM_IPE_9);
5828 }
5829
5830 /*
5831 * Map the memory, checking that we can actually access it. If something
5832 * slightly complicated happens, fall back on bounce buffering.
5833 */
5834 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
5835 if (rcStrict == VINF_SUCCESS)
5836 { /* likely */ }
5837 else
5838 return rcStrict;
5839
5840 if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE) /* Crossing a page boundary? */
5841 { /* likely */ }
5842 else
5843 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
5844
5845 /*
5846 * Alignment check.
5847 */
5848 if ( (GCPtrMem & (uAlignCtl & UINT16_MAX)) == 0 )
5849 { /* likelyish */ }
5850 else
5851 {
5852 /* Misaligned access. */
5853 if ((fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
5854 {
5855 if ( !(uAlignCtl & IEM_MEMMAP_F_ALIGN_GP)
5856 || ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_SSE)
5857 && (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) )
5858 {
5859 AssertCompile(X86_CR0_AM == X86_EFL_AC);
5860
5861 if (iemMemAreAlignmentChecksEnabled(pVCpu))
5862 return iemRaiseAlignmentCheckException(pVCpu);
5863 }
5864 else if ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_GP_OR_AC)
5865 && iemMemAreAlignmentChecksEnabled(pVCpu)
5866/** @todo may only apply to 2, 4 or 8 byte misalignments depending on the CPU
5867 * implementation. See FXSAVE/FRSTOR/XSAVE/XRSTOR/++. */
5868 )
5869 return iemRaiseAlignmentCheckException(pVCpu);
5870 else
5871 return iemRaiseGeneralProtectionFault0(pVCpu);
5872 }
5873 }
5874
5875#ifdef IEM_WITH_DATA_TLB
5876 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
5877
5878 /*
5879 * Get the TLB entry for this page.
5880 */
5881 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
5882 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
5883 if (pTlbe->uTag == uTag)
5884 {
5885# ifdef VBOX_WITH_STATISTICS
5886 pVCpu->iem.s.DataTlb.cTlbHits++;
5887# endif
5888 }
5889 else
5890 {
5891 pVCpu->iem.s.DataTlb.cTlbMisses++;
5892 PGMPTWALK Walk;
5893 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
5894 if (RT_FAILURE(rc))
5895 {
5896 Log(("iemMemMap: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
5897# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5898 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5899 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
5900# endif
5901 iemRaisePageFaultJmp(pVCpu, GCPtrMem, fAccess, rc);
5902 }
5903
5904 Assert(Walk.fSucceeded);
5905 pTlbe->uTag = uTag;
5906 pTlbe->fFlagsAndPhysRev = ~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A); /* skipping NX */
5907 pTlbe->GCPhys = Walk.GCPhys;
5908 pTlbe->pbMappingR3 = NULL;
5909 }
5910
5911 /*
5912 * Check TLB page table level access flags.
5913 */
5914 /* If the page is either supervisor only or non-writable, we need to do
5915 more careful access checks. */
5916 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_WRITE))
5917 {
5918 /* Write to read only memory? */
5919 if ( (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_WRITE)
5920 && (fAccess & IEM_ACCESS_TYPE_WRITE)
5921 && ( ( pVCpu->iem.s.uCpl == 3
5922 && !(fAccess & IEM_ACCESS_WHAT_SYS))
5923 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
5924 {
5925 Log(("iemMemMap: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
5926# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5927 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5928 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5929# endif
5930 iemRaisePageFaultJmp(pVCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
5931 }
5932
5933 /* Kernel memory accessed by userland? */
5934 if ( (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER)
5935 && pVCpu->iem.s.uCpl == 3
5936 && !(fAccess & IEM_ACCESS_WHAT_SYS))
5937 {
5938 Log(("iemMemMap: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
5939# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5940 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5941 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5942# endif
5943 iemRaisePageFaultJmp(pVCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
5944 }
5945 }
5946
5947 /*
5948 * Set the dirty / access flags.
5949 * ASSUMES this is set when the address is translated rather than on commit...
5950 */
5951 /** @todo testcase: check when A and D bits are actually set by the CPU. */
5952 uint64_t const fTlbAccessedDirty = (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PT_NO_DIRTY : 0) | IEMTLBE_F_PT_NO_ACCESSED;
5953 if (pTlbe->fFlagsAndPhysRev & fTlbAccessedDirty)
5954 {
5955 uint32_t const fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
5956 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
5957 AssertRC(rc2);
5958 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
5959 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
5960 pTlbe->fFlagsAndPhysRev &= ~fTlbAccessedDirty;
5961 }
5962
5963 /*
5964 * Look up the physical page info if necessary.
5965 */
5966 uint8_t *pbMem = NULL;
5967 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)
5968# ifdef IN_RING3
5969 pbMem = pTlbe->pbMappingR3;
5970# else
5971 pbMem = NULL;
5972# endif
5973 else
5974 {
5975 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
5976 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
5977 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
5978 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
5979 if (RT_LIKELY(pVCpu->iem.s.DataTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
5980 { /* likely */ }
5981 else
5982 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
5983 pTlbe->pbMappingR3 = NULL;
5984 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
5985 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_UNASSIGNED);
5986 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
5987 &pbMem, &pTlbe->fFlagsAndPhysRev);
5988 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
5989# ifdef IN_RING3
5990 pTlbe->pbMappingR3 = pbMem;
5991# endif
5992 }
5993
5994 /*
5995 * Check the physical page level access and mapping.
5996 */
5997 if ( !(pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ))
5998 || !(pTlbe->fFlagsAndPhysRev & ( (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PG_NO_WRITE : 0)
5999 | (fAccess & IEM_ACCESS_TYPE_READ ? IEMTLBE_F_PG_NO_READ : 0))) )
6000 { /* probably likely */ }
6001 else
6002 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem,
6003 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,
6004 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED
6005 : pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? VERR_PGM_PHYS_TLB_CATCH_ALL
6006 : VERR_PGM_PHYS_TLB_CATCH_WRITE);
6007 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3)); /* ASSUMPTIONS about PGMPhysIemGCPhys2PtrNoLock behaviour. */
6008
6009 if (pbMem)
6010 {
6011 Assert(!((uintptr_t)pbMem & GUEST_PAGE_OFFSET_MASK));
6012 pbMem = pbMem + (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6013 fAccess |= IEM_ACCESS_NOT_LOCKED;
6014 }
6015 else
6016 {
6017 Assert(!(fAccess & IEM_ACCESS_NOT_LOCKED));
6018 RTGCPHYS const GCPhysFirst = pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6019 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6020 if (rcStrict != VINF_SUCCESS)
6021 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
6022 }
6023
6024 void * const pvMem = pbMem;
6025
6026 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6027 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6028 if (fAccess & IEM_ACCESS_TYPE_READ)
6029 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6030
6031#else /* !IEM_WITH_DATA_TLB */
6032
6033 RTGCPHYS GCPhysFirst;
6034 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
6035 if (rcStrict != VINF_SUCCESS)
6036 return rcStrict;
6037
6038 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6039 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6040 if (fAccess & IEM_ACCESS_TYPE_READ)
6041 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6042
6043 void *pvMem;
6044 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6045 if (rcStrict != VINF_SUCCESS)
6046 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
6047
6048#endif /* !IEM_WITH_DATA_TLB */
6049
6050 /*
6051 * Fill in the mapping table entry.
6052 */
6053 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
6054 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
6055 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6056 pVCpu->iem.s.cActiveMappings += 1;
6057
6058 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6059 *ppvMem = pvMem;
6060
6061 return VINF_SUCCESS;
6062}
6063
6064
6065/**
6066 * Commits the guest memory if bounce buffered and unmaps it.
6067 *
6068 * @returns Strict VBox status code.
6069 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6070 * @param pvMem The mapping.
6071 * @param fAccess The kind of access.
6072 */
6073VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess) RT_NOEXCEPT
6074{
6075 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
6076 AssertReturn(iMemMap >= 0, iMemMap);
6077
6078 /* If it's bounce buffered, we may need to write back the buffer. */
6079 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6080 {
6081 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6082 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
6083 }
6084 /* Otherwise unlock it. */
6085 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6086 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6087
6088 /* Free the entry. */
6089 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6090 Assert(pVCpu->iem.s.cActiveMappings != 0);
6091 pVCpu->iem.s.cActiveMappings--;
6092 return VINF_SUCCESS;
6093}
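
/*
 * A minimal usage sketch (assuming a caller that already has pVCpu, iSegReg,
 * GCPtrMem and a u32Value to store in scope): iemMemMap and iemMemCommitAndUnmap
 * above are used as a pair, mapping guest memory, touching it, then committing
 * and unmapping it.  For a simple dword-sized guest write that looks roughly like:
 *
 *      uint32_t    *pu32Dst;
 *      VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem,
 *                                        IEM_ACCESS_DATA_W, sizeof(*pu32Dst) - 1);
 *      if (rcStrict == VINF_SUCCESS)
 *      {
 *          *pu32Dst = u32Value;   // write through the mapping (direct or bounce buffered)
 *          rcStrict = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
 *      }
 *      return rcStrict;
 */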
6094
6095#ifdef IEM_WITH_SETJMP
6096
6097/**
6098 * Maps the specified guest memory for the given kind of access, longjmp on
6099 * error.
6100 *
6101 * This may be using bounce buffering of the memory if it's crossing a page
6102 * boundary or if there is an access handler installed for any of it. Because
6103 * of lock prefix guarantees, we're in for some extra clutter when this
6104 * happens.
6105 *
6106 * This may raise a \#GP, \#SS, \#PF or \#AC.
6107 *
6108 * @returns Pointer to the mapped memory.
6109 *
6110 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6111 * @param cbMem The number of bytes to map. This is usually 1,
6112 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
6113 * string operations it can be up to a page.
6114 * @param iSegReg The index of the segment register to use for
6115 * this access. The base and limits are checked.
6116 * Use UINT8_MAX to indicate that no segmentation
6117 * is required (for IDT, GDT and LDT accesses).
6118 * @param GCPtrMem The address of the guest memory.
6119 * @param fAccess How the memory is being accessed. The
6120 * IEM_ACCESS_TYPE_XXX bit is used to figure out
6121 * how to map the memory, while the
6122 * IEM_ACCESS_WHAT_XXX bit is used when raising
6123 * exceptions.
6124 * @param uAlignCtl Alignment control:
6125 * - Bits 15:0 is the alignment mask.
6126 * - Bits 31:16 for flags like IEM_MEMMAP_F_ALIGN_GP,
6127 * IEM_MEMMAP_F_ALIGN_SSE, and
6128 * IEM_MEMMAP_F_ALIGN_GP_OR_AC.
6129 * Pass zero to skip alignment.
6130 */
6131void *iemMemMapJmp(PVMCPUCC pVCpu, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess,
6132 uint32_t uAlignCtl) RT_NOEXCEPT
6133{
6134 /*
6135 * Check the input, check segment access and adjust address
6136 * with segment base.
6137 */
6138 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
6139 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
6140 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
6141
6142 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
6143 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
6144 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
6145
6146 /*
6147 * Alignment check.
6148 */
6149 if ( (GCPtrMem & (uAlignCtl & UINT16_MAX)) == 0 )
6150 { /* likelyish */ }
6151 else
6152 {
6153 /* Misaligned access. */
6154 if ((fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6155 {
6156 if ( !(uAlignCtl & IEM_MEMMAP_F_ALIGN_GP)
6157 || ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_SSE)
6158 && (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) )
6159 {
6160 AssertCompile(X86_CR0_AM == X86_EFL_AC);
6161
6162 if (iemMemAreAlignmentChecksEnabled(pVCpu))
6163 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
6164 }
6165 else if ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_GP_OR_AC)
6166 && iemMemAreAlignmentChecksEnabled(pVCpu)
6167/** @todo may only apply to 2, 4 or 8 byte misalignments depending on the CPU
6168 * implementation. See FXSAVE/FRSTOR/XSAVE/XRSTOR/++. */
6169 )
6170 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
6171 else
6172 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
6173 }
6174 }
6175
6176 /*
6177 * Figure out which mapping entry to use.
6178 */
6179 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
6180 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6181 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
6182 {
6183 iMemMap = iemMemMapFindFree(pVCpu);
6184 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
6185 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
6186 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
6187 pVCpu->iem.s.aMemMappings[2].fAccess),
6188 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_9));
6189 }
6190
6191 /*
6192 * Crossing a page boundary?
6193 */
6194 if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE)
6195 { /* No (likely). */ }
6196 else
6197 {
6198 void *pvMem;
6199 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, cbMem, GCPtrMem, fAccess);
6200 if (rcStrict == VINF_SUCCESS)
6201 return pvMem;
6202 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
6203 }
6204
6205#ifdef IEM_WITH_DATA_TLB
6206 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
6207
6208 /*
6209 * Get the TLB entry for this page.
6210 */
6211 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
6212 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
6213 if (pTlbe->uTag == uTag)
6214 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
6215 else
6216 {
6217 pVCpu->iem.s.DataTlb.cTlbMisses++;
6218 PGMPTWALK Walk;
6219 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
6220 if (RT_FAILURE(rc))
6221 {
6222 Log(("iemMemMap: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
6223# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6224 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6225 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
6226# endif
6227 iemRaisePageFaultJmp(pVCpu, GCPtrMem, fAccess, rc);
6228 }
6229
6230 Assert(Walk.fSucceeded);
6231 pTlbe->uTag = uTag;
6232 pTlbe->fFlagsAndPhysRev = ~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A); /* skipping NX */
6233 pTlbe->GCPhys = Walk.GCPhys;
6234 pTlbe->pbMappingR3 = NULL;
6235 }
6236
6237 /*
6238 * Check the flags and physical revision.
6239 */
6240 /** @todo make the caller pass these in with fAccess. */
6241 uint64_t const fNoUser = (fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS && pVCpu->iem.s.uCpl == 3
6242 ? IEMTLBE_F_PT_NO_USER : 0;
6243 uint64_t const fNoWriteNoDirty = fAccess & IEM_ACCESS_TYPE_WRITE
6244 ? IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PT_NO_DIRTY
6245 | ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)
6246 || (pVCpu->iem.s.uCpl == 3 && (fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6247 ? IEMTLBE_F_PT_NO_WRITE : 0)
6248 : 0;
6249 uint64_t const fNoRead = fAccess & IEM_ACCESS_TYPE_READ ? IEMTLBE_F_PG_NO_READ : 0;
6250 uint8_t *pbMem = NULL;
6251 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PT_NO_ACCESSED | fNoRead | fNoWriteNoDirty | fNoUser))
6252 == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6253# ifdef IN_RING3
6254 pbMem = pTlbe->pbMappingR3;
6255# else
6256 pbMem = NULL;
6257# endif
6258 else
6259 {
6260 /*
6261 * Okay, something isn't quite right or needs refreshing.
6262 */
6263 /* Write to read only memory? */
6264 if (pTlbe->fFlagsAndPhysRev & fNoWriteNoDirty & IEMTLBE_F_PT_NO_WRITE)
6265 {
6266 Log(("iemMemMapJmp: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
6267# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6268 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6269 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6270# endif
6271 iemRaisePageFaultJmp(pVCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
6272 }
6273
6274 /* Kernel memory accessed by userland? */
6275 if (pTlbe->fFlagsAndPhysRev & fNoUser & IEMTLBE_F_PT_NO_USER)
6276 {
6277 Log(("iemMemMapJmp: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
6278# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6279 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6280 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6281# endif
6282 iemRaisePageFaultJmp(pVCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
6283 }
6284
6285 /* Set the dirty / access flags.
6286 ASSUMES this is set when the address is translated rather than on commit... */
6287 /** @todo testcase: check when A and D bits are actually set by the CPU. */
6288 if (pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY) | IEMTLBE_F_PT_NO_ACCESSED))
6289 {
6290 uint32_t const fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
6291 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
6292 AssertRC(rc2);
6293 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
6294 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
6295 pTlbe->fFlagsAndPhysRev &= ~((fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY) | IEMTLBE_F_PT_NO_ACCESSED);
6296 }
6297
6298 /*
6299 * Check if the physical page info needs updating.
6300 */
6301 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6302# ifdef IN_RING3
6303 pbMem = pTlbe->pbMappingR3;
6304# else
6305 pbMem = NULL;
6306# endif
6307 else
6308 {
6309 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
6310 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
6311 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
6312 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
6313 pTlbe->pbMappingR3 = NULL;
6314 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
6315 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_UNASSIGNED);
6316 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
6317 &pbMem, &pTlbe->fFlagsAndPhysRev);
6318 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
6319# ifdef IN_RING3
6320 pTlbe->pbMappingR3 = pbMem;
6321# endif
6322 }
6323
6324 /*
6325 * Check the physical page level access and mapping.
6326 */
6327 if (!(pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty | fNoRead) & (IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ))))
6328 { /* probably likely */ }
6329 else
6330 {
6331 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, (void **)&pbMem, cbMem,
6332 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,
6333 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED
6334 : pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? VERR_PGM_PHYS_TLB_CATCH_ALL
6335 : VERR_PGM_PHYS_TLB_CATCH_WRITE);
6336 if (rcStrict == VINF_SUCCESS)
6337 return pbMem;
6338 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
6339 }
6340 }
6341 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3)); /* ASSUMPTIONS about PGMPhysIemGCPhys2PtrNoLock behaviour. */
6342
6343 if (pbMem)
6344 {
6345 Assert(!((uintptr_t)pbMem & GUEST_PAGE_OFFSET_MASK));
6346 pbMem = pbMem + (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6347 fAccess |= IEM_ACCESS_NOT_LOCKED;
6348 }
6349 else
6350 {
6351 Assert(!(fAccess & IEM_ACCESS_NOT_LOCKED));
6352 RTGCPHYS const GCPhysFirst = pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6353 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6354 if (rcStrict == VINF_SUCCESS)
6355 return pbMem;
6356 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
6357 }
6358
6359 void * const pvMem = pbMem;
6360
6361 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6362 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6363 if (fAccess & IEM_ACCESS_TYPE_READ)
6364 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6365
6366#else /* !IEM_WITH_DATA_TLB */
6367
6368
6369 RTGCPHYS GCPhysFirst;
6370 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
6371 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
6372 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
6373
6374 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6375 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6376 if (fAccess & IEM_ACCESS_TYPE_READ)
6377 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6378
6379 void *pvMem;
6380 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6381 if (rcStrict == VINF_SUCCESS)
6382 { /* likely */ }
6383 else
6384 {
6385 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
6386 if (rcStrict == VINF_SUCCESS)
6387 return pvMem;
6388 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
6389 }
6390
6391#endif /* !IEM_WITH_DATA_TLB */
6392
6393 /*
6394 * Fill in the mapping table entry.
6395 */
6396 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
6397 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
6398 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6399 pVCpu->iem.s.cActiveMappings++;
6400
6401 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6402 return pvMem;
6403}
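
/*
 * A rough sketch of how callers in this file typically encode uAlignCtl (see the
 * alignment block above and, for instance, iemMemFetchDataU128AlignedSse below):
 *
 *      // byte access: no alignment requirement at all
 *      iemMemMapJmp(pVCpu, sizeof(uint8_t), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R, 0);
 *
 *      // natural alignment: a misaligned access is an #AC candidate when
 *      // alignment checks are enabled, otherwise it simply proceeds
 *      iemMemMapJmp(pVCpu, sizeof(uint32_t), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R, sizeof(uint32_t) - 1);
 *
 *      // 16 byte SSE operand: #GP(0) on misalignment, except that with
 *      // IEM_MEMMAP_F_ALIGN_SSE and MXCSR.MM set the check degrades to an
 *      // (optional) #AC as per the code above
 *      iemMemMapJmp(pVCpu, 16, iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
 *                   15 | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
 */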
6404
6405
6406/**
6407 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
6408 *
6409 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6410 * @param pvMem The mapping.
6411 * @param fAccess The kind of access.
6412 */
6413void iemMemCommitAndUnmapJmp(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess) RT_NOEXCEPT
6414{
6415 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
6416 AssertStmt(iMemMap >= 0, longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), iMemMap));
6417
6418 /* If it's bounce buffered, we may need to write back the buffer. */
6419 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6420 {
6421 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6422 {
6423 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
6424 if (rcStrict == VINF_SUCCESS)
6425 return;
6426 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
6427 }
6428 }
6429 /* Otherwise unlock it. */
6430 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6431 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6432
6433 /* Free the entry. */
6434 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6435 Assert(pVCpu->iem.s.cActiveMappings != 0);
6436 pVCpu->iem.s.cActiveMappings--;
6437}
6438
6439#endif /* IEM_WITH_SETJMP */
6440
6441#ifndef IN_RING3
6442/**
6443 * Commits the guest memory if bounce buffered and unmaps it, if any bounce
6444 * buffer part shows trouble it will be postponed to ring-3 (sets FF and stuff).
6445 *
6446 * Allows the instruction to be completed and retired, while the IEM user will
6447 * return to ring-3 immediately afterwards and do the postponed writes there.
6448 *
6449 * @returns VBox status code (no strict statuses). Caller must check
6450 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
6451 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6452 * @param pvMem The mapping.
6453 * @param fAccess The kind of access.
6454 */
6455VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess) RT_NOEXCEPT
6456{
6457 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
6458 AssertReturn(iMemMap >= 0, iMemMap);
6459
6460 /* If it's bounce buffered, we may need to write back the buffer. */
6461 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6462 {
6463 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6464 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
6465 }
6466 /* Otherwise unlock it. */
6467 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6468 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6469
6470 /* Free the entry. */
6471 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6472 Assert(pVCpu->iem.s.cActiveMappings != 0);
6473 pVCpu->iem.s.cActiveMappings--;
6474 return VINF_SUCCESS;
6475}
6476#endif
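
/*
 * A minimal sketch of the intended non-ring-3 caller pattern (assuming the usual
 * VMCPU_FF_IS_SET accessor for the VMCPU_FF_IEM force flag mentioned above, and
 * pvMem/fAccess from a prior iemMemMap call inside a string instruction loop):
 *
 *      rcStrict = iemMemCommitAndUnmapPostponeTroubleToR3(pVCpu, pvMem, fAccess);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
 *          return VINF_SUCCESS;   // let ring-3 do the postponed writes before the next iteration
 *      // ... otherwise it is safe to continue with the next string iteration ...
 */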
6477
6478
6479/**
6480 * Rolls back mappings, releasing page locks and such.
6481 *
6482 * The caller shall only call this after checking cActiveMappings.
6483 *
6485 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6486 */
6487void iemMemRollback(PVMCPUCC pVCpu) RT_NOEXCEPT
6488{
6489 Assert(pVCpu->iem.s.cActiveMappings > 0);
6490
6491 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
6492 while (iMemMap-- > 0)
6493 {
6494 uint32_t const fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
6495 if (fAccess != IEM_ACCESS_INVALID)
6496 {
6497 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
6498 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6499 if (!(fAccess & (IEM_ACCESS_BOUNCE_BUFFERED | IEM_ACCESS_NOT_LOCKED)))
6500 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6501 AssertMsg(pVCpu->iem.s.cActiveMappings > 0,
6502 ("iMemMap=%u fAccess=%#x pv=%p GCPhysFirst=%RGp GCPhysSecond=%RGp\n",
6503 iMemMap, fAccess, pVCpu->iem.s.aMemMappings[iMemMap].pv,
6504 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond));
6505 pVCpu->iem.s.cActiveMappings--;
6506 }
6507 }
6508}
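
/*
 * Illustrative caller pattern (a sketch; the instruction dispatch code is the
 * real consumer): roll back only when something failed and mappings are still
 * outstanding, as required by the note above.
 *
 *      if (rcStrict != VINF_SUCCESS && pVCpu->iem.s.cActiveMappings > 0)
 *          iemMemRollback(pVCpu);
 */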
6509
6510
6511/**
6512 * Fetches a data byte.
6513 *
6514 * @returns Strict VBox status code.
6515 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6516 * @param pu8Dst Where to return the byte.
6517 * @param iSegReg The index of the segment register to use for
6518 * this access. The base and limits are checked.
6519 * @param GCPtrMem The address of the guest memory.
6520 */
6521VBOXSTRICTRC iemMemFetchDataU8(PVMCPUCC pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6522{
6523 /* The lazy approach for now... */
6524 uint8_t const *pu8Src;
6525 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R, 0);
6526 if (rc == VINF_SUCCESS)
6527 {
6528 *pu8Dst = *pu8Src;
6529 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
6530 }
6531 return rc;
6532}
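
/*
 * Usage sketch (GCPtrSrc is a placeholder effective address supplied by the
 * caller; X86_SREG_DS is the DS segment register index):
 *
 *      uint8_t      bValue;
 *      VBOXSTRICTRC rcStrict = iemMemFetchDataU8(pVCpu, &bValue, X86_SREG_DS, GCPtrSrc);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 */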
6533
6534
6535#ifdef IEM_WITH_SETJMP
6536/**
6537 * Fetches a data byte, longjmp on error.
6538 *
6539 * @returns The byte.
6540 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6541 * @param iSegReg The index of the segment register to use for
6542 * this access. The base and limits are checked.
6543 * @param GCPtrMem The address of the guest memory.
6544 */
6545uint8_t iemMemFetchDataU8Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6546{
6547 /* The lazy approach for now... */
6548 uint8_t const *pu8Src = (uint8_t const *)iemMemMapJmp(pVCpu, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R, 0);
6549 uint8_t const bRet = *pu8Src;
6550 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
6551 return bRet;
6552}
6553#endif /* IEM_WITH_SETJMP */
6554
6555
6556/**
6557 * Fetches a data word.
6558 *
6559 * @returns Strict VBox status code.
6560 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6561 * @param pu16Dst Where to return the word.
6562 * @param iSegReg The index of the segment register to use for
6563 * this access. The base and limits are checked.
6564 * @param GCPtrMem The address of the guest memory.
6565 */
6566VBOXSTRICTRC iemMemFetchDataU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6567{
6568 /* The lazy approach for now... */
6569 uint16_t const *pu16Src;
6570 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem,
6571 IEM_ACCESS_DATA_R, sizeof(*pu16Src) - 1);
6572 if (rc == VINF_SUCCESS)
6573 {
6574 *pu16Dst = *pu16Src;
6575 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
6576 }
6577 return rc;
6578}
6579
6580
6581#ifdef IEM_WITH_SETJMP
6582/**
6583 * Fetches a data word, longjmp on error.
6584 *
6585 * @returns The word.
6586 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6587 * @param iSegReg The index of the segment register to use for
6588 * this access. The base and limits are checked.
6589 * @param GCPtrMem The address of the guest memory.
6590 */
6591uint16_t iemMemFetchDataU16Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6592{
6593 /* The lazy approach for now... */
6594 uint16_t const *pu16Src = (uint16_t const *)iemMemMapJmp(pVCpu, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
6595 sizeof(*pu16Src) - 1);
6596 uint16_t const u16Ret = *pu16Src;
6597 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
6598 return u16Ret;
6599}
6600#endif
6601
6602
6603/**
6604 * Fetches a data dword.
6605 *
6606 * @returns Strict VBox status code.
6607 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6608 * @param pu32Dst Where to return the dword.
6609 * @param iSegReg The index of the segment register to use for
6610 * this access. The base and limits are checked.
6611 * @param GCPtrMem The address of the guest memory.
6612 */
6613VBOXSTRICTRC iemMemFetchDataU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6614{
6615 /* The lazy approach for now... */
6616 uint32_t const *pu32Src;
6617 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem,
6618 IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
6619 if (rc == VINF_SUCCESS)
6620 {
6621 *pu32Dst = *pu32Src;
6622 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
6623 }
6624 return rc;
6625}
6626
6627
6628/**
6629 * Fetches a data dword and zero extends it to a qword.
6630 *
6631 * @returns Strict VBox status code.
6632 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6633 * @param pu64Dst Where to return the qword.
6634 * @param iSegReg The index of the segment register to use for
6635 * this access. The base and limits are checked.
6636 * @param GCPtrMem The address of the guest memory.
6637 */
6638VBOXSTRICTRC iemMemFetchDataU32_ZX_U64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6639{
6640 /* The lazy approach for now... */
6641 uint32_t const *pu32Src;
6642 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem,
6643 IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
6644 if (rc == VINF_SUCCESS)
6645 {
6646 *pu64Dst = *pu32Src;
6647 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
6648 }
6649 return rc;
6650}
6651
6652
6653#ifdef IEM_WITH_SETJMP
6654
6655/**
6656 * Fetches a data dword, longjmp on error, fallback/safe version.
6657 *
6658 * @returns The dword.
6659 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6660 * @param iSegReg The index of the segment register to use for
6661 * this access. The base and limits are checked.
6662 * @param GCPtrMem The address of the guest memory.
6663 */
6664uint32_t iemMemFetchDataU32SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6665{
6666 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
6667 sizeof(*pu32Src) - 1);
6668 uint32_t const u32Ret = *pu32Src;
6669 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
6670 return u32Ret;
6671}
6672
6673
6674/**
6675 * Fetches a data dword, longjmp on error.
6676 *
6677 * @returns The dword.
6678 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6679 * @param iSegReg The index of the segment register to use for
6680 * this access. The base and limits are checked.
6681 * @param GCPtrMem The address of the guest memory.
6682 */
6683uint32_t iemMemFetchDataU32Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6684{
6685# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
6686 /*
6687 * Convert from segmented to flat address and check that it doesn't cross a page boundary.
6688 */
6689 RTGCPTR GCPtrEff = iemMemApplySegmentToReadJmp(pVCpu, iSegReg, sizeof(uint32_t), GCPtrMem);
6690 if (RT_LIKELY((GCPtrEff & GUEST_PAGE_OFFSET_MASK) <= GUEST_PAGE_SIZE - sizeof(uint32_t)))
6691 {
6692 /*
6693 * TLB lookup.
6694 */
6695 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrEff);
6696 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
6697 if (pTlbe->uTag == uTag)
6698 {
6699 /*
6700 * Check TLB page table level access flags.
6701 */
6702 uint64_t const fNoUser = pVCpu->iem.s.uCpl == 3 ? IEMTLBE_F_PT_NO_USER : 0;
6703 if ( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_READ
6704 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_NO_MAPPINGR3 | fNoUser))
6705 == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6706 {
6707 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
6708
6709 /*
6710 * Alignment check:
6711 */
6712 /** @todo check priority \#AC vs \#PF */
6713 if ( !(GCPtrEff & (sizeof(uint32_t) - 1))
6714 || !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_AM)
6715 || !pVCpu->cpum.GstCtx.eflags.Bits.u1AC
6716 || pVCpu->iem.s.uCpl != 3)
6717 {
6718 /*
6719 * Fetch and return the dword
6720 */
6721 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
6722 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
6723 return *(uint32_t const *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK];
6724 }
6725 Log10(("iemMemFetchDataU32Jmp: Raising #AC for %RGv\n", GCPtrEff));
6726 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
6727 }
6728 }
6729 }
6730
6731 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception
6732 outdated page pointer, or other troubles. */
6733 Log10(("iemMemFetchDataU32Jmp: %u:%RGv fallback\n", iSegReg, GCPtrMem));
6734 return iemMemFetchDataU32SafeJmp(pVCpu, iSegReg, GCPtrMem);
6735
6736# else
6737 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem,
6738 IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
6739 uint32_t const u32Ret = *pu32Src;
6740 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
6741 return u32Ret;
6742# endif
6743}
6744#endif
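
/*
 * A short recap of the fast path above: the inlined ring-3 TLB lookup only
 * returns directly when the page is already in the data TLB, is accessed,
 * readable at the current CPL, has a valid ring-3 mapping for the current
 * physical revision, and the read is either aligned or exempt from #AC
 * (CR0.AM clear, EFLAGS.AC clear, or CPL != 3).  Everything else - TLB miss,
 * MMIO, stale physical revision, #AC candidates - goes through
 * iemMemFetchDataU32SafeJmp, so callers can treat the two variants as
 * equivalent:
 *
 *      uint32_t const u32Value = iemMemFetchDataU32Jmp(pVCpu, iSegReg, GCPtrMem);
 */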
6745
6746
6747#ifdef SOME_UNUSED_FUNCTION
6748/**
6749 * Fetches a data dword and sign extends it to a qword.
6750 *
6751 * @returns Strict VBox status code.
6752 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6753 * @param pu64Dst Where to return the sign extended value.
6754 * @param iSegReg The index of the segment register to use for
6755 * this access. The base and limits are checked.
6756 * @param GCPtrMem The address of the guest memory.
6757 */
6758VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6759{
6760 /* The lazy approach for now... */
6761 int32_t const *pi32Src;
6762 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem,
6763 IEM_ACCESS_DATA_R, sizeof(*pi32Src) - 1);
6764 if (rc == VINF_SUCCESS)
6765 {
6766 *pu64Dst = *pi32Src;
6767 rc = iemMemCommitAndUnmap(pVCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
6768 }
6769#ifdef __GNUC__ /* warning: GCC may be a royal pain */
6770 else
6771 *pu64Dst = 0;
6772#endif
6773 return rc;
6774}
6775#endif
6776
6777
6778/**
6779 * Fetches a data qword.
6780 *
6781 * @returns Strict VBox status code.
6782 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6783 * @param pu64Dst Where to return the qword.
6784 * @param iSegReg The index of the segment register to use for
6785 * this access. The base and limits are checked.
6786 * @param GCPtrMem The address of the guest memory.
6787 */
6788VBOXSTRICTRC iemMemFetchDataU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6789{
6790 /* The lazy approach for now... */
6791 uint64_t const *pu64Src;
6792 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem,
6793 IEM_ACCESS_DATA_R, sizeof(*pu64Src) - 1);
6794 if (rc == VINF_SUCCESS)
6795 {
6796 *pu64Dst = *pu64Src;
6797 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
6798 }
6799 return rc;
6800}
6801
6802
6803#ifdef IEM_WITH_SETJMP
6804/**
6805 * Fetches a data qword, longjmp on error.
6806 *
6807 * @returns The qword.
6808 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6809 * @param iSegReg The index of the segment register to use for
6810 * this access. The base and limits are checked.
6811 * @param GCPtrMem The address of the guest memory.
6812 */
6813uint64_t iemMemFetchDataU64Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6814{
6815 /* The lazy approach for now... */
6816 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem,
6817 IEM_ACCESS_DATA_R, sizeof(*pu64Src) - 1);
6818 uint64_t const u64Ret = *pu64Src;
6819 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
6820 return u64Ret;
6821}
6822#endif
6823
6824
6825/**
6826 * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
6827 *
6828 * @returns Strict VBox status code.
6829 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6830 * @param pu64Dst Where to return the qword.
6831 * @param iSegReg The index of the segment register to use for
6832 * this access. The base and limits are checked.
6833 * @param GCPtrMem The address of the guest memory.
6834 */
6835VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6836{
6837 /* The lazy approach for now... */
6838 uint64_t const *pu64Src;
6839 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem,
6840 IEM_ACCESS_DATA_R, 15 | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
6841 if (rc == VINF_SUCCESS)
6842 {
6843 *pu64Dst = *pu64Src;
6844 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
6845 }
6846 return rc;
6847}
6848
6849
6850#ifdef IEM_WITH_SETJMP
6851/**
6852 * Fetches a data qword, aligned at a 16 byte boundary (for SSE), longjmp on error.
6853 *
6854 * @returns The qword.
6855 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6856 * @param iSegReg The index of the segment register to use for
6857 * this access. The base and limits are checked.
6858 * @param GCPtrMem The address of the guest memory.
6859 */
6860uint64_t iemMemFetchDataU64AlignedU128Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6861{
6862 /* The lazy approach for now... */
6863 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
6864 15 | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
6865 uint64_t const u64Ret = *pu64Src;
6866 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
6867 return u64Ret;
6868}
6869#endif
6870
6871
6872/**
6873 * Fetches a data tword.
6874 *
6875 * @returns Strict VBox status code.
6876 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6877 * @param pr80Dst Where to return the tword.
6878 * @param iSegReg The index of the segment register to use for
6879 * this access. The base and limits are checked.
6880 * @param GCPtrMem The address of the guest memory.
6881 */
6882VBOXSTRICTRC iemMemFetchDataR80(PVMCPUCC pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6883{
6884 /* The lazy approach for now... */
6885 PCRTFLOAT80U pr80Src;
6886 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem,
6887 IEM_ACCESS_DATA_R, 7 /** @todo FLD alignment check */ );
6888 if (rc == VINF_SUCCESS)
6889 {
6890 *pr80Dst = *pr80Src;
6891 rc = iemMemCommitAndUnmap(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
6892 }
6893 return rc;
6894}
6895
6896
6897#ifdef IEM_WITH_SETJMP
6898/**
6899 * Fetches a data tword, longjmp on error.
6900 *
6901 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6902 * @param pr80Dst Where to return the tword.
6903 * @param iSegReg The index of the segment register to use for
6904 * this access. The base and limits are checked.
6905 * @param GCPtrMem The address of the guest memory.
6906 */
6907void iemMemFetchDataR80Jmp(PVMCPUCC pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6908{
6909 /* The lazy approach for now... */
6910 PCRTFLOAT80U pr80Src = (PCRTFLOAT80U)iemMemMapJmp(pVCpu, sizeof(*pr80Src), iSegReg, GCPtrMem,
6911 IEM_ACCESS_DATA_R, 7 /** @todo FLD alignment check */);
6912 *pr80Dst = *pr80Src;
6913 iemMemCommitAndUnmapJmp(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
6914}
6915#endif
6916
6917
6918/**
6919 * Fetches a data tword (80-bit packed BCD).
6920 *
6921 * @returns Strict VBox status code.
6922 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6923 * @param pd80Dst Where to return the tword.
6924 * @param iSegReg The index of the segment register to use for
6925 * this access. The base and limits are checked.
6926 * @param GCPtrMem The address of the guest memory.
6927 */
6928VBOXSTRICTRC iemMemFetchDataD80(PVMCPUCC pVCpu, PRTPBCD80U pd80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6929{
6930 /* The lazy approach for now... */
6931 PCRTPBCD80U pd80Src;
6932 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pd80Src, sizeof(*pd80Src), iSegReg, GCPtrMem,
6933 IEM_ACCESS_DATA_R, 7 /** @todo FBLD alignment check */);
6934 if (rc == VINF_SUCCESS)
6935 {
6936 *pd80Dst = *pd80Src;
6937 rc = iemMemCommitAndUnmap(pVCpu, (void *)pd80Src, IEM_ACCESS_DATA_R);
6938 }
6939 return rc;
6940}
6941
6942
6943#ifdef IEM_WITH_SETJMP
6944/**
6945 * Fetches a data tword (80-bit packed BCD), longjmp on error.
6946 *
6947 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6948 * @param pd80Dst Where to return the tword.
6949 * @param iSegReg The index of the segment register to use for
6950 * this access. The base and limits are checked.
6951 * @param GCPtrMem The address of the guest memory.
6952 */
6953void iemMemFetchDataD80Jmp(PVMCPUCC pVCpu, PRTPBCD80U pd80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6954{
6955 /* The lazy approach for now... */
6956 PCRTPBCD80U pd80Src = (PCRTPBCD80U)iemMemMapJmp(pVCpu, sizeof(*pd80Src), iSegReg, GCPtrMem,
6957 IEM_ACCESS_DATA_R, 7 /** @todo FBSTP alignment check */);
6958 *pd80Dst = *pd80Src;
6959 iemMemCommitAndUnmapJmp(pVCpu, (void *)pd80Src, IEM_ACCESS_DATA_R);
6960}
6961#endif
6962
6963
6964/**
6965 * Fetches a data dqword (double qword), generally SSE related.
6966 *
6967 * @returns Strict VBox status code.
6968 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6969 * @param pu128Dst Where to return the dqword.
6970 * @param iSegReg The index of the segment register to use for
6971 * this access. The base and limits are checked.
6972 * @param GCPtrMem The address of the guest memory.
6973 */
6974VBOXSTRICTRC iemMemFetchDataU128(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6975{
6976 /* The lazy approach for now... */
6977 PCRTUINT128U pu128Src;
6978 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem,
6979 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
6980 if (rc == VINF_SUCCESS)
6981 {
6982 pu128Dst->au64[0] = pu128Src->au64[0];
6983 pu128Dst->au64[1] = pu128Src->au64[1];
6984 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
6985 }
6986 return rc;
6987}
6988
6989
6990#ifdef IEM_WITH_SETJMP
6991/**
6992 * Fetches a data dqword (double qword), generally SSE related, longjmp on error.
6993 *
6994 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6995 * @param pu128Dst Where to return the dqword.
6996 * @param iSegReg The index of the segment register to use for
6997 * this access. The base and limits are checked.
6998 * @param GCPtrMem The address of the guest memory.
6999 */
7000void iemMemFetchDataU128Jmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7001{
7002 /* The lazy approach for now... */
7003 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem,
7004 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7005 pu128Dst->au64[0] = pu128Src->au64[0];
7006 pu128Dst->au64[1] = pu128Src->au64[1];
7007 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7008}
7009#endif
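
/*
 * The 0 alignment-control argument above ("NO_AC variant") deliberately skips
 * all alignment checking, which matches MOVUPS/MOVDQU style unaligned 128-bit
 * accesses (an interpretation sketch; the aligned helpers follow below and
 * request 16 byte alignment instead):
 *
 *      RTUINT128U   uValue;
 *      VBOXSTRICTRC rcStrict;
 *      rcStrict = iemMemFetchDataU128(pVCpu, &uValue, iSegReg, GCPtrMem);            // never faults on misalignment
 *      rcStrict = iemMemFetchDataU128AlignedSse(pVCpu, &uValue, iSegReg, GCPtrMem);  // #GP(0)/#AC on misalignment
 */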
7010
7011
7012/**
7013 * Fetches a data dqword (double qword) at an aligned address, generally SSE
7014 * related.
7015 *
7016 * Raises \#GP(0) if not aligned.
7017 *
7018 * @returns Strict VBox status code.
7019 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7020 * @param pu128Dst Where to return the dqword.
7021 * @param iSegReg The index of the segment register to use for
7022 * this access. The base and limits are checked.
7023 * @param GCPtrMem The address of the guest memory.
7024 */
7025VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7026{
7027 /* The lazy approach for now... */
7028 PCRTUINT128U pu128Src;
7029 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem,
7030 IEM_ACCESS_DATA_R, (sizeof(*pu128Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7031 if (rc == VINF_SUCCESS)
7032 {
7033 pu128Dst->au64[0] = pu128Src->au64[0];
7034 pu128Dst->au64[1] = pu128Src->au64[1];
7035 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7036 }
7037 return rc;
7038}
7039
7040
7041#ifdef IEM_WITH_SETJMP
7042/**
7043 * Fetches a data dqword (double qword) at an aligned address, generally SSE
7044 * related, longjmp on error.
7045 *
7046 * Raises \#GP(0) if not aligned.
7047 *
7048 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7049 * @param pu128Dst Where to return the dqword.
7050 * @param iSegReg The index of the segment register to use for
7051 * this access. The base and limits are checked.
7052 * @param GCPtrMem The address of the guest memory.
7053 */
7054void iemMemFetchDataU128AlignedSseJmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7055{
7056 /* The lazy approach for now... */
7057 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
7058 (sizeof(*pu128Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7059 pu128Dst->au64[0] = pu128Src->au64[0];
7060 pu128Dst->au64[1] = pu128Src->au64[1];
7061 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7062}
7063#endif
7064
7065
7066/**
7067 * Fetches a data oword (octo word), generally AVX related.
7068 *
7069 * @returns Strict VBox status code.
7070 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7071 * @param pu256Dst Where to return the oword.
7072 * @param iSegReg The index of the segment register to use for
7073 * this access. The base and limits are checked.
7074 * @param GCPtrMem The address of the guest memory.
7075 */
7076VBOXSTRICTRC iemMemFetchDataU256(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7077{
7078 /* The lazy approach for now... */
7079 PCRTUINT256U pu256Src;
7080 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem,
7081 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7082 if (rc == VINF_SUCCESS)
7083 {
7084 pu256Dst->au64[0] = pu256Src->au64[0];
7085 pu256Dst->au64[1] = pu256Src->au64[1];
7086 pu256Dst->au64[2] = pu256Src->au64[2];
7087 pu256Dst->au64[3] = pu256Src->au64[3];
7088 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
7089 }
7090 return rc;
7091}
7092
7093
7094#ifdef IEM_WITH_SETJMP
7095/**
7096 * Fetches a data oword (octo word), generally AVX related, longjmp on error.
7097 *
7098 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7099 * @param pu256Dst Where to return the oword.
7100 * @param iSegReg The index of the segment register to use for
7101 * this access. The base and limits are checked.
7102 * @param GCPtrMem The address of the guest memory.
7103 */
7104void iemMemFetchDataU256Jmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7105{
7106 /* The lazy approach for now... */
7107 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem,
7108 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7109 pu256Dst->au64[0] = pu256Src->au64[0];
7110 pu256Dst->au64[1] = pu256Src->au64[1];
7111 pu256Dst->au64[2] = pu256Src->au64[2];
7112 pu256Dst->au64[3] = pu256Src->au64[3];
7113 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
7114}
7115#endif
7116
7117
7118/**
7119 * Fetches a data oword (octo word) at an aligned address, generally AVX
7120 * related.
7121 *
7122 * Raises \#GP(0) if not aligned.
7123 *
7124 * @returns Strict VBox status code.
7125 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7126 * @param pu256Dst Where to return the oword.
7127 * @param iSegReg The index of the segment register to use for
7128 * this access. The base and limits are checked.
7129 * @param GCPtrMem The address of the guest memory.
7130 */
7131VBOXSTRICTRC iemMemFetchDataU256AlignedSse(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7132{
7133 /* The lazy approach for now... */
7134 PCRTUINT256U pu256Src;
7135 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem,
7136 IEM_ACCESS_DATA_R, (sizeof(*pu256Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7137 if (rc == VINF_SUCCESS)
7138 {
7139 pu256Dst->au64[0] = pu256Src->au64[0];
7140 pu256Dst->au64[1] = pu256Src->au64[1];
7141 pu256Dst->au64[2] = pu256Src->au64[2];
7142 pu256Dst->au64[3] = pu256Src->au64[3];
7143 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
7144 }
7145 return rc;
7146}
7147
7148
7149#ifdef IEM_WITH_SETJMP
7150/**
7151 * Fetches a data oword (octo word) at an aligned address, generally AVX
7152 * related, longjmp on error.
7153 *
7154 * Raises \#GP(0) if not aligned.
7155 *
7156 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7157 * @param pu256Dst Where to return the oword.
7158 * @param iSegReg The index of the segment register to use for
7159 * this access. The base and limits are checked.
7160 * @param GCPtrMem The address of the guest memory.
7161 */
7162void iemMemFetchDataU256AlignedSseJmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7163{
7164 /* The lazy approach for now... */
7165 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
7166 (sizeof(*pu256Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7167 pu256Dst->au64[0] = pu256Src->au64[0];
7168 pu256Dst->au64[1] = pu256Src->au64[1];
7169 pu256Dst->au64[2] = pu256Src->au64[2];
7170 pu256Dst->au64[3] = pu256Src->au64[3];
7171 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
7172}
7173#endif
7174
7175
7176
7177/**
7178 * Fetches a descriptor register (lgdt, lidt).
7179 *
7180 * @returns Strict VBox status code.
7181 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7182 * @param pcbLimit Where to return the limit.
7183 * @param pGCPtrBase Where to return the base.
7184 * @param iSegReg The index of the segment register to use for
7185 * this access. The base and limits are checked.
7186 * @param GCPtrMem The address of the guest memory.
7187 * @param enmOpSize The effective operand size.
7188 */
7189VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPUCC pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
7190 RTGCPTR GCPtrMem, IEMMODE enmOpSize) RT_NOEXCEPT
7191{
7192 /*
7193 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
7194 * little special:
7195 * - The two reads are done separately.
7196 * - Operand size override works in 16-bit and 32-bit code, but not in 64-bit code.
7197 * - We suspect the 386 to actually commit the limit before the base in
7198 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
7199 * don't try to emulate this eccentric behavior, because it's not well
7200 * enough understood and rather hard to trigger.
7201 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
7202 */
7203 VBOXSTRICTRC rcStrict;
7204 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7205 {
7206 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7207 if (rcStrict == VINF_SUCCESS)
7208 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
7209 }
7210 else
7211 {
7212 uint32_t uTmp = 0; /* (Visual C++ maybe used uninitialized) */
7213 if (enmOpSize == IEMMODE_32BIT)
7214 {
7215 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
7216 {
7217 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7218 if (rcStrict == VINF_SUCCESS)
7219 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7220 }
7221 else
7222 {
7223 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
7224 if (rcStrict == VINF_SUCCESS)
7225 {
7226 *pcbLimit = (uint16_t)uTmp;
7227 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7228 }
7229 }
7230 if (rcStrict == VINF_SUCCESS)
7231 *pGCPtrBase = uTmp;
7232 }
7233 else
7234 {
7235 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7236 if (rcStrict == VINF_SUCCESS)
7237 {
7238 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7239 if (rcStrict == VINF_SUCCESS)
7240 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
7241 }
7242 }
7243 }
7244 return rcStrict;
7245}
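
/*
 * Usage sketch for LGDT/LIDT style emulation (GCPtrEffSrc and enmOpSize are
 * placeholders for the decoded operand address and the effective operand size):
 *
 *      uint16_t     cbLimit;
 *      RTGCPTR      GCPtrBase;
 *      VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(pVCpu, &cbLimit, &GCPtrBase, iSegReg, GCPtrEffSrc, enmOpSize);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      // cbLimit and GCPtrBase are now ready to be loaded into the descriptor table register
 */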
7246
7247
7248
7249/**
7250 * Stores a data byte.
7251 *
7252 * @returns Strict VBox status code.
7253 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7254 * @param iSegReg The index of the segment register to use for
7255 * this access. The base and limits are checked.
7256 * @param GCPtrMem The address of the guest memory.
7257 * @param u8Value The value to store.
7258 */
7259VBOXSTRICTRC iemMemStoreDataU8(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value) RT_NOEXCEPT
7260{
7261 /* The lazy approach for now... */
7262 uint8_t *pu8Dst;
7263 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W, 0);
7264 if (rc == VINF_SUCCESS)
7265 {
7266 *pu8Dst = u8Value;
7267 rc = iemMemCommitAndUnmap(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
7268 }
7269 return rc;
7270}
7271
7272
7273#ifdef IEM_WITH_SETJMP
7274/**
7275 * Stores a data byte, longjmp on error.
7276 *
7277 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7278 * @param iSegReg The index of the segment register to use for
7279 * this access. The base and limits are checked.
7280 * @param GCPtrMem The address of the guest memory.
7281 * @param u8Value The value to store.
7282 */
7283void iemMemStoreDataU8Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value) RT_NOEXCEPT
7284{
7285 /* The lazy approach for now... */
7286 uint8_t *pu8Dst = (uint8_t *)iemMemMapJmp(pVCpu, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W, 0);
7287 *pu8Dst = u8Value;
7288 iemMemCommitAndUnmapJmp(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
7289}
7290#endif
7291
7292
7293/**
7294 * Stores a data word.
7295 *
7296 * @returns Strict VBox status code.
7297 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7298 * @param iSegReg The index of the segment register to use for
7299 * this access. The base and limits are checked.
7300 * @param GCPtrMem The address of the guest memory.
7301 * @param u16Value The value to store.
7302 */
7303VBOXSTRICTRC iemMemStoreDataU16(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value) RT_NOEXCEPT
7304{
7305 /* The lazy approach for now... */
7306 uint16_t *pu16Dst;
7307 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem,
7308 IEM_ACCESS_DATA_W, sizeof(*pu16Dst) - 1);
7309 if (rc == VINF_SUCCESS)
7310 {
7311 *pu16Dst = u16Value;
7312 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
7313 }
7314 return rc;
7315}
7316
7317
7318#ifdef IEM_WITH_SETJMP
7319/**
7320 * Stores a data word, longjmp on error.
7321 *
7322 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7323 * @param iSegReg The index of the segment register to use for
7324 * this access. The base and limits are checked.
7325 * @param GCPtrMem The address of the guest memory.
7326 * @param u16Value The value to store.
7327 */
7328void iemMemStoreDataU16Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value) RT_NOEXCEPT
7329{
7330 /* The lazy approach for now... */
7331 uint16_t *pu16Dst = (uint16_t *)iemMemMapJmp(pVCpu, sizeof(*pu16Dst), iSegReg, GCPtrMem,
7332 IEM_ACCESS_DATA_W, sizeof(*pu16Dst) - 1);
7333 *pu16Dst = u16Value;
7334 iemMemCommitAndUnmapJmp(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
7335}
7336#endif
7337
7338
7339/**
7340 * Stores a data dword.
7341 *
7342 * @returns Strict VBox status code.
7343 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7344 * @param iSegReg The index of the segment register to use for
7345 * this access. The base and limits are checked.
7346 * @param GCPtrMem The address of the guest memory.
7347 * @param u32Value The value to store.
7348 */
7349VBOXSTRICTRC iemMemStoreDataU32(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value) RT_NOEXCEPT
7350{
7351 /* The lazy approach for now... */
7352 uint32_t *pu32Dst;
7353 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem,
7354 IEM_ACCESS_DATA_W, sizeof(*pu32Dst) - 1);
7355 if (rc == VINF_SUCCESS)
7356 {
7357 *pu32Dst = u32Value;
7358 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
7359 }
7360 return rc;
7361}
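
/*
 * A quick sketch of the store helper conventions above: byte stores pass 0 for
 * the alignment control, while word/dword/qword stores pass sizeof(value) - 1,
 * making misaligned stores #AC candidates when alignment checking is in effect
 * (CR0.AM and EFLAGS.AC set at CPL 3), as handled by iemMemMap.  GCPtrDst and
 * u32Value below are placeholders supplied by the caller:
 *
 *      // may raise #AC at CPL 3 with CR0.AM/EFLAGS.AC set if GCPtrDst is misaligned
 *      VBOXSTRICTRC rcStrict = iemMemStoreDataU32(pVCpu, X86_SREG_DS, GCPtrDst, u32Value);
 */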
7362
7363
7364#ifdef IEM_WITH_SETJMP
7365/**
7366 * Stores a data dword, longjmp on error.
7367 *
7369 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7370 * @param iSegReg The index of the segment register to use for
7371 * this access. The base and limits are checked.
7372 * @param GCPtrMem The address of the guest memory.
7373 * @param u32Value The value to store.
7374 */
7375void iemMemStoreDataU32Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value) RT_NOEXCEPT
7376{
7377 /* The lazy approach for now... */
7378 uint32_t *pu32Dst = (uint32_t *)iemMemMapJmp(pVCpu, sizeof(*pu32Dst), iSegReg, GCPtrMem,
7379 IEM_ACCESS_DATA_W, sizeof(*pu32Dst) - 1);
7380 *pu32Dst = u32Value;
7381 iemMemCommitAndUnmapJmp(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
7382}
7383#endif
7384
7385
7386/**
7387 * Stores a data qword.
7388 *
7389 * @returns Strict VBox status code.
7390 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7391 * @param iSegReg The index of the segment register to use for
7392 * this access. The base and limits are checked.
7393 * @param GCPtrMem The address of the guest memory.
7394 * @param u64Value The value to store.
7395 */
7396VBOXSTRICTRC iemMemStoreDataU64(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value) RT_NOEXCEPT
7397{
7398 /* The lazy approach for now... */
7399 uint64_t *pu64Dst;
7400 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem,
7401 IEM_ACCESS_DATA_W, sizeof(*pu64Dst) - 1);
7402 if (rc == VINF_SUCCESS)
7403 {
7404 *pu64Dst = u64Value;
7405 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
7406 }
7407 return rc;
7408}
7409
7410
7411#ifdef IEM_WITH_SETJMP
7412/**
7413 * Stores a data qword, longjmp on error.
7414 *
7415 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7416 * @param iSegReg The index of the segment register to use for
7417 * this access. The base and limits are checked.
7418 * @param GCPtrMem The address of the guest memory.
7419 * @param u64Value The value to store.
7420 */
7421void iemMemStoreDataU64Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value) RT_NOEXCEPT
7422{
7423 /* The lazy approach for now... */
7424 uint64_t *pu64Dst = (uint64_t *)iemMemMapJmp(pVCpu, sizeof(*pu64Dst), iSegReg, GCPtrMem,
7425 IEM_ACCESS_DATA_W, sizeof(*pu64Dst) - 1);
7426 *pu64Dst = u64Value;
7427 iemMemCommitAndUnmapJmp(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
7428}
7429#endif
7430
7431
7432/**
7433 * Stores a data dqword.
7434 *
7435 * @returns Strict VBox status code.
7436 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7437 * @param iSegReg The index of the segment register to use for
7438 * this access. The base and limits are checked.
7439 * @param GCPtrMem The address of the guest memory.
7440 * @param u128Value The value to store.
7441 */
7442VBOXSTRICTRC iemMemStoreDataU128(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT
7443{
7444 /* The lazy approach for now... */
7445 PRTUINT128U pu128Dst;
7446 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem,
7447 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7448 if (rc == VINF_SUCCESS)
7449 {
7450 pu128Dst->au64[0] = u128Value.au64[0];
7451 pu128Dst->au64[1] = u128Value.au64[1];
7452 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
7453 }
7454 return rc;
7455}
7456
7457
7458#ifdef IEM_WITH_SETJMP
7459/**
7460 * Stores a data dqword, longjmp on error.
7461 *
7462 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7463 * @param iSegReg The index of the segment register to use for
7464 * this access. The base and limits are checked.
7465 * @param GCPtrMem The address of the guest memory.
7466 * @param u128Value The value to store.
7467 */
7468void iemMemStoreDataU128Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT
7469{
7470 /* The lazy approach for now... */
7471 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem,
7472 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7473 pu128Dst->au64[0] = u128Value.au64[0];
7474 pu128Dst->au64[1] = u128Value.au64[1];
7475 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
7476}
7477#endif
7478
7479
7480/**
7481 * Stores a data dqword, SSE aligned.
7482 *
7483 * @returns Strict VBox status code.
7484 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7485 * @param iSegReg The index of the segment register to use for
7486 * this access. The base and limits are checked.
7487 * @param GCPtrMem The address of the guest memory.
7488 * @param u128Value The value to store.
7489 */
7490VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT
7491{
7492 /* The lazy approach for now... */
7493 PRTUINT128U pu128Dst;
7494 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W,
7495 (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7496 if (rc == VINF_SUCCESS)
7497 {
7498 pu128Dst->au64[0] = u128Value.au64[0];
7499 pu128Dst->au64[1] = u128Value.au64[1];
7500 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
7501 }
7502 return rc;
7503}
7504
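/*
 * Illustrative sketch only (not an actual decoder path): a hypothetical SSE
 * store helper choosing between the alignment-checking store above and the
 * plain 128-bit store.  The names uSrc, fAlignedVariant, iEffSeg and
 * GCPtrEffDst are invented for this example.
 *
 *     RTUINT128U   uSrc;            // value to store, filled in elsewhere
 *     VBOXSTRICTRC rcStrict;
 *     if (fAlignedVariant)          // e.g. a movaps-style store
 *         rcStrict = iemMemStoreDataU128AlignedSse(pVCpu, iEffSeg, GCPtrEffDst, uSrc);
 *     else                          // e.g. a movups-style store
 *         rcStrict = iemMemStoreDataU128(pVCpu, iEffSeg, GCPtrEffDst, uSrc);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;          // #GP(0)/#PF and friends were raised by the helper
 */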
7505
7506#ifdef IEM_WITH_SETJMP
7507/**
7508 * Stores a data dqword, SSE aligned, longjmp on error.
7509 *
7511 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7512 * @param iSegReg The index of the segment register to use for
7513 * this access. The base and limits are checked.
7514 * @param GCPtrMem The address of the guest memory.
7515 * @param u128Value The value to store.
7516 */
7517void iemMemStoreDataU128AlignedSseJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT
7518{
7519 /* The lazy approach for now... */
7520 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W,
7521 (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7522 pu128Dst->au64[0] = u128Value.au64[0];
7523 pu128Dst->au64[1] = u128Value.au64[1];
7524 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
7525}
7526#endif
7527
7528
7529/**
7530 * Stores a data qqword (256 bits).
7531 *
7532 * @returns Strict VBox status code.
7533 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7534 * @param iSegReg The index of the segment register to use for
7535 * this access. The base and limits are checked.
7536 * @param GCPtrMem The address of the guest memory.
7537 * @param pu256Value Pointer to the value to store.
7538 */
7539VBOXSTRICTRC iemMemStoreDataU256(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
7540{
7541 /* The lazy approach for now... */
7542 PRTUINT256U pu256Dst;
7543 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7544 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7545 if (rc == VINF_SUCCESS)
7546 {
7547 pu256Dst->au64[0] = pu256Value->au64[0];
7548 pu256Dst->au64[1] = pu256Value->au64[1];
7549 pu256Dst->au64[2] = pu256Value->au64[2];
7550 pu256Dst->au64[3] = pu256Value->au64[3];
7551 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
7552 }
7553 return rc;
7554}
7555
7556
7557#ifdef IEM_WITH_SETJMP
7558/**
7559 * Stores a data qqword (256 bits), longjmp on error.
7560 *
7561 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7562 * @param iSegReg The index of the segment register to use for
7563 * this access. The base and limits are checked.
7564 * @param GCPtrMem The address of the guest memory.
7565 * @param pu256Value Pointer to the value to store.
7566 */
7567void iemMemStoreDataU256Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
7568{
7569 /* The lazy approach for now... */
7570 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7571 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7572 pu256Dst->au64[0] = pu256Value->au64[0];
7573 pu256Dst->au64[1] = pu256Value->au64[1];
7574 pu256Dst->au64[2] = pu256Value->au64[2];
7575 pu256Dst->au64[3] = pu256Value->au64[3];
7576 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
7577}
7578#endif
7579
7580
7581/**
7582 * Stores a data qqword (256 bits), AVX \#GP(0) aligned.
7583 *
7584 * @returns Strict VBox status code.
7585 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7586 * @param iSegReg The index of the segment register to use for
7587 * this access. The base and limits are checked.
7588 * @param GCPtrMem The address of the guest memory.
7589 * @param pu256Value Pointer to the value to store.
7590 */
7591VBOXSTRICTRC iemMemStoreDataU256AlignedAvx(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
7592{
7593 /* The lazy approach for now... */
7594 PRTUINT256U pu256Dst;
7595 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7596 IEM_ACCESS_DATA_W, (sizeof(*pu256Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP);
7597 if (rc == VINF_SUCCESS)
7598 {
7599 pu256Dst->au64[0] = pu256Value->au64[0];
7600 pu256Dst->au64[1] = pu256Value->au64[1];
7601 pu256Dst->au64[2] = pu256Value->au64[2];
7602 pu256Dst->au64[3] = pu256Value->au64[3];
7603 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
7604 }
7605 return rc;
7606}
7607
7608
7609#ifdef IEM_WITH_SETJMP
7610/**
7611 * Stores a data qqword (256 bits), AVX \#GP(0) aligned, longjmp on error.
7612 *
7614 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7615 * @param iSegReg The index of the segment register to use for
7616 * this access. The base and limits are checked.
7617 * @param GCPtrMem The address of the guest memory.
7618 * @param pu256Value Pointer to the value to store.
7619 */
7620void iemMemStoreDataU256AlignedAvxJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
7621{
7622 /* The lazy approach for now... */
7623 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7624 IEM_ACCESS_DATA_W, (sizeof(*pu256Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP);
7625 pu256Dst->au64[0] = pu256Value->au64[0];
7626 pu256Dst->au64[1] = pu256Value->au64[1];
7627 pu256Dst->au64[2] = pu256Value->au64[2];
7628 pu256Dst->au64[3] = pu256Value->au64[3];
7629 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
7630}
7631#endif
7632
7633
7634/**
7635 * Stores a descriptor register (sgdt, sidt).
7636 *
7637 * @returns Strict VBox status code.
7638 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7639 * @param cbLimit The limit.
7640 * @param GCPtrBase The base address.
7641 * @param iSegReg The index of the segment register to use for
7642 * this access. The base and limits are checked.
7643 * @param GCPtrMem The address of the guest memory.
7644 */
7645VBOXSTRICTRC iemMemStoreDataXdtr(PVMCPUCC pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7646{
7647 /*
7648 * The SIDT and SGDT instructions actually store the data using two
7649 * independent writes (see bs3CpuBasic2_sidt_sgdt_One). The instructions
7650 * do not respond to opsize prefixes.
7651 */
7652 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
7653 if (rcStrict == VINF_SUCCESS)
7654 {
7655 if (pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT)
7656 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
7657 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
7658 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
7659 else if (pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT)
7660 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
7661 else
7662 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
7663 }
7664 return rcStrict;
7665}
7666
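/*
 * Illustrative sketch only: how an SGDT-style implementation might use the
 * helper above.  iEffSeg and GCPtrEffDst are invented names; the real
 * instruction bodies live in the IEMAllCImpl code.
 *
 *     IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_GDTR);
 *     VBOXSTRICTRC rcStrict = iemMemStoreDataXdtr(pVCpu,
 *                                                 pVCpu->cpum.GstCtx.gdtr.cbGdt,
 *                                                 pVCpu->cpum.GstCtx.gdtr.pGdt,
 *                                                 iEffSeg, GCPtrEffDst);
 */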
7667
7668/**
7669 * Pushes a word onto the stack.
7670 *
7671 * @returns Strict VBox status code.
7672 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7673 * @param u16Value The value to push.
7674 */
7675VBOXSTRICTRC iemMemStackPushU16(PVMCPUCC pVCpu, uint16_t u16Value) RT_NOEXCEPT
7676{
7677 /* Decrement the stack pointer. */
7678 uint64_t uNewRsp;
7679 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 2, &uNewRsp);
7680
7681 /* Write the word the lazy way. */
7682 uint16_t *pu16Dst;
7683 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop,
7684 IEM_ACCESS_STACK_W, sizeof(*pu16Dst) - 1);
7685 if (rc == VINF_SUCCESS)
7686 {
7687 *pu16Dst = u16Value;
7688 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
7689 }
7690
7691 /* Commit the new RSP value unless an access handler made trouble. */
7692 if (rc == VINF_SUCCESS)
7693 pVCpu->cpum.GstCtx.rsp = uNewRsp;
7694
7695 return rc;
7696}
7697
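/*
 * Illustrative sketch only: pushing a word the way an instruction body
 * might, letting the helper above do the RSP bookkeeping and fault raising.
 * u16Imm is an invented name.
 *
 *     VBOXSTRICTRC rcStrict = iemMemStackPushU16(pVCpu, u16Imm);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;          // #SS/#PF etc. already raised by the helper
 */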
7698
7699/**
7700 * Pushes a dword onto the stack.
7701 *
7702 * @returns Strict VBox status code.
7703 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7704 * @param u32Value The value to push.
7705 */
7706VBOXSTRICTRC iemMemStackPushU32(PVMCPUCC pVCpu, uint32_t u32Value) RT_NOEXCEPT
7707{
7708 /* Decrement the stack pointer. */
7709 uint64_t uNewRsp;
7710 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
7711
7712 /* Write the dword the lazy way. */
7713 uint32_t *pu32Dst;
7714 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop,
7715 IEM_ACCESS_STACK_W, sizeof(*pu32Dst) - 1);
7716 if (rc == VINF_SUCCESS)
7717 {
7718 *pu32Dst = u32Value;
7719 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
7720 }
7721
7722 /* Commit the new RSP value unless an access handler made trouble. */
7723 if (rc == VINF_SUCCESS)
7724 pVCpu->cpum.GstCtx.rsp = uNewRsp;
7725
7726 return rc;
7727}
7728
7729
7730/**
7731 * Pushes a dword segment register value onto the stack.
7732 *
7733 * @returns Strict VBox status code.
7734 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7735 * @param u32Value The value to push.
7736 */
7737VBOXSTRICTRC iemMemStackPushU32SReg(PVMCPUCC pVCpu, uint32_t u32Value) RT_NOEXCEPT
7738{
7739 /* Decrement the stack pointer. */
7740 uint64_t uNewRsp;
7741 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
7742
7743 /* The Intel docs talk about zero extending the selector register
7744 value. My actual Intel CPU here might be zero extending the value,
7745 but it still only writes the lower word... */
7746 /** @todo Test this on new HW and on AMD and in 64-bit mode. Also test what
7747 * happens when crossing an electric page boundary: is the high word checked
7748 * for write accessibility or not? Probably it is. What about segment limits?
7749 * It appears this behavior is also shared with trap error codes.
7750 *
7751 * Docs indicate the behavior may have changed around the Pentium or Pentium
7752 * Pro; check on ancient hardware when it actually changed. */
7753 uint16_t *pu16Dst;
7754 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop,
7755 IEM_ACCESS_STACK_RW, sizeof(*pu16Dst) - 1); /** @todo 2 or 4 alignment check for PUSH SS? */
7756 if (rc == VINF_SUCCESS)
7757 {
7758 *pu16Dst = (uint16_t)u32Value;
7759 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_RW);
7760 }
7761
7762 /* Commit the new RSP value unless an access handler made trouble. */
7763 if (rc == VINF_SUCCESS)
7764 pVCpu->cpum.GstCtx.rsp = uNewRsp;
7765
7766 return rc;
7767}
7768
7769
7770/**
7771 * Pushes a qword onto the stack.
7772 *
7773 * @returns Strict VBox status code.
7774 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7775 * @param u64Value The value to push.
7776 */
7777VBOXSTRICTRC iemMemStackPushU64(PVMCPUCC pVCpu, uint64_t u64Value) RT_NOEXCEPT
7778{
7779 /* Decrement the stack pointer. */
7780 uint64_t uNewRsp;
7781 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 8, &uNewRsp);
7782
7783 /* Write the qword the lazy way. */
7784 uint64_t *pu64Dst;
7785 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop,
7786 IEM_ACCESS_STACK_W, sizeof(*pu64Dst) - 1);
7787 if (rc == VINF_SUCCESS)
7788 {
7789 *pu64Dst = u64Value;
7790 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
7791 }
7792
7793 /* Commit the new RSP value unless an access handler made trouble. */
7794 if (rc == VINF_SUCCESS)
7795 pVCpu->cpum.GstCtx.rsp = uNewRsp;
7796
7797 return rc;
7798}
7799
7800
7801/**
7802 * Pops a word from the stack.
7803 *
7804 * @returns Strict VBox status code.
7805 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7806 * @param pu16Value Where to store the popped value.
7807 */
7808VBOXSTRICTRC iemMemStackPopU16(PVMCPUCC pVCpu, uint16_t *pu16Value) RT_NOEXCEPT
7809{
7810 /* Increment the stack pointer. */
7811 uint64_t uNewRsp;
7812 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 2, &uNewRsp);
7813
7814 /* Read the word the lazy way. */
7815 uint16_t const *pu16Src;
7816 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop,
7817 IEM_ACCESS_STACK_R, sizeof(*pu16Src) - 1);
7818 if (rc == VINF_SUCCESS)
7819 {
7820 *pu16Value = *pu16Src;
7821 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
7822
7823 /* Commit the new RSP value. */
7824 if (rc == VINF_SUCCESS)
7825 pVCpu->cpum.GstCtx.rsp = uNewRsp;
7826 }
7827
7828 return rc;
7829}
7830
7831
7832/**
7833 * Pops a dword from the stack.
7834 *
7835 * @returns Strict VBox status code.
7836 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7837 * @param pu32Value Where to store the popped value.
7838 */
7839VBOXSTRICTRC iemMemStackPopU32(PVMCPUCC pVCpu, uint32_t *pu32Value) RT_NOEXCEPT
7840{
7841 /* Increment the stack pointer. */
7842 uint64_t uNewRsp;
7843 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 4, &uNewRsp);
7844
7845 /* Read the dword the lazy way. */
7846 uint32_t const *pu32Src;
7847 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop,
7848 IEM_ACCESS_STACK_R, sizeof(*pu32Src) - 1);
7849 if (rc == VINF_SUCCESS)
7850 {
7851 *pu32Value = *pu32Src;
7852 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
7853
7854 /* Commit the new RSP value. */
7855 if (rc == VINF_SUCCESS)
7856 pVCpu->cpum.GstCtx.rsp = uNewRsp;
7857 }
7858
7859 return rc;
7860}
7861
7862
7863/**
7864 * Pops a qword from the stack.
7865 *
7866 * @returns Strict VBox status code.
7867 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7868 * @param pu64Value Where to store the popped value.
7869 */
7870VBOXSTRICTRC iemMemStackPopU64(PVMCPUCC pVCpu, uint64_t *pu64Value) RT_NOEXCEPT
7871{
7872 /* Increment the stack pointer. */
7873 uint64_t uNewRsp;
7874 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 8, &uNewRsp);
7875
7876 /* Read the qword the lazy way. */
7877 uint64_t const *pu64Src;
7878 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop,
7879 IEM_ACCESS_STACK_R, sizeof(*pu64Src) - 1);
7880 if (rc == VINF_SUCCESS)
7881 {
7882 *pu64Value = *pu64Src;
7883 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
7884
7885 /* Commit the new RSP value. */
7886 if (rc == VINF_SUCCESS)
7887 pVCpu->cpum.GstCtx.rsp = uNewRsp;
7888 }
7889
7890 return rc;
7891}
7892
7893
7894/**
7895 * Pushes a word onto the stack, using a temporary stack pointer.
7896 *
7897 * @returns Strict VBox status code.
7898 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7899 * @param u16Value The value to push.
7900 * @param pTmpRsp Pointer to the temporary stack pointer.
7901 */
7902VBOXSTRICTRC iemMemStackPushU16Ex(PVMCPUCC pVCpu, uint16_t u16Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
7903{
7904 /* Decrement the stack pointer. */
7905 RTUINT64U NewRsp = *pTmpRsp;
7906 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 2);
7907
7908 /* Write the word the lazy way. */
7909 uint16_t *pu16Dst;
7910 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop,
7911 IEM_ACCESS_STACK_W, sizeof(*pu16Dst) - 1);
7912 if (rc == VINF_SUCCESS)
7913 {
7914 *pu16Dst = u16Value;
7915 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
7916 }
7917
7918 /* Commit the new RSP value unless an access handler made trouble. */
7919 if (rc == VINF_SUCCESS)
7920 *pTmpRsp = NewRsp;
7921
7922 return rc;
7923}
7924
7925
7926/**
7927 * Pushes a dword onto the stack, using a temporary stack pointer.
7928 *
7929 * @returns Strict VBox status code.
7930 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7931 * @param u32Value The value to push.
7932 * @param pTmpRsp Pointer to the temporary stack pointer.
7933 */
7934VBOXSTRICTRC iemMemStackPushU32Ex(PVMCPUCC pVCpu, uint32_t u32Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
7935{
7936 /* Decrement the stack pointer. */
7937 RTUINT64U NewRsp = *pTmpRsp;
7938 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 4);
7939
7940 /* Write the dword the lazy way. */
7941 uint32_t *pu32Dst;
7942 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop,
7943 IEM_ACCESS_STACK_W, sizeof(*pu32Dst) - 1);
7944 if (rc == VINF_SUCCESS)
7945 {
7946 *pu32Dst = u32Value;
7947 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
7948 }
7949
7950 /* Commit the new RSP value unless an access handler made trouble. */
7951 if (rc == VINF_SUCCESS)
7952 *pTmpRsp = NewRsp;
7953
7954 return rc;
7955}
7956
7957
7958/**
7959 * Pushes a qword onto the stack, using a temporary stack pointer.
7960 *
7961 * @returns Strict VBox status code.
7962 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7963 * @param u64Value The value to push.
7964 * @param pTmpRsp Pointer to the temporary stack pointer.
7965 */
7966VBOXSTRICTRC iemMemStackPushU64Ex(PVMCPUCC pVCpu, uint64_t u64Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
7967{
7968 /* Decrement the stack pointer. */
7969 RTUINT64U NewRsp = *pTmpRsp;
7970 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 8);
7971
7972 /* Write the qword the lazy way. */
7973 uint64_t *pu64Dst;
7974 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop,
7975 IEM_ACCESS_STACK_W, sizeof(*pu64Dst) - 1);
7976 if (rc == VINF_SUCCESS)
7977 {
7978 *pu64Dst = u64Value;
7979 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
7980 }
7981
7982 /* Commit the new RSP value unless an access handler made trouble. */
7983 if (rc == VINF_SUCCESS)
7984 *pTmpRsp = NewRsp;
7985
7986 return rc;
7987}
7988
7989
7990/**
7991 * Pops a word from the stack, using a temporary stack pointer.
7992 *
7993 * @returns Strict VBox status code.
7994 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7995 * @param pu16Value Where to store the popped value.
7996 * @param pTmpRsp Pointer to the temporary stack pointer.
7997 */
7998VBOXSTRICTRC iemMemStackPopU16Ex(PVMCPUCC pVCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
7999{
8000 /* Increment the stack pointer. */
8001 RTUINT64U NewRsp = *pTmpRsp;
8002 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 2);
8003
8004 /* Read the word the lazy way. */
8005 uint16_t const *pu16Src;
8006 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop,
8007 IEM_ACCESS_STACK_R, sizeof(*pu16Src) - 1);
8008 if (rc == VINF_SUCCESS)
8009 {
8010 *pu16Value = *pu16Src;
8011 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
8012
8013 /* Commit the new RSP value. */
8014 if (rc == VINF_SUCCESS)
8015 *pTmpRsp = NewRsp;
8016 }
8017
8018 return rc;
8019}
8020
8021
8022/**
8023 * Pops a dword from the stack, using a temporary stack pointer.
8024 *
8025 * @returns Strict VBox status code.
8026 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8027 * @param pu32Value Where to store the popped value.
8028 * @param pTmpRsp Pointer to the temporary stack pointer.
8029 */
8030VBOXSTRICTRC iemMemStackPopU32Ex(PVMCPUCC pVCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8031{
8032 /* Increment the stack pointer. */
8033 RTUINT64U NewRsp = *pTmpRsp;
8034 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 4);
8035
8036 /* Read the dword the lazy way. */
8037 uint32_t const *pu32Src;
8038 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop,
8039 IEM_ACCESS_STACK_R, sizeof(*pu32Src) - 1);
8040 if (rc == VINF_SUCCESS)
8041 {
8042 *pu32Value = *pu32Src;
8043 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
8044
8045 /* Commit the new RSP value. */
8046 if (rc == VINF_SUCCESS)
8047 *pTmpRsp = NewRsp;
8048 }
8049
8050 return rc;
8051}
8052
8053
8054/**
8055 * Pops a qword from the stack, using a temporary stack pointer.
8056 *
8057 * @returns Strict VBox status code.
8058 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8059 * @param pu64Value Where to store the popped value.
8060 * @param pTmpRsp Pointer to the temporary stack pointer.
8061 */
8062VBOXSTRICTRC iemMemStackPopU64Ex(PVMCPUCC pVCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8063{
8064 /* Increment the stack pointer. */
8065 RTUINT64U NewRsp = *pTmpRsp;
8066 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 8);
8067
8068 /* Read the qword the lazy way. */
8069 uint64_t const *pu64Src;
8070 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop,
8071 IEM_ACCESS_STACK_R, sizeof(*pu64Src) - 1);
8072 if (rcStrict == VINF_SUCCESS)
8073 {
8074 *pu64Value = *pu64Src;
8075 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
8076
8077 /* Commit the new RSP value. */
8078 if (rcStrict == VINF_SUCCESS)
8079 *pTmpRsp = NewRsp;
8080 }
8081
8082 return rcStrict;
8083}
8084
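/*
 * Illustrative sketch only: the *Ex push/pop variants above operate on a
 * caller-provided RTUINT64U stack pointer, which lets a multi-step operation
 * (far calls, task switches and the like) do several stack accesses and only
 * commit RSP once everything has succeeded.  The names u16Sel and u16Offset
 * are invented for the example.
 *
 *     RTUINT64U    TmpRsp;
 *     TmpRsp.u = pVCpu->cpum.GstCtx.rsp;
 *     VBOXSTRICTRC rcStrict = iemMemStackPushU16Ex(pVCpu, u16Sel, &TmpRsp);
 *     if (rcStrict == VINF_SUCCESS)
 *         rcStrict = iemMemStackPushU16Ex(pVCpu, u16Offset, &TmpRsp);
 *     if (rcStrict == VINF_SUCCESS)
 *         pVCpu->cpum.GstCtx.rsp = TmpRsp.u;   // commit RSP only on full success
 *     return rcStrict;
 */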
8085
8086/**
8087 * Begin a special stack push (used by interrupts, exceptions and such).
8088 *
8089 * This will raise \#SS or \#PF if appropriate.
8090 *
8091 * @returns Strict VBox status code.
8092 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8093 * @param cbMem The number of bytes to push onto the stack.
8094 * @param cbAlign The alignment mask (7, 3, 1).
8095 * @param ppvMem Where to return the pointer to the stack memory.
8096 * As with the other memory functions this could be
8097 * direct access or bounce buffered access, so
8098 * don't commit the register until the commit call
8099 * succeeds.
8100 * @param puNewRsp Where to return the new RSP value. This must be
8101 * passed unchanged to
8102 * iemMemStackPushCommitSpecial().
8103 */
8104VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
8105 void **ppvMem, uint64_t *puNewRsp) RT_NOEXCEPT
8106{
8107 Assert(cbMem < UINT8_MAX);
8108 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, (uint8_t)cbMem, puNewRsp);
8109 return iemMemMap(pVCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop,
8110 IEM_ACCESS_STACK_W, cbAlign);
8111}
8112
8113
8114/**
8115 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
8116 *
8117 * This will update the rSP.
8118 *
8119 * @returns Strict VBox status code.
8120 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8121 * @param pvMem The pointer returned by
8122 * iemMemStackPushBeginSpecial().
8123 * @param uNewRsp The new RSP value returned by
8124 * iemMemStackPushBeginSpecial().
8125 */
8126VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPUCC pVCpu, void *pvMem, uint64_t uNewRsp) RT_NOEXCEPT
8127{
8128 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem, IEM_ACCESS_STACK_W);
8129 if (rcStrict == VINF_SUCCESS)
8130 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8131 return rcStrict;
8132}
8133
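/*
 * Illustrative sketch only: the begin/commit pair above is the protocol used
 * when building exception/interrupt stack frames.  The frame layout (three
 * dwords) and the names uEip, uCs and uEfl are invented for the example.
 *
 *     uint32_t    *pau32Frame;
 *     uint64_t     uNewRsp;
 *     VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pVCpu, 3 * sizeof(uint32_t), 3 /*cbAlign*/,
 *                                                         (void **)&pau32Frame, &uNewRsp);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 *     pau32Frame[0] = uEip;                    // written into the mapping, not yet committed
 *     pau32Frame[1] = uCs;
 *     pau32Frame[2] = uEfl;
 *     rcStrict = iemMemStackPushCommitSpecial(pVCpu, pau32Frame, uNewRsp);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;                     // only now has RSP been updated
 */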
8134
8135/**
8136 * Begin a special stack pop (used by iret, retf and such).
8137 *
8138 * This will raise \#SS or \#PF if appropriate.
8139 *
8140 * @returns Strict VBox status code.
8141 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8142 * @param cbMem The number of bytes to pop from the stack.
8143 * @param cbAlign The alignment mask (7, 3, 1).
8144 * @param ppvMem Where to return the pointer to the stack memory.
8145 * @param puNewRsp Where to return the new RSP value. This must be
8146 * assigned to CPUMCTX::rsp manually some time
8147 * after iemMemStackPopDoneSpecial() has been
8148 * called.
8149 */
8150VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
8151 void const **ppvMem, uint64_t *puNewRsp) RT_NOEXCEPT
8152{
8153 Assert(cbMem < UINT8_MAX);
8154 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, (uint8_t)cbMem, puNewRsp);
8155 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R, cbAlign);
8156}
8157
8158
8159/**
8160 * Continue a special stack pop (used by iret and retf).
8161 *
8162 * This will raise \#SS or \#PF if appropriate.
8163 *
8164 * @returns Strict VBox status code.
8165 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8166 * @param cbMem The number of bytes to pop from the stack.
8167 * @param ppvMem Where to return the pointer to the stack memory.
8168 * @param puNewRsp Where to return the new RSP value. This must be
8169 * assigned to CPUMCTX::rsp manually some time
8170 * after iemMemStackPopDoneSpecial() has been
8171 * called.
8172 */
8173VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPUCC pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp) RT_NOEXCEPT
8174{
8175 Assert(cbMem < UINT8_MAX);
8176 RTUINT64U NewRsp;
8177 NewRsp.u = *puNewRsp;
8178 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 8);
8179 *puNewRsp = NewRsp.u;
8180 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R,
8181 0 /* checked in iemMemStackPopBeginSpecial */);
8182}
8183
8184
8185/**
8186 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
8187 * iemMemStackPopContinueSpecial).
8188 *
8189 * The caller will manually commit the rSP.
8190 *
8191 * @returns Strict VBox status code.
8192 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8193 * @param pvMem The pointer returned by
8194 * iemMemStackPopBeginSpecial() or
8195 * iemMemStackPopContinueSpecial().
8196 */
8197VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPUCC pVCpu, void const *pvMem) RT_NOEXCEPT
8198{
8199 return iemMemCommitAndUnmap(pVCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
8200}
8201
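/*
 * Illustrative sketch only: the begin/continue/done trio above is how
 * iret/retf style code walks a return frame; RSP is committed manually by
 * the caller afterwards.  The frame layout and names are invented here, and
 * a second chunk (e.g. an outer SS:RSP) could be fetched with
 * iemMemStackPopContinueSpecial() before the done call.
 *
 *     uint32_t const *pau32Frame;
 *     uint64_t        uNewRsp;
 *     VBOXSTRICTRC    rcStrict = iemMemStackPopBeginSpecial(pVCpu, 3 * sizeof(uint32_t), 3 /*cbAlign*/,
 *                                                           (void const **)&pau32Frame, &uNewRsp);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 *     uint32_t const uEip = pau32Frame[0];
 *     uint32_t const uCs  = pau32Frame[1];
 *     uint32_t const uEfl = pau32Frame[2];
 *     rcStrict = iemMemStackPopDoneSpecial(pVCpu, pau32Frame);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 *     pVCpu->cpum.GstCtx.rsp = uNewRsp;        // the caller commits RSP itself
 */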
8202
8203/**
8204 * Fetches a system table byte.
8205 *
8206 * @returns Strict VBox status code.
8207 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8208 * @param pbDst Where to return the byte.
8209 * @param iSegReg The index of the segment register to use for
8210 * this access. The base and limits are checked.
8211 * @param GCPtrMem The address of the guest memory.
8212 */
8213VBOXSTRICTRC iemMemFetchSysU8(PVMCPUCC pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8214{
8215 /* The lazy approach for now... */
8216 uint8_t const *pbSrc;
8217 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8218 if (rc == VINF_SUCCESS)
8219 {
8220 *pbDst = *pbSrc;
8221 rc = iemMemCommitAndUnmap(pVCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
8222 }
8223 return rc;
8224}
8225
8226
8227/**
8228 * Fetches a system table word.
8229 *
8230 * @returns Strict VBox status code.
8231 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8232 * @param pu16Dst Where to return the word.
8233 * @param iSegReg The index of the segment register to use for
8234 * this access. The base and limits are checked.
8235 * @param GCPtrMem The address of the guest memory.
8236 */
8237VBOXSTRICTRC iemMemFetchSysU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8238{
8239 /* The lazy approach for now... */
8240 uint16_t const *pu16Src;
8241 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8242 if (rc == VINF_SUCCESS)
8243 {
8244 *pu16Dst = *pu16Src;
8245 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
8246 }
8247 return rc;
8248}
8249
8250
8251/**
8252 * Fetches a system table dword.
8253 *
8254 * @returns Strict VBox status code.
8255 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8256 * @param pu32Dst Where to return the dword.
8257 * @param iSegReg The index of the segment register to use for
8258 * this access. The base and limits are checked.
8259 * @param GCPtrMem The address of the guest memory.
8260 */
8261VBOXSTRICTRC iemMemFetchSysU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8262{
8263 /* The lazy approach for now... */
8264 uint32_t const *pu32Src;
8265 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8266 if (rc == VINF_SUCCESS)
8267 {
8268 *pu32Dst = *pu32Src;
8269 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
8270 }
8271 return rc;
8272}
8273
8274
8275/**
8276 * Fetches a system table qword.
8277 *
8278 * @returns Strict VBox status code.
8279 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8280 * @param pu64Dst Where to return the qword.
8281 * @param iSegReg The index of the segment register to use for
8282 * this access. The base and limits are checked.
8283 * @param GCPtrMem The address of the guest memory.
8284 */
8285VBOXSTRICTRC iemMemFetchSysU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8286{
8287 /* The lazy approach for now... */
8288 uint64_t const *pu64Src;
8289 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8290 if (rc == VINF_SUCCESS)
8291 {
8292 *pu64Dst = *pu64Src;
8293 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
8294 }
8295 return rc;
8296}
8297
8298
8299/**
8300 * Fetches a descriptor table entry with caller specified error code.
8301 *
8302 * @returns Strict VBox status code.
8303 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8304 * @param pDesc Where to return the descriptor table entry.
8305 * @param uSel The selector which table entry to fetch.
8306 * @param uXcpt The exception to raise on table lookup error.
8307 * @param uErrorCode The error code associated with the exception.
8308 */
8309static VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,
8310 uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT
8311{
8312 AssertPtr(pDesc);
8313 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
8314
8315 /** @todo did the 286 require all 8 bytes to be accessible? */
8316 /*
8317 * Get the selector table base and check bounds.
8318 */
8319 RTGCPTR GCPtrBase;
8320 if (uSel & X86_SEL_LDT)
8321 {
8322 if ( !pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present
8323 || (uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.ldtr.u32Limit )
8324 {
8325 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
8326 uSel, pVCpu->cpum.GstCtx.ldtr.u32Limit, pVCpu->cpum.GstCtx.ldtr.Sel));
8327 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8328 uErrorCode, 0);
8329 }
8330
8331 Assert(pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present);
8332 GCPtrBase = pVCpu->cpum.GstCtx.ldtr.u64Base;
8333 }
8334 else
8335 {
8336 if ((uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.gdtr.cbGdt)
8337 {
8338 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pVCpu->cpum.GstCtx.gdtr.cbGdt));
8339 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8340 uErrorCode, 0);
8341 }
8342 GCPtrBase = pVCpu->cpum.GstCtx.gdtr.pGdt;
8343 }
8344
8345 /*
8346 * Read the legacy descriptor and maybe the long mode extensions if
8347 * required.
8348 */
8349 VBOXSTRICTRC rcStrict;
8350 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)
8351 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
8352 else
8353 {
8354 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);
8355 if (rcStrict == VINF_SUCCESS)
8356 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);
8357 if (rcStrict == VINF_SUCCESS)
8358 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);
8359 if (rcStrict == VINF_SUCCESS)
8360 pDesc->Legacy.au16[3] = 0;
8361 else
8362 return rcStrict;
8363 }
8364
8365 if (rcStrict == VINF_SUCCESS)
8366 {
8367 if ( !IEM_IS_LONG_MODE(pVCpu)
8368 || pDesc->Legacy.Gen.u1DescType)
8369 pDesc->Long.au64[1] = 0;
8370 else if ((uint32_t)(uSel | X86_SEL_RPL_LDT) + 8 <= (uSel & X86_SEL_LDT ? pVCpu->cpum.GstCtx.ldtr.u32Limit : pVCpu->cpum.GstCtx.gdtr.cbGdt))
8371 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
8372 else
8373 {
8374 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
8375 /** @todo is this the right exception? */
8376 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
8377 }
8378 }
8379 return rcStrict;
8380}
8381
8382
8383/**
8384 * Fetches a descriptor table entry.
8385 *
8386 * @returns Strict VBox status code.
8387 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8388 * @param pDesc Where to return the descriptor table entry.
8389 * @param uSel The selector which table entry to fetch.
8390 * @param uXcpt The exception to raise on table lookup error.
8391 */
8392VBOXSTRICTRC iemMemFetchSelDesc(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt) RT_NOEXCEPT
8393{
8394 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
8395}
8396
8397
8398/**
8399 * Marks the selector descriptor as accessed (only non-system descriptors).
8400 *
8401 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
8402 * will therefore skip the limit checks.
8403 *
8404 * @returns Strict VBox status code.
8405 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8406 * @param uSel The selector.
8407 */
8408VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
8409{
8410 /*
8411 * Get the selector table base and calculate the entry address.
8412 */
8413 RTGCPTR GCPtr = uSel & X86_SEL_LDT
8414 ? pVCpu->cpum.GstCtx.ldtr.u64Base
8415 : pVCpu->cpum.GstCtx.gdtr.pGdt;
8416 GCPtr += uSel & X86_SEL_MASK;
8417
8418 /*
8419 * ASMAtomicBitSet will assert if the address is misaligned, so do some
8420 * ugly stuff to avoid this. This will make sure it's an atomic access
8421 * as well as more or less remove any question about 8-bit or 32-bit accesses.
8422 */
8423 VBOXSTRICTRC rcStrict;
8424 uint32_t volatile *pu32;
8425 if ((GCPtr & 3) == 0)
8426 {
8427 /* The normal case, map the 32-bit bits around the accessed bit (40). */
8428 GCPtr += 2 + 2;
8429 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0);
8430 if (rcStrict != VINF_SUCCESS)
8431 return rcStrict;
8432 ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
8433 }
8434 else
8435 {
8436 /* The misaligned GDT/LDT case, map the whole thing. */
8437 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0);
8438 if (rcStrict != VINF_SUCCESS)
8439 return rcStrict;
8440 switch ((uintptr_t)pu32 & 3)
8441 {
8442 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
8443 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
8444 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
8445 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
8446 }
8447 }
8448
8449 return iemMemCommitAndUnmap(pVCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
8450}
8451
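/*
 * Illustrative sketch only: a typical pairing of the descriptor fetch and
 * accessed-bit helpers above, roughly what segment-loading code does.  uSel
 * is an invented name and the descriptor checks are heavily abbreviated.
 *
 *     IEMSELDESC   Desc;
 *     VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_GP);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 *     // ... validate Desc.Legacy.Gen (type, DPL, present) here ...
 *     if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
 *     {
 *         rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
 *         if (rcStrict != VINF_SUCCESS)
 *             return rcStrict;
 *     }
 */
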
8452/** @} */
8453
8454/** @name Opcode Helpers.
8455 * @{
8456 */
8457
8458/**
8459 * Calculates the effective address of a ModR/M memory operand.
8460 *
8461 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8462 *
8463 * @return Strict VBox status code.
8464 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8465 * @param bRm The ModRM byte.
8466 * @param cbImm The size of any immediate following the
8467 * effective address opcode bytes. Important for
8468 * RIP relative addressing.
8469 * @param pGCPtrEff Where to return the effective address.
8470 */
8471VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPUCC pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff) RT_NOEXCEPT
8472{
8473 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
8474# define SET_SS_DEF() \
8475 do \
8476 { \
8477 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
8478 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
8479 } while (0)
8480
8481 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
8482 {
8483/** @todo Check the effective address size crap! */
8484 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
8485 {
8486 uint16_t u16EffAddr;
8487
8488 /* Handle the disp16 form with no registers first. */
8489 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
8490 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
8491 else
8492 {
8493 /* Get the displacement. */
8494 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8495 {
8496 case 0: u16EffAddr = 0; break;
8497 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
8498 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
8499 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
8500 }
8501
8502 /* Add the base and index registers to the disp. */
8503 switch (bRm & X86_MODRM_RM_MASK)
8504 {
8505 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
8506 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
8507 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
8508 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
8509 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
8510 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
8511 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
8512 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
8513 }
8514 }
8515
8516 *pGCPtrEff = u16EffAddr;
8517 }
8518 else
8519 {
8520 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8521 uint32_t u32EffAddr;
8522
8523 /* Handle the disp32 form with no registers first. */
8524 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8525 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
8526 else
8527 {
8528 /* Get the register (or SIB) value. */
8529 switch ((bRm & X86_MODRM_RM_MASK))
8530 {
8531 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8532 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8533 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8534 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8535 case 4: /* SIB */
8536 {
8537 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8538
8539 /* Get the index and scale it. */
8540 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
8541 {
8542 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8543 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8544 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8545 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8546 case 4: u32EffAddr = 0; /*none */ break;
8547 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
8548 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8549 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8550 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8551 }
8552 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8553
8554 /* add base */
8555 switch (bSib & X86_SIB_BASE_MASK)
8556 {
8557 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
8558 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
8559 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
8560 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
8561 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp; SET_SS_DEF(); break;
8562 case 5:
8563 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8564 {
8565 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
8566 SET_SS_DEF();
8567 }
8568 else
8569 {
8570 uint32_t u32Disp;
8571 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8572 u32EffAddr += u32Disp;
8573 }
8574 break;
8575 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
8576 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
8577 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8578 }
8579 break;
8580 }
8581 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
8582 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8583 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8584 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8585 }
8586
8587 /* Get and add the displacement. */
8588 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8589 {
8590 case 0:
8591 break;
8592 case 1:
8593 {
8594 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8595 u32EffAddr += i8Disp;
8596 break;
8597 }
8598 case 2:
8599 {
8600 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8601 u32EffAddr += u32Disp;
8602 break;
8603 }
8604 default:
8605 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
8606 }
8607
8608 }
8609 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
8610 *pGCPtrEff = u32EffAddr;
8611 else
8612 {
8613 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
8614 *pGCPtrEff = u32EffAddr & UINT16_MAX;
8615 }
8616 }
8617 }
8618 else
8619 {
8620 uint64_t u64EffAddr;
8621
8622 /* Handle the rip+disp32 form with no registers first. */
8623 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8624 {
8625 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
8626 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
8627 }
8628 else
8629 {
8630 /* Get the register (or SIB) value. */
8631 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
8632 {
8633 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8634 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8635 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8636 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8637 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
8638 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8639 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8640 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8641 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8642 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8643 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8644 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8645 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8646 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8647 /* SIB */
8648 case 4:
8649 case 12:
8650 {
8651 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8652
8653 /* Get the index and scale it. */
8654 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
8655 {
8656 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8657 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8658 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8659 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8660 case 4: u64EffAddr = 0; /*none */ break;
8661 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
8662 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8663 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8664 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8665 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8666 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8667 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8668 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
8669 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8670 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8671 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8672 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8673 }
8674 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8675
8676 /* add base */
8677 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
8678 {
8679 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
8680 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
8681 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
8682 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
8683 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp; SET_SS_DEF(); break;
8684 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
8685 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
8686 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
8687 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
8688 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
8689 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
8690 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
8691 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
8692 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
8693 /* complicated encodings */
8694 case 5:
8695 case 13:
8696 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8697 {
8698 if (!pVCpu->iem.s.uRexB)
8699 {
8700 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
8701 SET_SS_DEF();
8702 }
8703 else
8704 u64EffAddr += pVCpu->cpum.GstCtx.r13;
8705 }
8706 else
8707 {
8708 uint32_t u32Disp;
8709 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8710 u64EffAddr += (int32_t)u32Disp;
8711 }
8712 break;
8713 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8714 }
8715 break;
8716 }
8717 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8718 }
8719
8720 /* Get and add the displacement. */
8721 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8722 {
8723 case 0:
8724 break;
8725 case 1:
8726 {
8727 int8_t i8Disp;
8728 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8729 u64EffAddr += i8Disp;
8730 break;
8731 }
8732 case 2:
8733 {
8734 uint32_t u32Disp;
8735 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8736 u64EffAddr += (int32_t)u32Disp;
8737 break;
8738 }
8739 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
8740 }
8741
8742 }
8743
8744 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
8745 *pGCPtrEff = u64EffAddr;
8746 else
8747 {
8748 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8749 *pGCPtrEff = u64EffAddr & UINT32_MAX;
8750 }
8751 }
8752
8753 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
8754 return VINF_SUCCESS;
8755}
8756
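/*
 * Illustrative sketch only: how a decoder function typically consumes the
 * helper above via the IEM_MC_CALC_RM_EFF_ADDR machinery, shown here as a
 * direct call.  bRm is assumed to have been fetched already and u32Value is
 * an invented name.
 *
 *     RTGCPTR      GCPtrEffDst;
 *     VBOXSTRICTRC rcStrict = iemOpHlpCalcRmEffAddr(pVCpu, bRm, 0 /*cbImm*/, &GCPtrEffDst);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 *     rcStrict = iemMemStoreDataU32(pVCpu, pVCpu->iem.s.iEffSeg, GCPtrEffDst, u32Value);
 */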
8757
8758/**
8759 * Calculates the effective address of a ModR/M memory operand.
8760 *
8761 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8762 *
8763 * @return Strict VBox status code.
8764 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8765 * @param bRm The ModRM byte.
8766 * @param cbImm The size of any immediate following the
8767 * effective address opcode bytes. Important for
8768 * RIP relative addressing.
8769 * @param pGCPtrEff Where to return the effective address.
8770 * @param offRsp RSP displacement.
8771 */
8772VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPUCC pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff, int8_t offRsp) RT_NOEXCEPT
8773{
8774 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
8775# define SET_SS_DEF() \
8776 do \
8777 { \
8778 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
8779 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
8780 } while (0)
8781
8782 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
8783 {
8784/** @todo Check the effective address size crap! */
8785 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
8786 {
8787 uint16_t u16EffAddr;
8788
8789 /* Handle the disp16 form with no registers first. */
8790 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
8791 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
8792 else
8793 {
8794 /* Get the displacement. */
8795 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8796 {
8797 case 0: u16EffAddr = 0; break;
8798 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
8799 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
8800 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
8801 }
8802
8803 /* Add the base and index registers to the disp. */
8804 switch (bRm & X86_MODRM_RM_MASK)
8805 {
8806 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
8807 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
8808 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
8809 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
8810 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
8811 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
8812 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
8813 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
8814 }
8815 }
8816
8817 *pGCPtrEff = u16EffAddr;
8818 }
8819 else
8820 {
8821 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8822 uint32_t u32EffAddr;
8823
8824 /* Handle the disp32 form with no registers first. */
8825 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8826 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
8827 else
8828 {
8829 /* Get the register (or SIB) value. */
8830 switch ((bRm & X86_MODRM_RM_MASK))
8831 {
8832 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8833 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8834 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8835 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8836 case 4: /* SIB */
8837 {
8838 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8839
8840 /* Get the index and scale it. */
8841 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
8842 {
8843 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8844 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8845 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8846 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8847 case 4: u32EffAddr = 0; /*none */ break;
8848 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
8849 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8850 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8851 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8852 }
8853 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8854
8855 /* add base */
8856 switch (bSib & X86_SIB_BASE_MASK)
8857 {
8858 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
8859 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
8860 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
8861 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
8862 case 4:
8863 u32EffAddr += pVCpu->cpum.GstCtx.esp + offRsp;
8864 SET_SS_DEF();
8865 break;
8866 case 5:
8867 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8868 {
8869 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
8870 SET_SS_DEF();
8871 }
8872 else
8873 {
8874 uint32_t u32Disp;
8875 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8876 u32EffAddr += u32Disp;
8877 }
8878 break;
8879 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
8880 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
8881 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8882 }
8883 break;
8884 }
8885 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
8886 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8887 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8888 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8889 }
8890
8891 /* Get and add the displacement. */
8892 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8893 {
8894 case 0:
8895 break;
8896 case 1:
8897 {
8898 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8899 u32EffAddr += i8Disp;
8900 break;
8901 }
8902 case 2:
8903 {
8904 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8905 u32EffAddr += u32Disp;
8906 break;
8907 }
8908 default:
8909 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
8910 }
8911
8912 }
8913 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
8914 *pGCPtrEff = u32EffAddr;
8915 else
8916 {
8917 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
8918 *pGCPtrEff = u32EffAddr & UINT16_MAX;
8919 }
8920 }
8921 }
8922 else
8923 {
8924 uint64_t u64EffAddr;
8925
8926 /* Handle the rip+disp32 form with no registers first. */
8927 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8928 {
8929 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
8930 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
8931 }
8932 else
8933 {
8934 /* Get the register (or SIB) value. */
8935 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
8936 {
8937 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8938 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8939 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8940 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8941 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
8942 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8943 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8944 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8945 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8946 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8947 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8948 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8949 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8950 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8951 /* SIB */
8952 case 4:
8953 case 12:
8954 {
8955 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8956
8957 /* Get the index and scale it. */
8958 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
8959 {
8960 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8961 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8962 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8963 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8964 case 4: u64EffAddr = 0; /*none */ break;
8965 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
8966 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8967 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8968 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8969 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8970 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8971 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8972 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
8973 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8974 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8975 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8976 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8977 }
8978 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8979
8980 /* add base */
8981 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
8982 {
8983 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
8984 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
8985 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
8986 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
8987 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + offRsp; SET_SS_DEF(); break;
8988 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
8989 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
8990 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
8991 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
8992 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
8993 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
8994 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
8995 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
8996 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
8997 /* complicated encodings */
8998 case 5:
8999 case 13:
9000 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9001 {
9002 if (!pVCpu->iem.s.uRexB)
9003 {
9004 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
9005 SET_SS_DEF();
9006 }
9007 else
9008 u64EffAddr += pVCpu->cpum.GstCtx.r13;
9009 }
9010 else
9011 {
9012 uint32_t u32Disp;
9013 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9014 u64EffAddr += (int32_t)u32Disp;
9015 }
9016 break;
9017 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9018 }
9019 break;
9020 }
9021 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9022 }
9023
9024 /* Get and add the displacement. */
9025 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9026 {
9027 case 0:
9028 break;
9029 case 1:
9030 {
9031 int8_t i8Disp;
9032 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9033 u64EffAddr += i8Disp;
9034 break;
9035 }
9036 case 2:
9037 {
9038 uint32_t u32Disp;
9039 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9040 u64EffAddr += (int32_t)u32Disp;
9041 break;
9042 }
9043 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
9044 }
9045
9046 }
9047
9048 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
9049 *pGCPtrEff = u64EffAddr;
9050 else
9051 {
9052 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9053 *pGCPtrEff = u64EffAddr & UINT32_MAX;
9054 }
9055 }
9056
9057 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
9058 return VINF_SUCCESS;
9059}
9060
9061
9062#ifdef IEM_WITH_SETJMP
9063/**
9064 * Calculates the effective address of a ModR/M memory operand.
9065 *
9066 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
9067 *
9068 * May longjmp on internal error.
9069 *
9070 * @return The effective address.
9071 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9072 * @param bRm The ModRM byte.
9073 * @param cbImm The size of any immediate following the
9074 * effective address opcode bytes. Important for
9075 * RIP relative addressing.
9076 */
9077RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPUCC pVCpu, uint8_t bRm, uint8_t cbImm) RT_NOEXCEPT
9078{
9079 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
9080# define SET_SS_DEF() \
9081 do \
9082 { \
9083 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
9084 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
9085 } while (0)
9086
9087 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
9088 {
9089/** @todo Check the effective address size crap! */
9090 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
9091 {
9092 uint16_t u16EffAddr;
9093
9094 /* Handle the disp16 form with no registers first. */
9095 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
9096 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
9097 else
9098 {
9099                /* Get the displacement. */
9100 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9101 {
9102 case 0: u16EffAddr = 0; break;
9103 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
9104 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
9105 default: AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_1)); /* (caller checked for these) */
9106 }
9107
9108 /* Add the base and index registers to the disp. */
9109 switch (bRm & X86_MODRM_RM_MASK)
9110 {
9111 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
9112 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
9113 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
9114 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
9115 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
9116 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
9117 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
9118 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
9119 }
9120 }
9121
9122 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
9123 return u16EffAddr;
9124 }
9125
9126 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9127 uint32_t u32EffAddr;
9128
9129 /* Handle the disp32 form with no registers first. */
9130 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9131 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
9132 else
9133 {
9134 /* Get the register (or SIB) value. */
9135 switch ((bRm & X86_MODRM_RM_MASK))
9136 {
9137 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
9138 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
9139 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
9140 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
9141 case 4: /* SIB */
9142 {
9143 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9144
9145 /* Get the index and scale it. */
9146 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
9147 {
9148 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
9149 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
9150 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
9151 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
9152 case 4: u32EffAddr = 0; /*none */ break;
9153 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
9154 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
9155 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
9156 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9157 }
9158 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9159
9160 /* add base */
9161 switch (bSib & X86_SIB_BASE_MASK)
9162 {
9163 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
9164 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
9165 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
9166 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
9167 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp; SET_SS_DEF(); break;
9168 case 5:
9169 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9170 {
9171 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
9172 SET_SS_DEF();
9173 }
9174 else
9175 {
9176 uint32_t u32Disp;
9177 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9178 u32EffAddr += u32Disp;
9179 }
9180 break;
9181 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
9182 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
9183 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9184 }
9185 break;
9186 }
9187 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
9188 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
9189 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
9190 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9191 }
9192
9193 /* Get and add the displacement. */
9194 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9195 {
9196 case 0:
9197 break;
9198 case 1:
9199 {
9200 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9201 u32EffAddr += i8Disp;
9202 break;
9203 }
9204 case 2:
9205 {
9206 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9207 u32EffAddr += u32Disp;
9208 break;
9209 }
9210 default:
9211 AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_2)); /* (caller checked for these) */
9212 }
9213 }
9214
9215 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
9216 {
9217 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
9218 return u32EffAddr;
9219 }
9220 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
9221 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX32\n", u32EffAddr & UINT16_MAX));
9222 return u32EffAddr & UINT16_MAX;
9223 }
9224
9225 uint64_t u64EffAddr;
9226
9227 /* Handle the rip+disp32 form with no registers first. */
9228 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9229 {
9230 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
9231 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
9232 }
9233 else
9234 {
9235 /* Get the register (or SIB) value. */
9236 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
9237 {
9238 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9239 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9240 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9241 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9242 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
9243 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9244 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9245 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9246 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9247 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9248 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9249 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9250 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9251 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9252 /* SIB */
9253 case 4:
9254 case 12:
9255 {
9256 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9257
9258 /* Get the index and scale it. */
9259 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
9260 {
9261 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9262 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9263 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9264 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9265 case 4: u64EffAddr = 0; /*none */ break;
9266 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
9267 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9268 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9269 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9270 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9271 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9272 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9273 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
9274 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9275 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9276 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9277 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9278 }
9279 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9280
9281 /* add base */
9282 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
9283 {
9284 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
9285 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
9286 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
9287 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
9288 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp; SET_SS_DEF(); break;
9289 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
9290 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
9291 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
9292 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
9293 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
9294 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
9295 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
9296 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
9297 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
9298 /* complicated encodings */
9299 case 5:
9300 case 13:
9301 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9302 {
9303 if (!pVCpu->iem.s.uRexB)
9304 {
9305 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
9306 SET_SS_DEF();
9307 }
9308 else
9309 u64EffAddr += pVCpu->cpum.GstCtx.r13;
9310 }
9311 else
9312 {
9313 uint32_t u32Disp;
9314 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9315 u64EffAddr += (int32_t)u32Disp;
9316 }
9317 break;
9318 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9319 }
9320 break;
9321 }
9322 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9323 }
9324
9325 /* Get and add the displacement. */
9326 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9327 {
9328 case 0:
9329 break;
9330 case 1:
9331 {
9332 int8_t i8Disp;
9333 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9334 u64EffAddr += i8Disp;
9335 break;
9336 }
9337 case 2:
9338 {
9339 uint32_t u32Disp;
9340 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9341 u64EffAddr += (int32_t)u32Disp;
9342 break;
9343 }
9344 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
9345 }
9346
9347 }
9348
9349 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
9350 {
9351 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
9352 return u64EffAddr;
9353 }
9354 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9355 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
9356 return u64EffAddr & UINT32_MAX;
9357}
9358#endif /* IEM_WITH_SETJMP */
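
/*
 * Worked example (hypothetical encoding, illustrative only): the sketch below
 * mirrors the 32-bit ModR/M+SIB path of the helpers above for the instruction
 * "mov eax, [ebx+ecx*2+0x10]" (8B 44 4B 10).  ModRM=0x44 gives mod=01/rm=100, so
 * a SIB byte and a disp8 follow; SIB=0x4B selects scale *2, index=ecx, base=ebx.
 * The helper name and the hard-coded bytes are assumptions of this example.
 */
#if 0 /* illustrative sketch only */
static uint32_t iemExampleCalcEffAddrSketch(PVMCPUCC pVCpu)
{
    uint8_t const bSib   = 0x4b;  /* scale=01 (*2), index=001 (ecx), base=011 (ebx) */
    int8_t  const i8Disp = 0x10;  /* disp8, required because ModRM.mod=01 */
    uint32_t uEffAddr = pVCpu->cpum.GstCtx.ecx;                            /* index */
    uEffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;      /* scale */
    uEffAddr  += pVCpu->cpum.GstCtx.ebx;                                   /* base  */
    uEffAddr  += i8Disp;                                                   /* disp8 */
    return uEffAddr;            /* == ebx + ecx*2 + 0x10, as computed by the code above */
}
#endif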
9359
9360/** @} */
9361
9362
9363#ifdef LOG_ENABLED
9364/**
9365 * Logs the current instruction.
9366 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9367 * @param fSameCtx Set if we have the same context information as the VMM,
9368 * clear if we may have already executed an instruction in
9369 * our debug context. When clear, we assume IEMCPU holds
9370 * valid CPU mode info.
9371 *
9372 * The @a fSameCtx parameter is now misleading and obsolete.
9373 * @param pszFunction The IEM function doing the execution.
9374 */
9375static void iemLogCurInstr(PVMCPUCC pVCpu, bool fSameCtx, const char *pszFunction) RT_NOEXCEPT
9376{
9377# ifdef IN_RING3
9378 if (LogIs2Enabled())
9379 {
9380 char szInstr[256];
9381 uint32_t cbInstr = 0;
9382 if (fSameCtx)
9383 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
9384 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
9385 szInstr, sizeof(szInstr), &cbInstr);
9386 else
9387 {
9388 uint32_t fFlags = 0;
9389 switch (pVCpu->iem.s.enmCpuMode)
9390 {
9391 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
9392 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
9393 case IEMMODE_16BIT:
9394 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) || pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
9395 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
9396 else
9397 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
9398 break;
9399 }
9400 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fFlags,
9401 szInstr, sizeof(szInstr), &cbInstr);
9402 }
9403
9404 PCX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
9405 Log2(("**** %s\n"
9406 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
9407 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
9408 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
9409 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
9410 " %s\n"
9411 , pszFunction,
9412 pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ebx, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.esi, pVCpu->cpum.GstCtx.edi,
9413 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL, pVCpu->cpum.GstCtx.tr.Sel,
9414 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ds.Sel, pVCpu->cpum.GstCtx.es.Sel,
9415 pVCpu->cpum.GstCtx.fs.Sel, pVCpu->cpum.GstCtx.gs.Sel, pVCpu->cpum.GstCtx.eflags.u,
9416 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
9417 szInstr));
9418
9419 if (LogIs3Enabled())
9420 DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
9421 }
9422 else
9423# endif
9424 LogFlow(("%s: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n", pszFunction, pVCpu->cpum.GstCtx.cs.Sel,
9425 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u));
9426 RT_NOREF_PV(pVCpu); RT_NOREF_PV(fSameCtx);
9427}
9428#endif /* LOG_ENABLED */
9429
9430
9431#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9432/**
9433 * Deals with VMCPU_FF_VMX_APIC_WRITE, VMCPU_FF_VMX_MTF, VMCPU_FF_VMX_NMI_WINDOW,
9434 * VMCPU_FF_VMX_PREEMPT_TIMER and VMCPU_FF_VMX_INT_WINDOW.
9435 *
9436 * @returns Modified rcStrict.
9437 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9438 * @param rcStrict The instruction execution status.
9439 */
9440static VBOXSTRICTRC iemHandleNestedInstructionBoundraryFFs(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict) RT_NOEXCEPT
9441{
9442 Assert(CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)));
9443 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF))
9444 {
9445 /* VMX preemption timer takes priority over NMI-window exits. */
9446 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
9447 {
9448 rcStrict = iemVmxVmexitPreemptTimer(pVCpu);
9449 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER));
9450 }
9451 /*
9452 * Check remaining intercepts.
9453 *
9454 * NMI-window and Interrupt-window VM-exits.
9455 * Interrupt shadow (block-by-STI and Mov SS) inhibits interrupts and may also block NMIs.
9456 * Event injection during VM-entry takes priority over NMI-window and interrupt-window VM-exits.
9457 *
9458 * See Intel spec. 26.7.6 "NMI-Window Exiting".
9459 * See Intel spec. 26.7.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
9460 */
9461 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW | VMCPU_FF_VMX_INT_WINDOW)
9462 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
9463 && !TRPMHasTrap(pVCpu))
9464 {
9465 Assert(CPUMIsGuestVmxInterceptEvents(&pVCpu->cpum.GstCtx));
9466 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW)
9467 && CPUMIsGuestVmxVirtNmiBlocking(&pVCpu->cpum.GstCtx))
9468 {
9469 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_NMI_WINDOW, 0 /* u64ExitQual */);
9470 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW));
9471 }
9472 else if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW)
9473 && CPUMIsGuestVmxVirtIntrEnabled(&pVCpu->cpum.GstCtx))
9474 {
9475 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_INT_WINDOW, 0 /* u64ExitQual */);
9476 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW));
9477 }
9478 }
9479 }
9480 /* TPR-below threshold/APIC write has the highest priority. */
9481 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
9482 {
9483 rcStrict = iemVmxApicWriteEmulation(pVCpu);
9484 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
9485 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE));
9486 }
9487 /* MTF takes priority over VMX-preemption timer. */
9488 else
9489 {
9490 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* u64ExitQual */);
9491 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
9492 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF));
9493 }
9494 return rcStrict;
9495}
9496#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
9497
9498
9499/**
9500 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
9501 * IEMExecOneWithPrefetchedByPC.
9502 *
9503 * Similar code is found in IEMExecLots.
9504 *
9505 * @return Strict VBox status code.
9506 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9507 * @param fExecuteInhibit If set, execute the instruction following CLI,
9508 * POP SS and MOV SS,GR.
9509 * @param pszFunction The calling function name.
9510 */
9511DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPUCC pVCpu, bool fExecuteInhibit, const char *pszFunction)
9512{
9513 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9514 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9515 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9516 RT_NOREF_PV(pszFunction);
9517
9518#ifdef IEM_WITH_SETJMP
9519 VBOXSTRICTRC rcStrict;
9520 jmp_buf JmpBuf;
9521 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
9522 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
9523 if ((rcStrict = setjmp(JmpBuf)) == 0)
9524 {
9525 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
9526 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
9527 }
9528 else
9529 pVCpu->iem.s.cLongJumps++;
9530 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
9531#else
9532 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
9533 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
9534#endif
9535 if (rcStrict == VINF_SUCCESS)
9536 pVCpu->iem.s.cInstructions++;
9537 if (pVCpu->iem.s.cActiveMappings > 0)
9538 {
9539 Assert(rcStrict != VINF_SUCCESS);
9540 iemMemRollback(pVCpu);
9541 }
9542 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9543 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9544 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9545
9546//#ifdef DEBUG
9547// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
9548//#endif
9549
9550#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9551 /*
9552 * Perform any VMX nested-guest instruction boundary actions.
9553 *
9554 * If any of these causes a VM-exit, we must skip executing the next
9555 * instruction (would run into stale page tables). A VM-exit makes sure
9556     * there is no interrupt-inhibition, so that should ensure we don't try to
9557     * execute the next instruction.  Clearing fExecuteInhibit is
9558 * problematic because of the setjmp/longjmp clobbering above.
9559 */
9560 if ( rcStrict == VINF_SUCCESS
9561 && VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9562 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW))
9563 rcStrict = iemHandleNestedInstructionBoundraryFFs(pVCpu, rcStrict);
9564#endif
9565
9566 /* Execute the next instruction as well if a cli, pop ss or
9567 mov ss, Gr has just completed successfully. */
9568 if ( fExecuteInhibit
9569 && rcStrict == VINF_SUCCESS
9570 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
9571 && EMIsInhibitInterruptsActive(pVCpu))
9572 {
9573 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fBypassHandlers, pVCpu->iem.s.fDisregardLock);
9574 if (rcStrict == VINF_SUCCESS)
9575 {
9576#ifdef LOG_ENABLED
9577 iemLogCurInstr(pVCpu, false, pszFunction);
9578#endif
9579#ifdef IEM_WITH_SETJMP
9580 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
9581 if ((rcStrict = setjmp(JmpBuf)) == 0)
9582 {
9583 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
9584 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
9585 }
9586 else
9587 pVCpu->iem.s.cLongJumps++;
9588 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
9589#else
9590 IEM_OPCODE_GET_NEXT_U8(&b);
9591 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
9592#endif
9593 if (rcStrict == VINF_SUCCESS)
9594 pVCpu->iem.s.cInstructions++;
9595 if (pVCpu->iem.s.cActiveMappings > 0)
9596 {
9597 Assert(rcStrict != VINF_SUCCESS);
9598 iemMemRollback(pVCpu);
9599 }
9600 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9601 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9602 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9603 }
9604 else if (pVCpu->iem.s.cActiveMappings > 0)
9605 iemMemRollback(pVCpu);
9606 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS); /* hope this is correct for all exceptional cases... */
9607 }
9608
9609 /*
9610 * Return value fiddling, statistics and sanity assertions.
9611 */
9612 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9613
9614 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
9615 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
9616 return rcStrict;
9617}
9618
9619
9620/**
9621 * Execute one instruction.
9622 *
9623 * @return Strict VBox status code.
9624 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9625 */
9626VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPUCC pVCpu)
9627{
9628    AssertCompile(sizeof(pVCpu->iem.s) <= sizeof(pVCpu->iem.padding)); /* (tstVMStruct can't do its job w/o instruction stats) */
9629#ifdef LOG_ENABLED
9630 iemLogCurInstr(pVCpu, true, "IEMExecOne");
9631#endif
9632
9633 /*
9634 * Do the decoding and emulation.
9635 */
9636 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
9637 if (rcStrict == VINF_SUCCESS)
9638 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOne");
9639 else if (pVCpu->iem.s.cActiveMappings > 0)
9640 iemMemRollback(pVCpu);
9641
9642 if (rcStrict != VINF_SUCCESS)
9643 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9644 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9645 return rcStrict;
9646}
9647
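/*
 * Usage sketch (hypothetical caller, illustrative only): single-stepping the guest
 * on the calling EMT by repeatedly invoking IEMExecOne().  The loop bound and the
 * early-out policy are assumptions of this example, not requirements of the API.
 */
#if 0
static VBOXSTRICTRC exampleSingleStepLoop(PVMCPUCC pVCpu, uint32_t cSteps)
{
    VBOXSTRICTRC rcStrict = VINF_SUCCESS;
    while (cSteps-- > 0)
    {
        rcStrict = IEMExecOne(pVCpu);   /* decodes, emulates and commits one instruction */
        if (rcStrict != VINF_SUCCESS)   /* any informational/error status ends the walk  */
            break;
    }
    return rcStrict;
}
#endif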
9648
9649VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
9650{
9651 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
9652
9653 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
9654 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
9655 if (rcStrict == VINF_SUCCESS)
9656 {
9657 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneEx");
9658 if (pcbWritten)
9659 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
9660 }
9661 else if (pVCpu->iem.s.cActiveMappings > 0)
9662 iemMemRollback(pVCpu);
9663
9664 return rcStrict;
9665}
9666
9667
9668VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
9669 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
9670{
9671 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
9672
9673 VBOXSTRICTRC rcStrict;
9674 if ( cbOpcodeBytes
9675 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
9676 {
9677 iemInitDecoder(pVCpu, false, false);
9678#ifdef IEM_WITH_CODE_TLB
9679 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
9680 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
9681 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
9682 pVCpu->iem.s.offCurInstrStart = 0;
9683 pVCpu->iem.s.offInstrNextByte = 0;
9684#else
9685 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
9686 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
9687#endif
9688 rcStrict = VINF_SUCCESS;
9689 }
9690 else
9691 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
9692 if (rcStrict == VINF_SUCCESS)
9693 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneWithPrefetchedByPC");
9694 else if (pVCpu->iem.s.cActiveMappings > 0)
9695 iemMemRollback(pVCpu);
9696
9697 return rcStrict;
9698}
9699
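/*
 * Usage sketch (hypothetical, illustrative only): an exit handler that has already
 * read the instruction bytes at the current guest RIP can hand them to IEM and
 * avoid a second guest-memory fetch.  pabBytes/cbBytes are assumed to come from
 * the caller; the bytes are only used when they match the current RIP.
 */
#if 0
static VBOXSTRICTRC exampleExecWithPrefetched(PVMCPUCC pVCpu, uint8_t const *pabBytes, size_t cbBytes)
{
    return IEMExecOneWithPrefetchedByPC(pVCpu, CPUMCTX2CORE(IEM_GET_CTX(pVCpu)),
                                        pVCpu->cpum.GstCtx.rip, pabBytes, cbBytes);
}
#endif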
9700
9701VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
9702{
9703 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
9704
9705 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
9706 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true, false);
9707 if (rcStrict == VINF_SUCCESS)
9708 {
9709 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassEx");
9710 if (pcbWritten)
9711 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
9712 }
9713 else if (pVCpu->iem.s.cActiveMappings > 0)
9714 iemMemRollback(pVCpu);
9715
9716 return rcStrict;
9717}
9718
9719
9720VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
9721 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
9722{
9723 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
9724
9725 VBOXSTRICTRC rcStrict;
9726 if ( cbOpcodeBytes
9727 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
9728 {
9729 iemInitDecoder(pVCpu, true, false);
9730#ifdef IEM_WITH_CODE_TLB
9731 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
9732 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
9733 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
9734 pVCpu->iem.s.offCurInstrStart = 0;
9735 pVCpu->iem.s.offInstrNextByte = 0;
9736#else
9737 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
9738 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
9739#endif
9740 rcStrict = VINF_SUCCESS;
9741 }
9742 else
9743 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true, false);
9744 if (rcStrict == VINF_SUCCESS)
9745 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPC");
9746 else if (pVCpu->iem.s.cActiveMappings > 0)
9747 iemMemRollback(pVCpu);
9748
9749 return rcStrict;
9750}
9751
9752
9753/**
9754 * For debugging DISGetParamSize, may come in handy.
9755 *
9756 * @returns Strict VBox status code.
9757 * @param pVCpu The cross context virtual CPU structure of the
9758 * calling EMT.
9759 * @param pCtxCore The context core structure.
9760 * @param OpcodeBytesPC The PC of the opcode bytes.
9761 * @param   pvOpcodeBytes   Prefetched opcode bytes.
9762 * @param cbOpcodeBytes Number of prefetched bytes.
9763 * @param pcbWritten Where to return the number of bytes written.
9764 * Optional.
9765 */
9766VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPCWritten(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
9767 const void *pvOpcodeBytes, size_t cbOpcodeBytes,
9768 uint32_t *pcbWritten)
9769{
9770 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
9771
9772 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
9773 VBOXSTRICTRC rcStrict;
9774 if ( cbOpcodeBytes
9775 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
9776 {
9777 iemInitDecoder(pVCpu, true, false);
9778#ifdef IEM_WITH_CODE_TLB
9779 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
9780 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
9781 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
9782 pVCpu->iem.s.offCurInstrStart = 0;
9783 pVCpu->iem.s.offInstrNextByte = 0;
9784#else
9785 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
9786 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
9787#endif
9788 rcStrict = VINF_SUCCESS;
9789 }
9790 else
9791 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true, false);
9792 if (rcStrict == VINF_SUCCESS)
9793 {
9794 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPCWritten");
9795 if (pcbWritten)
9796 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
9797 }
9798 else if (pVCpu->iem.s.cActiveMappings > 0)
9799 iemMemRollback(pVCpu);
9800
9801 return rcStrict;
9802}
9803
9804
9805/**
9806 * For handling split cacheline lock operations when the host has split-lock
9807 * detection enabled.
9808 *
9809 * This will cause the interpreter to disregard the lock prefix and implicit
9810 * locking (xchg).
9811 *
9812 * @returns Strict VBox status code.
9813 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9814 */
9815VMMDECL(VBOXSTRICTRC) IEMExecOneIgnoreLock(PVMCPUCC pVCpu)
9816{
9817 /*
9818 * Do the decoding and emulation.
9819 */
9820 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, true /*fDisregardLock*/);
9821 if (rcStrict == VINF_SUCCESS)
9822 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneIgnoreLock");
9823 else if (pVCpu->iem.s.cActiveMappings > 0)
9824 iemMemRollback(pVCpu);
9825
9826 if (rcStrict != VINF_SUCCESS)
9827 LogFlow(("IEMExecOneIgnoreLock: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9828 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9829 return rcStrict;
9830}
9831
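/*
 * Usage sketch (hypothetical, illustrative only): when the host's split-lock
 * detection trips on the current guest instruction, the caller can replay it with
 * locking disregarded.  The decision logic shown here is an assumption of this
 * example, not the VMM's actual policy.
 */
#if 0
static VBOXSTRICTRC exampleHandleSplitLock(PVMCPUCC pVCpu, bool fSplitLockDetected)
{
    if (fSplitLockDetected)
        return IEMExecOneIgnoreLock(pVCpu); /* re-executes w/o LOCK / implicit XCHG locking */
    return IEMExecOne(pVCpu);               /* normal path otherwise */
}
#endif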
9832
9833VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPUCC pVCpu, uint32_t cMaxInstructions, uint32_t cPollRate, uint32_t *pcInstructions)
9834{
9835 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
9836 AssertMsg(RT_IS_POWER_OF_TWO(cPollRate + 1), ("%#x\n", cPollRate));
9837
9838 /*
9839 * See if there is an interrupt pending in TRPM, inject it if we can.
9840 */
9841 /** @todo Can we centralize this under CPUMCanInjectInterrupt()? */
9842#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9843 bool fIntrEnabled = CPUMGetGuestGif(&pVCpu->cpum.GstCtx);
9844 if (fIntrEnabled)
9845 {
9846 if (!CPUMIsGuestInNestedHwvirtMode(IEM_GET_CTX(pVCpu)))
9847 fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
9848 else if (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
9849 fIntrEnabled = CPUMIsGuestVmxPhysIntrEnabled(IEM_GET_CTX(pVCpu));
9850 else
9851 {
9852 Assert(CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)));
9853 fIntrEnabled = CPUMIsGuestSvmPhysIntrEnabled(pVCpu, IEM_GET_CTX(pVCpu));
9854 }
9855 }
9856#else
9857 bool fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
9858#endif
9859
9860 /** @todo What if we are injecting an exception and not an interrupt? Is that
9861 * possible here? For now we assert it is indeed only an interrupt. */
9862 if ( fIntrEnabled
9863 && TRPMHasTrap(pVCpu)
9864 && EMGetInhibitInterruptsPC(pVCpu) != pVCpu->cpum.GstCtx.rip)
9865 {
9866 uint8_t u8TrapNo;
9867 TRPMEVENT enmType;
9868 uint32_t uErrCode;
9869 RTGCPTR uCr2;
9870 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */, NULL /* fIcebp */);
9871 AssertRC(rc2);
9872 Assert(enmType == TRPM_HARDWARE_INT);
9873 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
9874 TRPMResetTrap(pVCpu);
9875#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9876 /* Injecting an event may cause a VM-exit. */
9877 if ( rcStrict != VINF_SUCCESS
9878 && rcStrict != VINF_IEM_RAISED_XCPT)
9879 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
9880#else
9881 NOREF(rcStrict);
9882#endif
9883 }
9884
9885 /*
9886 * Initial decoder init w/ prefetch, then setup setjmp.
9887 */
9888 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
9889 if (rcStrict == VINF_SUCCESS)
9890 {
9891#ifdef IEM_WITH_SETJMP
9892 jmp_buf JmpBuf;
9893 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
9894 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
9895 pVCpu->iem.s.cActiveMappings = 0;
9896 if ((rcStrict = setjmp(JmpBuf)) == 0)
9897#endif
9898 {
9899 /*
9900             * The run loop.  We limit ourselves to the caller-specified cMaxInstructions.
9901 */
9902 uint32_t cMaxInstructionsGccStupidity = cMaxInstructions;
9903 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
9904 for (;;)
9905 {
9906 /*
9907 * Log the state.
9908 */
9909#ifdef LOG_ENABLED
9910 iemLogCurInstr(pVCpu, true, "IEMExecLots");
9911#endif
9912
9913 /*
9914 * Do the decoding and emulation.
9915 */
9916 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
9917 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
9918 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9919 {
9920 Assert(pVCpu->iem.s.cActiveMappings == 0);
9921 pVCpu->iem.s.cInstructions++;
9922 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
9923 {
9924 uint64_t fCpu = pVCpu->fLocalForcedActions
9925 & ( VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
9926 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
9927 | VMCPU_FF_TLB_FLUSH
9928 | VMCPU_FF_INHIBIT_INTERRUPTS
9929 | VMCPU_FF_BLOCK_NMIS
9930 | VMCPU_FF_UNHALT ));
9931
9932 if (RT_LIKELY( ( !fCpu
9933 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
9934 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF) )
9935 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) ))
9936 {
9937 if (cMaxInstructionsGccStupidity-- > 0)
9938 {
9939                                /* Poll timers every now and then according to the caller's specs. */
9940 if ( (cMaxInstructionsGccStupidity & cPollRate) != 0
9941 || !TMTimerPollBool(pVM, pVCpu))
9942 {
9943 Assert(pVCpu->iem.s.cActiveMappings == 0);
9944 iemReInitDecoder(pVCpu);
9945 continue;
9946 }
9947 }
9948 }
9949 }
9950 Assert(pVCpu->iem.s.cActiveMappings == 0);
9951 }
9952 else if (pVCpu->iem.s.cActiveMappings > 0)
9953 iemMemRollback(pVCpu);
9954 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9955 break;
9956 }
9957 }
9958#ifdef IEM_WITH_SETJMP
9959 else
9960 {
9961 if (pVCpu->iem.s.cActiveMappings > 0)
9962 iemMemRollback(pVCpu);
9963# if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9964 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9965# endif
9966 pVCpu->iem.s.cLongJumps++;
9967 }
9968 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
9969#endif
9970
9971 /*
9972 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
9973 */
9974 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
9975 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
9976 }
9977 else
9978 {
9979 if (pVCpu->iem.s.cActiveMappings > 0)
9980 iemMemRollback(pVCpu);
9981
9982#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9983 /*
9984 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
9985 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
9986 */
9987 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9988#endif
9989 }
9990
9991 /*
9992 * Maybe re-enter raw-mode and log.
9993 */
9994 if (rcStrict != VINF_SUCCESS)
9995 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9996 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9997 if (pcInstructions)
9998 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
9999 return rcStrict;
10000}
10001
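/*
 * Usage sketch (hypothetical, illustrative only): batching instruction emulation.
 * Note the cPollRate contract asserted above: cPollRate + 1 must be a power of two,
 * since cPollRate is used as a mask when deciding when to poll timers.  All the
 * concrete numbers below are assumptions of this example.
 */
#if 0
static VBOXSTRICTRC exampleExecBatch(PVMCPUCC pVCpu)
{
    uint32_t cInstructions = 0;
    /* Up to 2048 instructions, polling timers roughly every 512 instructions. */
    VBOXSTRICTRC rcStrict = IEMExecLots(pVCpu, 2048 /*cMaxInstructions*/, 511 /*cPollRate*/, &cInstructions);
    Log(("exampleExecBatch: %u instructions, rcStrict=%Rrc\n", cInstructions, VBOXSTRICTRC_VAL(rcStrict)));
    return rcStrict;
}
#endif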
10002
10003/**
10004 * Interface used by EMExecuteExec, does exit statistics and limits.
10005 *
10006 * @returns Strict VBox status code.
10007 * @param pVCpu The cross context virtual CPU structure.
10008 * @param fWillExit To be defined.
10009 * @param cMinInstructions Minimum number of instructions to execute before checking for FFs.
10010 * @param cMaxInstructions Maximum number of instructions to execute.
10011 * @param cMaxInstructionsWithoutExits
10012 * The max number of instructions without exits.
10013 * @param pStats Where to return statistics.
10014 */
10015VMMDECL(VBOXSTRICTRC) IEMExecForExits(PVMCPUCC pVCpu, uint32_t fWillExit, uint32_t cMinInstructions, uint32_t cMaxInstructions,
10016 uint32_t cMaxInstructionsWithoutExits, PIEMEXECFOREXITSTATS pStats)
10017{
10018 NOREF(fWillExit); /** @todo define flexible exit crits */
10019
10020 /*
10021 * Initialize return stats.
10022 */
10023 pStats->cInstructions = 0;
10024 pStats->cExits = 0;
10025 pStats->cMaxExitDistance = 0;
10026 pStats->cReserved = 0;
10027
10028 /*
10029 * Initial decoder init w/ prefetch, then setup setjmp.
10030 */
10031 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
10032 if (rcStrict == VINF_SUCCESS)
10033 {
10034#ifdef IEM_WITH_SETJMP
10035 jmp_buf JmpBuf;
10036 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
10037 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
10038 pVCpu->iem.s.cActiveMappings = 0;
10039 if ((rcStrict = setjmp(JmpBuf)) == 0)
10040#endif
10041 {
10042#ifdef IN_RING0
10043 bool const fCheckPreemptionPending = !RTThreadPreemptIsPossible() || !RTThreadPreemptIsEnabled(NIL_RTTHREAD);
10044#endif
10045 uint32_t cInstructionSinceLastExit = 0;
10046
10047 /*
10048             * The run loop.  We limit ourselves to the caller-specified instruction limits.
10049 */
10050 PVM pVM = pVCpu->CTX_SUFF(pVM);
10051 for (;;)
10052 {
10053 /*
10054 * Log the state.
10055 */
10056#ifdef LOG_ENABLED
10057 iemLogCurInstr(pVCpu, true, "IEMExecForExits");
10058#endif
10059
10060 /*
10061 * Do the decoding and emulation.
10062 */
10063 uint32_t const cPotentialExits = pVCpu->iem.s.cPotentialExits;
10064
10065 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
10066 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
10067
10068 if ( cPotentialExits != pVCpu->iem.s.cPotentialExits
10069 && cInstructionSinceLastExit > 0 /* don't count the first */ )
10070 {
10071 pStats->cExits += 1;
10072 if (cInstructionSinceLastExit > pStats->cMaxExitDistance)
10073 pStats->cMaxExitDistance = cInstructionSinceLastExit;
10074 cInstructionSinceLastExit = 0;
10075 }
10076
10077 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10078 {
10079 Assert(pVCpu->iem.s.cActiveMappings == 0);
10080 pVCpu->iem.s.cInstructions++;
10081 pStats->cInstructions++;
10082 cInstructionSinceLastExit++;
10083 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
10084 {
10085 uint64_t fCpu = pVCpu->fLocalForcedActions
10086 & ( VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
10087 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
10088 | VMCPU_FF_TLB_FLUSH
10089 | VMCPU_FF_INHIBIT_INTERRUPTS
10090 | VMCPU_FF_BLOCK_NMIS
10091 | VMCPU_FF_UNHALT ));
10092
10093 if (RT_LIKELY( ( ( !fCpu
10094 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
10095 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF))
10096 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) )
10097 || pStats->cInstructions < cMinInstructions))
10098 {
10099 if (pStats->cInstructions < cMaxInstructions)
10100 {
10101 if (cInstructionSinceLastExit <= cMaxInstructionsWithoutExits)
10102 {
10103#ifdef IN_RING0
10104 if ( !fCheckPreemptionPending
10105 || !RTThreadPreemptIsPending(NIL_RTTHREAD))
10106#endif
10107 {
10108 Assert(pVCpu->iem.s.cActiveMappings == 0);
10109 iemReInitDecoder(pVCpu);
10110 continue;
10111 }
10112#ifdef IN_RING0
10113 rcStrict = VINF_EM_RAW_INTERRUPT;
10114 break;
10115#endif
10116 }
10117 }
10118 }
10119 Assert(!(fCpu & VMCPU_FF_IEM));
10120 }
10121 Assert(pVCpu->iem.s.cActiveMappings == 0);
10122 }
10123 else if (pVCpu->iem.s.cActiveMappings > 0)
10124 iemMemRollback(pVCpu);
10125 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10126 break;
10127 }
10128 }
10129#ifdef IEM_WITH_SETJMP
10130 else
10131 {
10132 if (pVCpu->iem.s.cActiveMappings > 0)
10133 iemMemRollback(pVCpu);
10134 pVCpu->iem.s.cLongJumps++;
10135 }
10136 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
10137#endif
10138
10139 /*
10140 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
10141 */
10142 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
10143 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
10144 }
10145 else
10146 {
10147 if (pVCpu->iem.s.cActiveMappings > 0)
10148 iemMemRollback(pVCpu);
10149
10150#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
10151 /*
10152 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
10153 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
10154 */
10155 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10156#endif
10157 }
10158
10159 /*
10160 * Maybe re-enter raw-mode and log.
10161 */
10162 if (rcStrict != VINF_SUCCESS)
10163 LogFlow(("IEMExecForExits: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc; ins=%u exits=%u maxdist=%u\n",
10164 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp,
10165 pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict), pStats->cInstructions, pStats->cExits, pStats->cMaxExitDistance));
10166 return rcStrict;
10167}
10168
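/*
 * Usage sketch (hypothetical, illustrative only): letting IEM run until the guest
 * starts producing exits again, as a caller might when probing whether to return
 * to hardware-assisted execution.  The thresholds are assumptions of this example.
 */
#if 0
static VBOXSTRICTRC exampleExecUntilExits(PVMCPUCC pVCpu)
{
    IEMEXECFOREXITSTATS Stats;
    VBOXSTRICTRC rcStrict = IEMExecForExits(pVCpu, 0 /*fWillExit*/, 32 /*cMinInstructions*/,
                                            4096 /*cMaxInstructions*/, 512 /*cMaxInstructionsWithoutExits*/, &Stats);
    Log(("exampleExecUntilExits: ins=%u exits=%u maxdist=%u rcStrict=%Rrc\n",
         Stats.cInstructions, Stats.cExits, Stats.cMaxExitDistance, VBOXSTRICTRC_VAL(rcStrict)));
    return rcStrict;
}
#endif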
10169
10170/**
10171 * Injects a trap, fault, abort, software interrupt or external interrupt.
10172 *
10173 * The parameter list matches TRPMQueryTrapAll pretty closely.
10174 *
10175 * @returns Strict VBox status code.
10176 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10177 * @param u8TrapNo The trap number.
10178 * @param enmType What type is it (trap/fault/abort), software
10179 * interrupt or hardware interrupt.
10180 * @param uErrCode The error code if applicable.
10181 * @param uCr2 The CR2 value if applicable.
10182 * @param cbInstr The instruction length (only relevant for
10183 * software interrupts).
10184 */
10185VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPUCC pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
10186 uint8_t cbInstr)
10187{
10188 iemInitDecoder(pVCpu, false, false);
10189#ifdef DBGFTRACE_ENABLED
10190 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
10191 u8TrapNo, enmType, uErrCode, uCr2);
10192#endif
10193
10194 uint32_t fFlags;
10195 switch (enmType)
10196 {
10197 case TRPM_HARDWARE_INT:
10198 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
10199 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
10200 uErrCode = uCr2 = 0;
10201 break;
10202
10203 case TRPM_SOFTWARE_INT:
10204 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
10205 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
10206 uErrCode = uCr2 = 0;
10207 break;
10208
10209 case TRPM_TRAP:
10210 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
10211 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
10212 if (u8TrapNo == X86_XCPT_PF)
10213 fFlags |= IEM_XCPT_FLAGS_CR2;
10214 switch (u8TrapNo)
10215 {
10216 case X86_XCPT_DF:
10217 case X86_XCPT_TS:
10218 case X86_XCPT_NP:
10219 case X86_XCPT_SS:
10220 case X86_XCPT_PF:
10221 case X86_XCPT_AC:
10222 case X86_XCPT_GP:
10223 fFlags |= IEM_XCPT_FLAGS_ERR;
10224 break;
10225 }
10226 break;
10227
10228 IEM_NOT_REACHED_DEFAULT_CASE_RET();
10229 }
10230
10231 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
10232
10233 if (pVCpu->iem.s.cActiveMappings > 0)
10234 iemMemRollback(pVCpu);
10235
10236 return rcStrict;
10237}
10238
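/*
 * Usage sketch (hypothetical, illustrative only): injecting a page fault into the
 * guest from a helper that has already decided the access must fault.  The fault
 * address and the error-code flags (taken from the x86 header) are made-up values
 * for illustration.
 */
#if 0
static VBOXSTRICTRC exampleInjectPageFault(PVMCPUCC pVCpu, RTGCPTR GCPtrFault)
{
    uint16_t const uErrCode = X86_TRAP_PF_P | X86_TRAP_PF_RW; /* present, write access */
    return IEMInjectTrap(pVCpu, X86_XCPT_PF, TRPM_TRAP, uErrCode, GCPtrFault, 0 /*cbInstr*/);
}
#endif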
10239
10240/**
10241 * Injects the active TRPM event.
10242 *
10243 * @returns Strict VBox status code.
10244 * @param pVCpu The cross context virtual CPU structure.
10245 */
10246VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPUCC pVCpu)
10247{
10248#ifndef IEM_IMPLEMENTS_TASKSWITCH
10249 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
10250#else
10251 uint8_t u8TrapNo;
10252 TRPMEVENT enmType;
10253 uint32_t uErrCode;
10254 RTGCUINTPTR uCr2;
10255 uint8_t cbInstr;
10256 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr, NULL /* fIcebp */);
10257 if (RT_FAILURE(rc))
10258 return rc;
10259
10260 /** @todo r=ramshankar: Pass ICEBP info. to IEMInjectTrap() below and handle
10261 * ICEBP \#DB injection as a special case. */
10262 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
10263#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
10264 if (rcStrict == VINF_SVM_VMEXIT)
10265 rcStrict = VINF_SUCCESS;
10266#endif
10267#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10268 if (rcStrict == VINF_VMX_VMEXIT)
10269 rcStrict = VINF_SUCCESS;
10270#endif
10271 /** @todo Are there any other codes that imply the event was successfully
10272 * delivered to the guest? See @bugref{6607}. */
10273 if ( rcStrict == VINF_SUCCESS
10274 || rcStrict == VINF_IEM_RAISED_XCPT)
10275 TRPMResetTrap(pVCpu);
10276
10277 return rcStrict;
10278#endif
10279}
10280
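/*
 * Usage sketch (hypothetical, illustrative only): before resuming guest execution,
 * a caller holding a pending TRPM event can let IEM deliver it.  The surrounding
 * check is an assumption of this example.
 */
#if 0
static VBOXSTRICTRC exampleDeliverPendingEvent(PVMCPUCC pVCpu)
{
    if (TRPMHasTrap(pVCpu))                 /* anything queued in TRPM?                */
        return IEMInjectTrpmEvent(pVCpu);   /* inject it; TRPM is reset on success     */
    return VINF_SUCCESS;
}
#endif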
10281
10282VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
10283{
10284 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
10285 return VERR_NOT_IMPLEMENTED;
10286}
10287
10288
10289VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
10290{
10291 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
10292 return VERR_NOT_IMPLEMENTED;
10293}
10294
10295
10296#if 0 /* The IRET-to-v8086 mode in PATM is very optimistic, so I don't dare do this yet. */
10297/**
10298 * Executes a IRET instruction with default operand size.
10299 *
10300 * This is for PATM.
10301 *
10302 * @returns VBox status code.
10303 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10304 * @param pCtxCore The register frame.
10305 */
10306VMM_INT_DECL(int) IEMExecInstr_iret(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore)
10307{
10308 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10309
10310 iemCtxCoreToCtx(pCtx, pCtxCore);
10311 iemInitDecoder(pVCpu);
10312 VBOXSTRICTRC rcStrict = iemCImpl_iret(pVCpu, 1, pVCpu->iem.s.enmDefOpSize);
10313 if (rcStrict == VINF_SUCCESS)
10314 iemCtxToCtxCore(pCtxCore, pCtx);
10315 else
10316 LogFlow(("IEMExecInstr_iret: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
10317 pVCpu->cpum.GstCtx.cs, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
10318 return rcStrict;
10319}
10320#endif
10321
10322
10323/**
10324 * Interface for HM and EM for executing string I/O OUT (write) instructions.
10325 *
10326 * This API ASSUMES that the caller has already verified that the guest code is
10327 * allowed to access the I/O port. (The I/O port is in the DX register in the
10328 * guest state.)
10329 *
10330 * @returns Strict VBox status code.
10331 * @param pVCpu The cross context virtual CPU structure.
10332 * @param cbValue The size of the I/O port access (1, 2, or 4).
10333 * @param enmAddrMode The addressing mode.
10334 * @param fRepPrefix Indicates whether a repeat prefix is used
10335 * (doesn't matter which for this instruction).
10336 * @param cbInstr The instruction length in bytes.
10337 * @param   iEffSeg         The effective segment register number.
10338 * @param fIoChecked Whether the access to the I/O port has been
10339 * checked or not. It's typically checked in the
10340 * HM scenario.
10341 */
10342VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
10343 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
10344{
10345 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
10346 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10347
10348 /*
10349 * State init.
10350 */
10351 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10352
10353 /*
10354 * Switch orgy for getting to the right handler.
10355 */
10356 VBOXSTRICTRC rcStrict;
10357 if (fRepPrefix)
10358 {
10359 switch (enmAddrMode)
10360 {
10361 case IEMMODE_16BIT:
10362 switch (cbValue)
10363 {
10364 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10365 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10366 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10367 default:
10368 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10369 }
10370 break;
10371
10372 case IEMMODE_32BIT:
10373 switch (cbValue)
10374 {
10375 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10376 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10377 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10378 default:
10379 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10380 }
10381 break;
10382
10383 case IEMMODE_64BIT:
10384 switch (cbValue)
10385 {
10386 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10387 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10388 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10389 default:
10390 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10391 }
10392 break;
10393
10394 default:
10395 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10396 }
10397 }
10398 else
10399 {
10400 switch (enmAddrMode)
10401 {
10402 case IEMMODE_16BIT:
10403 switch (cbValue)
10404 {
10405 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10406 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10407 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10408 default:
10409 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10410 }
10411 break;
10412
10413 case IEMMODE_32BIT:
10414 switch (cbValue)
10415 {
10416 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10417 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10418 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10419 default:
10420 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10421 }
10422 break;
10423
10424 case IEMMODE_64BIT:
10425 switch (cbValue)
10426 {
10427 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10428 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10429 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10430 default:
10431 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10432 }
10433 break;
10434
10435 default:
10436 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10437 }
10438 }
10439
10440 if (pVCpu->iem.s.cActiveMappings)
10441 iemMemRollback(pVCpu);
10442
10443 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10444}
10445
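/*
 * Usage sketch (hypothetical, illustrative only): how an exit handler might forward
 * an intercepted "rep outsb" to IEM.  The instruction length, address mode and
 * segment are assumptions; a real caller would take them from the exit information.
 */
#if 0
static VBOXSTRICTRC exampleForwardRepOutsb(PVMCPUCC pVCpu, uint8_t cbInstr)
{
    return IEMExecStringIoWrite(pVCpu, 1 /*cbValue*/, IEMMODE_32BIT /*enmAddrMode*/,
                                true /*fRepPrefix*/, cbInstr, X86_SREG_DS /*iEffSeg*/,
                                true /*fIoChecked*/);
}
#endif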
10446
10447/**
10448 * Interface for HM and EM for executing string I/O IN (read) instructions.
10449 *
10450 * This API ASSUMES that the caller has already verified that the guest code is
10451 * allowed to access the I/O port. (The I/O port is in the DX register in the
10452 * guest state.)
10453 *
10454 * @returns Strict VBox status code.
10455 * @param pVCpu The cross context virtual CPU structure.
10456 * @param cbValue The size of the I/O port access (1, 2, or 4).
10457 * @param enmAddrMode The addressing mode.
10458 * @param fRepPrefix Indicates whether a repeat prefix is used
10459 * (doesn't matter which for this instruction).
10460 * @param cbInstr The instruction length in bytes.
10461 * @param fIoChecked Whether the access to the I/O port has been
10462 * checked or not. It's typically checked in the
10463 * HM scenario.
10464 */
10465VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
10466 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
10467{
10468 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10469
10470 /*
10471 * State init.
10472 */
10473 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10474
10475 /*
10476 * Switch orgy for getting to the right handler.
10477 */
10478 VBOXSTRICTRC rcStrict;
10479 if (fRepPrefix)
10480 {
10481 switch (enmAddrMode)
10482 {
10483 case IEMMODE_16BIT:
10484 switch (cbValue)
10485 {
10486 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
10487 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
10488 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
10489 default:
10490 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10491 }
10492 break;
10493
10494 case IEMMODE_32BIT:
10495 switch (cbValue)
10496 {
10497 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
10498 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
10499 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
10500 default:
10501 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10502 }
10503 break;
10504
10505 case IEMMODE_64BIT:
10506 switch (cbValue)
10507 {
10508 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
10509 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
10510 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
10511 default:
10512 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10513 }
10514 break;
10515
10516 default:
10517 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10518 }
10519 }
10520 else
10521 {
10522 switch (enmAddrMode)
10523 {
10524 case IEMMODE_16BIT:
10525 switch (cbValue)
10526 {
10527 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
10528 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
10529 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
10530 default:
10531 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10532 }
10533 break;
10534
10535 case IEMMODE_32BIT:
10536 switch (cbValue)
10537 {
10538 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
10539 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
10540 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
10541 default:
10542 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10543 }
10544 break;
10545
10546 case IEMMODE_64BIT:
10547 switch (cbValue)
10548 {
10549 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
10550 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
10551 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
10552 default:
10553 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10554 }
10555 break;
10556
10557 default:
10558 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10559 }
10560 }
10561
10562 if ( pVCpu->iem.s.cActiveMappings == 0
10563 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
10564 { /* likely */ }
10565 else
10566 {
10567 AssertMsg(!IOM_SUCCESS(rcStrict), ("%#x\n", VBOXSTRICTRC_VAL(rcStrict)));
10568 iemMemRollback(pVCpu);
10569 }
10570 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10571}
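
/*
 * Illustrative usage sketch (not an actual VirtualBox call site): how an HM
 * I/O-exit handler could hand a byte-sized "rep ins" with 32-bit addressing
 * over to IEM.  The surrounding handler and the cbInstr value are assumptions
 * made purely for this example.
 *
 *    VBOXSTRICTRC rcStrict = IEMExecStringIoRead(pVCpu,
 *                                                1,              // cbValue: byte-sized INS
 *                                                IEMMODE_32BIT,  // guest address size
 *                                                true,           // fRepPrefix
 *                                                cbInstr,        // from the exit instruction info
 *                                                true);          // fIoChecked: HM already checked the port
 *    // The status may be an informational scheduling code; hand it back to EM as-is.
 *    return rcStrict;
 */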
10572
10573
10574/**
10575 * Interface for rawmode to execute an OUT instruction.
10576 *
10577 * @returns Strict VBox status code.
10578 * @param pVCpu The cross context virtual CPU structure.
10579 * @param cbInstr The instruction length in bytes.
10580 * @param u16Port The port to write to.
10581 * @param fImm Whether the port is specified using an immediate operand or
10582 * using the implicit DX register.
10583 * @param cbReg The register size.
10584 *
10585 * @remarks In ring-0 not all of the state needs to be synced in.
10586 */
10587VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
10588{
10589 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10590 Assert(cbReg <= 4 && cbReg != 3);
10591
10592 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10593 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_out, u16Port, fImm, cbReg);
10594 Assert(!pVCpu->iem.s.cActiveMappings);
10595 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10596}
10597
10598
10599/**
10600 * Interface for rawmode to execute an IN instruction.
10601 *
10602 * @returns Strict VBox status code.
10603 * @param pVCpu The cross context virtual CPU structure.
10604 * @param cbInstr The instruction length in bytes.
10605 * @param u16Port The port to read from.
10606 * @param fImm Whether the port is specified using an immediate operand or
10607 * using the implicit DX register.
10608 * @param cbReg The register size.
10609 */
10610VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
10611{
10612 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10613 Assert(cbReg <= 4 && cbReg != 3);
10614
10615 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10616 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_in, u16Port, fImm, cbReg);
10617 Assert(!pVCpu->iem.s.cActiveMappings);
10618 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10619}
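
/*
 * Illustrative sketch covering the two port I/O wrappers above (not an actual
 * call site; u16Port and cbInstr stand in for values a real caller would take
 * from its decoder/exit state):
 *
 *    // out dx, al - port taken from DX, byte access:
 *    VBOXSTRICTRC rcStrict = IEMExecDecodedOut(pVCpu, cbInstr, u16Port,
 *                                              false,  // fImm: port comes from DX
 *                                              1);     // cbReg: byte access (AL)
 *    // in eax, imm8 - immediate port, dword access:
 *    if (rcStrict == VINF_SUCCESS)
 *        rcStrict = IEMExecDecodedIn(pVCpu, cbInstr, u16Port,
 *                                    true,   // fImm: port given as an immediate operand
 *                                    4);     // cbReg: dword access (EAX)
 *    return rcStrict;
 */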
10620
10621
10622/**
10623 * Interface for HM and EM to write to a CRx register.
10624 *
10625 * @returns Strict VBox status code.
10626 * @param pVCpu The cross context virtual CPU structure.
10627 * @param cbInstr The instruction length in bytes.
10628 * @param iCrReg The control register number (destination).
10629 * @param iGReg The general purpose register number (source).
10630 *
10631 * @remarks In ring-0 not all of the state needs to be synced in.
10632 */
10633VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
10634{
10635 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10636 Assert(iCrReg < 16);
10637 Assert(iGReg < 16);
10638
10639 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10640 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
10641 Assert(!pVCpu->iem.s.cActiveMappings);
10642 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10643}
10644
10645
10646/**
10647 * Interface for HM and EM to read from a CRx register.
10648 *
10649 * @returns Strict VBox status code.
10650 * @param pVCpu The cross context virtual CPU structure.
10651 * @param cbInstr The instruction length in bytes.
10652 * @param iGReg The general purpose register number (destination).
10653 * @param iCrReg The control register number (source).
10654 *
10655 * @remarks In ring-0 not all of the state needs to be synced in.
10656 */
10657VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
10658{
10659 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10660 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4
10661 | CPUMCTX_EXTRN_APIC_TPR);
10662 Assert(iCrReg < 16);
10663 Assert(iGReg < 16);
10664
10665 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10666 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
10667 Assert(!pVCpu->iem.s.cActiveMappings);
10668 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10669}
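
/*
 * Illustrative sketch for the CRx wrappers above (not an actual call site;
 * cbInstr is assumed to come from the caller's decoder/exit state):
 *
 *    // mov cr3, rax:
 *    VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxWrite(pVCpu, cbInstr,
 *                                                      3,              // iCrReg: CR3 (destination)
 *                                                      X86_GREG_xAX);  // iGReg:  RAX (source)
 *    // mov rax, cr3 (the reverse direction):
 *    if (rcStrict == VINF_SUCCESS)
 *        rcStrict = IEMExecDecodedMovCRxRead(pVCpu, cbInstr, X86_GREG_xAX, 3);
 *    return rcStrict;
 */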
10670
10671
10672/**
10673 * Interface for HM and EM to clear the CR0[TS] bit.
10674 *
10675 * @returns Strict VBox status code.
10676 * @param pVCpu The cross context virtual CPU structure.
10677 * @param cbInstr The instruction length in bytes.
10678 *
10679 * @remarks In ring-0 not all of the state needs to be synced in.
10680 */
10681VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPUCC pVCpu, uint8_t cbInstr)
10682{
10683 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10684
10685 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10686 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
10687 Assert(!pVCpu->iem.s.cActiveMappings);
10688 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10689}
10690
10691
10692/**
10693 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
10694 *
10695 * @returns Strict VBox status code.
10696 * @param pVCpu The cross context virtual CPU structure.
10697 * @param cbInstr The instruction length in bytes.
10698 * @param uValue The value to load into CR0.
10699 * @param GCPtrEffDst The guest-linear address if the LMSW instruction has a
10700 * memory operand. Otherwise pass NIL_RTGCPTR.
10701 *
10702 * @remarks In ring-0 not all of the state needs to be synced in.
10703 */
10704VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t uValue, RTGCPTR GCPtrEffDst)
10705{
10706 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10707
10708 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10709 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_lmsw, uValue, GCPtrEffDst);
10710 Assert(!pVCpu->iem.s.cActiveMappings);
10711 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10712}
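
/*
 * Illustrative sketch for the CR0 helpers above (not an actual call site).
 * For LMSW with a register operand there is no guest-linear address, so
 * NIL_RTGCPTR is passed as documented; uMsw and cbInstr stand in for values
 * a real caller would have decoded.
 *
 *    VBOXSTRICTRC rcStrict = IEMExecDecodedClts(pVCpu, cbInstr);   // clts
 *    if (rcStrict == VINF_SUCCESS)
 *        rcStrict = IEMExecDecodedLmsw(pVCpu, cbInstr, uMsw,
 *                                      NIL_RTGCPTR);               // lmsw ax: no memory operand
 *    return rcStrict;
 */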
10713
10714
10715/**
10716 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
10717 *
10718 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
10719 *
10720 * @returns Strict VBox status code.
10721 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10722 * @param cbInstr The instruction length in bytes.
10723 * @remarks In ring-0 not all of the state needs to be synced in.
10724 * @thread EMT(pVCpu)
10725 */
10726VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPUCC pVCpu, uint8_t cbInstr)
10727{
10728 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10729
10730 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10731 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
10732 Assert(!pVCpu->iem.s.cActiveMappings);
10733 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10734}
10735
10736
10737/**
10738 * Interface for HM and EM to emulate the WBINVD instruction.
10739 *
10740 * @returns Strict VBox status code.
10741 * @param pVCpu The cross context virtual CPU structure.
10742 * @param cbInstr The instruction length in bytes.
10743 *
10744 * @remarks In ring-0 not all of the state needs to be synced in.
10745 */
10746VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWbinvd(PVMCPUCC pVCpu, uint8_t cbInstr)
10747{
10748 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10749
10750 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10751 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wbinvd);
10752 Assert(!pVCpu->iem.s.cActiveMappings);
10753 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10754}
10755
10756
10757/**
10758 * Interface for HM and EM to emulate the INVD instruction.
10759 *
10760 * @returns Strict VBox status code.
10761 * @param pVCpu The cross context virtual CPU structure.
10762 * @param cbInstr The instruction length in bytes.
10763 *
10764 * @remarks In ring-0 not all of the state needs to be synced in.
10765 */
10766VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvd(PVMCPUCC pVCpu, uint8_t cbInstr)
10767{
10768 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10769
10770 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10771 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invd);
10772 Assert(!pVCpu->iem.s.cActiveMappings);
10773 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10774}
10775
10776
10777/**
10778 * Interface for HM and EM to emulate the INVLPG instruction.
10779 *
10780 * @returns Strict VBox status code.
10781 * @retval VINF_PGM_SYNC_CR3
10782 *
10783 * @param pVCpu The cross context virtual CPU structure.
10784 * @param cbInstr The instruction length in bytes.
10785 * @param GCPtrPage The effective address of the page to invalidate.
10786 *
10787 * @remarks In ring-0 not all of the state needs to be synced in.
10788 */
10789VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpg(PVMCPUCC pVCpu, uint8_t cbInstr, RTGCPTR GCPtrPage)
10790{
10791 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10792
10793 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10794 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_invlpg, GCPtrPage);
10795 Assert(!pVCpu->iem.s.cActiveMappings);
10796 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10797}
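
/*
 * Illustrative sketch (not an actual call site): forwarding an INVLPG to IEM.
 * VINF_PGM_SYNC_CR3 is an informational status, not an error; the caller is
 * expected to propagate it so the pending CR3 sync gets processed.
 *
 *    VBOXSTRICTRC rcStrict = IEMExecDecodedInvlpg(pVCpu, cbInstr, GCPtrPage);
 *    if (rcStrict == VINF_PGM_SYNC_CR3)
 *    {
 *        // A full CR3 resync is pending; pass the status up rather than
 *        // treating the instruction as having failed.
 *    }
 *    return rcStrict;
 */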
10798
10799
10800/**
10801 * Interface for HM and EM to emulate the INVPCID instruction.
10802 *
10803 * @returns Strict VBox status code.
10804 * @retval VINF_PGM_SYNC_CR3
10805 *
10806 * @param pVCpu The cross context virtual CPU structure.
10807 * @param cbInstr The instruction length in bytes.
10808 * @param iEffSeg The effective segment register.
10809 * @param GCPtrDesc The effective address of the INVPCID descriptor.
10810 * @param uType The invalidation type.
10811 *
10812 * @remarks In ring-0 not all of the state needs to be synced in.
10813 */
10814VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvpcid(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPTR GCPtrDesc,
10815 uint64_t uType)
10816{
10817 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 4);
10818
10819 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10820 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_invpcid, iEffSeg, GCPtrDesc, uType);
10821 Assert(!pVCpu->iem.s.cActiveMappings);
10822 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10823}
10824
10825
10826/**
10827 * Interface for HM and EM to emulate the CPUID instruction.
10828 *
10829 * @returns Strict VBox status code.
10830 *
10831 * @param pVCpu The cross context virtual CPU structure.
10832 * @param cbInstr The instruction length in bytes.
10833 *
10834 * @remarks Not all of the state needs to be synced in; the usual plus RAX and RCX.
10835 */
10836VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedCpuid(PVMCPUCC pVCpu, uint8_t cbInstr)
10837{
10838 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10839 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX);
10840
10841 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10842 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_cpuid);
10843 Assert(!pVCpu->iem.s.cActiveMappings);
10844 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10845}
10846
10847
10848/**
10849 * Interface for HM and EM to emulate the RDPMC instruction.
10850 *
10851 * @returns Strict VBox status code.
10852 *
10853 * @param pVCpu The cross context virtual CPU structure.
10854 * @param cbInstr The instruction length in bytes.
10855 *
10856 * @remarks Not all of the state needs to be synced in.
10857 */
10858VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdpmc(PVMCPUCC pVCpu, uint8_t cbInstr)
10859{
10860 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10861 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
10862
10863 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10864 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdpmc);
10865 Assert(!pVCpu->iem.s.cActiveMappings);
10866 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10867}
10868
10869
10870/**
10871 * Interface for HM and EM to emulate the RDTSC instruction.
10872 *
10873 * @returns Strict VBox status code.
10874 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10875 *
10876 * @param pVCpu The cross context virtual CPU structure.
10877 * @param cbInstr The instruction length in bytes.
10878 *
10879 * @remarks Not all of the state needs to be synced in.
10880 */
10881VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtsc(PVMCPUCC pVCpu, uint8_t cbInstr)
10882{
10883 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10884 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
10885
10886 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10887 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtsc);
10888 Assert(!pVCpu->iem.s.cActiveMappings);
10889 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10890}
10891
10892
10893/**
10894 * Interface for HM and EM to emulate the RDTSCP instruction.
10895 *
10896 * @returns Strict VBox status code.
10897 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10898 *
10899 * @param pVCpu The cross context virtual CPU structure.
10900 * @param cbInstr The instruction length in bytes.
10901 *
10902 * @remarks Not all of the state needs to be synced in. It is recommended
10903 * to include CPUMCTX_EXTRN_TSC_AUX to avoid an extra fetch call.
10904 */
10905VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtscp(PVMCPUCC pVCpu, uint8_t cbInstr)
10906{
10907 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10908 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_TSC_AUX);
10909
10910 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10911 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtscp);
10912 Assert(!pVCpu->iem.s.cActiveMappings);
10913 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10914}
10915
10916
10917/**
10918 * Interface for HM and EM to emulate the RDMSR instruction.
10919 *
10920 * @returns Strict VBox status code.
10921 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10922 *
10923 * @param pVCpu The cross context virtual CPU structure.
10924 * @param cbInstr The instruction length in bytes.
10925 *
10926 * @remarks Not all of the state needs to be synced in. Requires RCX and
10927 * (currently) all MSRs.
10928 */
10929VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
10930{
10931 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10932 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_ALL_MSRS);
10933
10934 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10935 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdmsr);
10936 Assert(!pVCpu->iem.s.cActiveMappings);
10937 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10938}
10939
10940
10941/**
10942 * Interface for HM and EM to emulate the WRMSR instruction.
10943 *
10944 * @returns Strict VBox status code.
10945 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10946 *
10947 * @param pVCpu The cross context virtual CPU structure.
10948 * @param cbInstr The instruction length in bytes.
10949 *
10950 * @remarks Not all of the state needs to be synced in. Requires RCX, RAX, RDX,
10951 * and (currently) all MSRs.
10952 */
10953VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWrmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
10954{
10955 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10956 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK
10957 | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_ALL_MSRS);
10958
10959 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10960 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wrmsr);
10961 Assert(!pVCpu->iem.s.cActiveMappings);
10962 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10963}
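
/*
 * Illustrative sketch for the MSR wrappers above (not an actual call site).
 * The caller must already have RCX (plus RAX and RDX for WRMSR) and the MSR
 * state present in CPUMCTX, as the IEM_CTX_ASSERT statements check; how that
 * state gets imported is backend specific and omitted here.
 *
 *    VBOXSTRICTRC rcStrict = IEMExecDecodedRdmsr(pVCpu, cbInstr);
 *    // VINF_IEM_RAISED_XCPT is informational: the exception has been raised
 *    // on the guest context and the caller decides how to resume.
 *    return rcStrict;
 */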
10964
10965
10966/**
10967 * Interface for HM and EM to emulate the MONITOR instruction.
10968 *
10969 * @returns Strict VBox status code.
10970 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10971 *
10972 * @param pVCpu The cross context virtual CPU structure.
10973 * @param cbInstr The instruction length in bytes.
10974 *
10975 * @remarks Not all of the state needs to be synced in.
10976 * @remarks ASSUMES the default segment of DS and no segment override prefixes
10977 * are used.
10978 */
10979VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMonitor(PVMCPUCC pVCpu, uint8_t cbInstr)
10980{
10981 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10982 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);
10983
10984 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10985 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_monitor, X86_SREG_DS);
10986 Assert(!pVCpu->iem.s.cActiveMappings);
10987 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10988}
10989
10990
10991/**
10992 * Interface for HM and EM to emulate the MWAIT instruction.
10993 *
10994 * @returns Strict VBox status code.
10995 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10996 *
10997 * @param pVCpu The cross context virtual CPU structure.
10998 * @param cbInstr The instruction length in bytes.
10999 *
11000 * @remarks Not all of the state needs to be synced in.
11001 */
11002VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMwait(PVMCPUCC pVCpu, uint8_t cbInstr)
11003{
11004 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11005 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX);
11006
11007 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11008 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_mwait);
11009 Assert(!pVCpu->iem.s.cActiveMappings);
11010 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11011}
11012
11013
11014/**
11015 * Interface for HM and EM to emulate the HLT instruction.
11016 *
11017 * @returns Strict VBox status code.
11018 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
11019 *
11020 * @param pVCpu The cross context virtual CPU structure.
11021 * @param cbInstr The instruction length in bytes.
11022 *
11023 * @remarks Not all of the state needs to be synced in.
11024 */
11025VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedHlt(PVMCPUCC pVCpu, uint8_t cbInstr)
11026{
11027 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
11028
11029 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11030 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_hlt);
11031 Assert(!pVCpu->iem.s.cActiveMappings);
11032 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11033}
11034
11035
11036/**
11037 * Checks if IEM is in the process of delivering an event (interrupt or
11038 * exception).
11039 *
11040 * @returns true if we're in the process of raising an interrupt or exception,
11041 * false otherwise.
11042 * @param pVCpu The cross context virtual CPU structure.
11043 * @param puVector Where to store the vector associated with the
11044 * currently delivered event, optional.
11045 * @param pfFlags Where to store the event delivery flags (see
11046 * IEM_XCPT_FLAGS_XXX), optional.
11047 * @param puErr Where to store the error code associated with the
11048 * event, optional.
11049 * @param puCr2 Where to store the CR2 associated with the event,
11050 * optional.
11051 * @remarks The caller should check the flags to determine if the error code and
11052 * CR2 are valid for the event.
11053 */
11054VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPUCC pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr, uint64_t *puCr2)
11055{
11056 bool const fRaisingXcpt = pVCpu->iem.s.cXcptRecursions > 0;
11057 if (fRaisingXcpt)
11058 {
11059 if (puVector)
11060 *puVector = pVCpu->iem.s.uCurXcpt;
11061 if (pfFlags)
11062 *pfFlags = pVCpu->iem.s.fCurXcpt;
11063 if (puErr)
11064 *puErr = pVCpu->iem.s.uCurXcptErr;
11065 if (puCr2)
11066 *puCr2 = pVCpu->iem.s.uCurXcptCr2;
11067 }
11068 return fRaisingXcpt;
11069}
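
/*
 * Illustrative sketch (not an actual call site).  The two flag names below are
 * assumed members of the IEM_XCPT_FLAGS_XXX family referenced above; per the
 * remark, the flags must be checked before trusting the error code and CR2.
 *
 *    uint8_t uVector; uint32_t fFlags; uint32_t uErr; uint64_t uCr2;
 *    if (IEMGetCurrentXcpt(pVCpu, &uVector, &fFlags, &uErr, &uCr2))
 *    {
 *        bool const fErrCdValid = RT_BOOL(fFlags & IEM_XCPT_FLAGS_ERR);
 *        bool const fCr2Valid   = RT_BOOL(fFlags & IEM_XCPT_FLAGS_CR2);
 *        // ... use uErr/uCr2 only when the corresponding flag is set ...
 *    }
 */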
11070
11071#ifdef IN_RING3
11072
11073/**
11074 * Handles the unlikely and probably fatal merge cases.
11075 *
11076 * @returns Merged status code.
11077 * @param rcStrict Current EM status code.
11078 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
11079 * with @a rcStrict.
11080 * @param iMemMap The memory mapping index. For error reporting only.
11081 * @param pVCpu The cross context virtual CPU structure of the calling
11082 * thread, for error reporting only.
11083 */
11084DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
11085 unsigned iMemMap, PVMCPUCC pVCpu)
11086{
11087 if (RT_FAILURE_NP(rcStrict))
11088 return rcStrict;
11089
11090 if (RT_FAILURE_NP(rcStrictCommit))
11091 return rcStrictCommit;
11092
11093 if (rcStrict == rcStrictCommit)
11094 return rcStrictCommit;
11095
11096 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
11097 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
11098 pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
11099 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
11100 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
11101 return VERR_IOM_FF_STATUS_IPE;
11102}
11103
11104
11105/**
11106 * Helper for IOMR3ProcessForceFlag.
11107 *
11108 * @returns Merged status code.
11109 * @param rcStrict Current EM status code.
11110 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
11111 * with @a rcStrict.
11112 * @param iMemMap The memory mapping index. For error reporting only.
11113 * @param pVCpu The cross context virtual CPU structure of the calling
11114 * thread, for error reporting only.
11115 */
11116DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPUCC pVCpu)
11117{
11118 /* Simple. */
11119 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
11120 return rcStrictCommit;
11121
11122 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
11123 return rcStrict;
11124
11125 /* EM scheduling status codes. */
11126 if (RT_LIKELY( rcStrict >= VINF_EM_FIRST
11127 && rcStrict <= VINF_EM_LAST))
11128 {
11129 if (RT_LIKELY( rcStrictCommit >= VINF_EM_FIRST
11130 && rcStrictCommit <= VINF_EM_LAST))
11131 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
11132 }
11133
11134 /* Unlikely */
11135 return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
11136}
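
/*
 * A few concrete outcomes of the merge rules above, for illustration:
 *    - rcStrict = VINF_SUCCESS,      rcStrictCommit = VINF_EM_SUSPEND -> VINF_EM_SUSPEND (commit status wins)
 *    - rcStrict = VINF_EM_RAW_TO_R3, rcStrictCommit = VINF_SUCCESS    -> VINF_SUCCESS    (we are already in ring-3)
 *    - rcStrict = VINF_EM_HALT,      rcStrictCommit = VINF_SUCCESS    -> VINF_EM_HALT    (original status kept)
 *    - both EM scheduling codes                                       -> the numerically lower (higher priority) one
 */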
11137
11138
11139/**
11140 * Called by force-flag handling code when VMCPU_FF_IEM is set.
11141 *
11142 * @returns Merge between @a rcStrict and what the commit operation returned.
11143 * @param pVM The cross context VM structure.
11144 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11145 * @param rcStrict The status code returned by ring-0 or raw-mode.
11146 */
11147VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
11148{
11149 /*
11150 * Reset the pending commit.
11151 */
11152 AssertMsg( (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
11153 & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
11154 ("%#x %#x %#x\n",
11155 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
11156 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
11157
11158 /*
11159 * Commit the pending bounce buffers (usually just one).
11160 */
11161 unsigned cBufs = 0;
11162 unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
11163 while (iMemMap-- > 0)
11164 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
11165 {
11166 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
11167 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
11168 Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
11169
11170 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
11171 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
11172 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
11173
11174 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
11175 {
11176 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
11177 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
11178 pbBuf,
11179 cbFirst,
11180 PGMACCESSORIGIN_IEM);
11181 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
11182 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
11183 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
11184 VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
11185 }
11186
11187 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
11188 {
11189 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
11190 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
11191 pbBuf + cbFirst,
11192 cbSecond,
11193 PGMACCESSORIGIN_IEM);
11194 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
11195 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
11196 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
11197 VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
11198 }
11199 cBufs++;
11200 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
11201 }
11202
11203 AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
11204 ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
11205 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
11206 pVCpu->iem.s.cActiveMappings = 0;
11207 return rcStrict;
11208}
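
/*
 * Illustrative sketch (not an actual call site): the ring-3 force-flag
 * processing code is expected to end up doing roughly the following once
 * VMCPU_FF_IEM is noticed; the surrounding loop is assumed for the example.
 *
 *    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
 *        rcStrict = IEMR3ProcessForceFlag(pVM, pVCpu, rcStrict);
 */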
11209
11210#endif /* IN_RING3 */
11211