VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@95307

Last change on this file since 95307 was 95307, checked in by vboxsync, 2 years ago

VMM/IEMAll.cpp: Some data TLB tweaking. bugref:9898

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 447.1 KB
Line 
1/* $Id: IEMAll.cpp 95307 2022-06-19 20:37:16Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2022 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_iem IEM - Interpreted Execution Manager
20 *
21 * The interpreted execution manager (IEM) is for executing short guest code
22 * sequences that are causing too many exits / virtualization traps. It will
23 * also be used to interpret single instructions, thus replacing the selective
24 * interpreters in EM and IOM.
25 *
26 * Design goals:
27 * - Relatively small footprint, although we favour speed and correctness
28 * over size.
29 * - Reasonably fast.
30 * - Correctly handle lock prefixed instructions.
31 * - Complete instruction set - eventually.
32 * - Refactorable into a recompiler, maybe.
33 * - Replace EMInterpret*.
34 *
35 * Using the existing disassembler has been considered, however this is thought
36 * to conflict with speed as the disassembler chews things a bit too much while
37 * leaving us with a somewhat complicated state to interpret afterwards.
38 *
39 *
40 * The current code is very much work in progress. You've been warned!
41 *
42 *
43 * @section sec_iem_fpu_instr FPU Instructions
44 *
45 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
46 * same or equivalent instructions on the host FPU. To make life easy, we also
47 * let the FPU prioritize the unmasked exceptions for us. This, however, only
48 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
49 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
50 * can trigger spurious FPU exceptions.
51 *
52 * The guest FPU state is not loaded into the host CPU and kept there till we
53 * leave IEM because the calling conventions have declared an all year open
54 * season on much of the FPU state. For instance an innocent looking call to
55 * memcpy might end up using a whole bunch of XMM or MM registers if the
56 * particular implementation finds it worthwhile.
57 *
58 *
59 * @section sec_iem_logging Logging
60 *
61 * The IEM code uses the \"IEM\" log group for the main logging. The different
62 * logging levels/flags are generally used for the following purposes:
63 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
64 * - Flow (LogFlow) : Basic enter/exit IEM state info.
65 * - Level 2 (Log2) : ?
66 * - Level 3 (Log3) : More detailed enter/exit IEM state info.
67 * - Level 4 (Log4) : Decoding mnemonics w/ EIP.
68 * - Level 5 (Log5) : Decoding details.
69 * - Level 6 (Log6) : Enables/disables the lockstep comparison with REM.
70 * - Level 7 (Log7) : iret++ execution logging.
71 * - Level 8 (Log8) : Memory writes.
72 * - Level 9 (Log9) : Memory reads.
73 * - Level 10 (Log10): TLBs.
74 */
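/*
 * Illustrative note (not part of the original file): with the level assignments
 * above, a decoded mnemonic would typically go out at level 4 and a memory
 * write at level 8, e.g.:
 *
 *     Log4(("decode %04x:%RX64: xor eax, eax\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
 *     Log8(("write %RGv LB %#x\n", GCPtrMem, cbMem));
 *
 * GCPtrMem and cbMem are hypothetical locals of a write path; the format types
 * are the regular IPRT ones used throughout this file.
 */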
75
76/* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
77#ifdef _MSC_VER
78# pragma warning(disable:4505)
79#endif
80
81
82/*********************************************************************************************************************************
83* Header Files *
84*********************************************************************************************************************************/
85#define LOG_GROUP LOG_GROUP_IEM
86#define VMCPU_INCL_CPUM_GST_CTX
87#include <VBox/vmm/iem.h>
88#include <VBox/vmm/cpum.h>
89#include <VBox/vmm/apic.h>
90#include <VBox/vmm/pdm.h>
91#include <VBox/vmm/pgm.h>
92#include <VBox/vmm/iom.h>
93#include <VBox/vmm/em.h>
94#include <VBox/vmm/hm.h>
95#include <VBox/vmm/nem.h>
96#include <VBox/vmm/gim.h>
97#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
98# include <VBox/vmm/em.h>
99# include <VBox/vmm/hm_svm.h>
100#endif
101#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
102# include <VBox/vmm/hmvmxinline.h>
103#endif
104#include <VBox/vmm/tm.h>
105#include <VBox/vmm/dbgf.h>
106#include <VBox/vmm/dbgftrace.h>
107#include "IEMInternal.h"
108#include <VBox/vmm/vmcc.h>
109#include <VBox/log.h>
110#include <VBox/err.h>
111#include <VBox/param.h>
112#include <VBox/dis.h>
113#include <VBox/disopcode.h>
114#include <iprt/asm-math.h>
115#include <iprt/assert.h>
116#include <iprt/string.h>
117#include <iprt/x86.h>
118
119#include "IEMInline.h"
120
121
122/*********************************************************************************************************************************
123* Structures and Typedefs *
124*********************************************************************************************************************************/
125/**
126 * CPU exception classes.
127 */
128typedef enum IEMXCPTCLASS
129{
130 IEMXCPTCLASS_BENIGN,
131 IEMXCPTCLASS_CONTRIBUTORY,
132 IEMXCPTCLASS_PAGE_FAULT,
133 IEMXCPTCLASS_DOUBLE_FAULT
134} IEMXCPTCLASS;
135
136
137/*********************************************************************************************************************************
138* Global Variables *
139*********************************************************************************************************************************/
140#if defined(IEM_LOG_MEMORY_WRITES)
141/** What IEM just wrote. */
142uint8_t g_abIemWrote[256];
143/** How much IEM just wrote. */
144size_t g_cbIemWrote;
145#endif
146
147
148/*********************************************************************************************************************************
149* Internal Functions *
150*********************************************************************************************************************************/
151static VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,
152 uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT;
153
154
155/**
156 * Initializes the decoder state.
157 *
158 * iemReInitDecoder is mostly a copy of this function.
159 *
160 * @param pVCpu The cross context virtual CPU structure of the
161 * calling thread.
162 * @param fBypassHandlers Whether to bypass access handlers.
163 * @param fDisregardLock Whether to disregard the LOCK prefix.
164 */
165DECLINLINE(void) iemInitDecoder(PVMCPUCC pVCpu, bool fBypassHandlers, bool fDisregardLock)
166{
167 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
168 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
169 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
170 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
171 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
172 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
173 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
174 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
175 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
176 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
177
178 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
179 IEMMODE enmMode = iemCalcCpuMode(pVCpu);
180 pVCpu->iem.s.enmCpuMode = enmMode;
181 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
182 pVCpu->iem.s.enmEffAddrMode = enmMode;
183 if (enmMode != IEMMODE_64BIT)
184 {
185 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
186 pVCpu->iem.s.enmEffOpSize = enmMode;
187 }
188 else
189 {
190 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
191 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
192 }
193 pVCpu->iem.s.fPrefixes = 0;
194 pVCpu->iem.s.uRexReg = 0;
195 pVCpu->iem.s.uRexB = 0;
196 pVCpu->iem.s.uRexIndex = 0;
197 pVCpu->iem.s.idxPrefix = 0;
198 pVCpu->iem.s.uVex3rdReg = 0;
199 pVCpu->iem.s.uVexLength = 0;
200 pVCpu->iem.s.fEvexStuff = 0;
201 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
202#ifdef IEM_WITH_CODE_TLB
203 pVCpu->iem.s.pbInstrBuf = NULL;
204 pVCpu->iem.s.offInstrNextByte = 0;
205 pVCpu->iem.s.offCurInstrStart = 0;
206# ifdef VBOX_STRICT
207 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
208 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
209 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
210# endif
211#else
212 pVCpu->iem.s.offOpcode = 0;
213 pVCpu->iem.s.cbOpcode = 0;
214#endif
215 pVCpu->iem.s.offModRm = 0;
216 pVCpu->iem.s.cActiveMappings = 0;
217 pVCpu->iem.s.iNextMapping = 0;
218 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
219 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
220 pVCpu->iem.s.fDisregardLock = fDisregardLock;
221
222#ifdef DBGFTRACE_ENABLED
223 switch (enmMode)
224 {
225 case IEMMODE_64BIT:
226 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.rip);
227 break;
228 case IEMMODE_32BIT:
229 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
230 break;
231 case IEMMODE_16BIT:
232 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
233 break;
234 }
235#endif
236}
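/*
 * Illustrative sketch (not part of the original file): the decoder is set up
 * once per run and the opcode bytes are then prefetched, which is what
 * iemInitDecoderAndPrefetchOpcodes() further down does:
 *
 *     iemInitDecoder(pVCpu, false, false); // fBypassHandlers=false, fDisregardLock=false
 *     // ...then read the first opcode bytes at CS:RIP...
 */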
237
238
239/**
240 * Reinitializes the decoder state 2nd+ loop of IEMExecLots.
241 *
242 * This is mostly a copy of iemInitDecoder.
243 *
244 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
245 */
246DECLINLINE(void) iemReInitDecoder(PVMCPUCC pVCpu)
247{
248 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
249 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
250 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
251 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
252 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
253 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
254 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
255 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
256 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
257
258 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu); /** @todo this should be updated during execution! */
259 IEMMODE enmMode = iemCalcCpuMode(pVCpu);
260 pVCpu->iem.s.enmCpuMode = enmMode; /** @todo this should be updated during execution! */
261 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
262 pVCpu->iem.s.enmEffAddrMode = enmMode;
263 if (enmMode != IEMMODE_64BIT)
264 {
265 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
266 pVCpu->iem.s.enmEffOpSize = enmMode;
267 }
268 else
269 {
270 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
271 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
272 }
273 pVCpu->iem.s.fPrefixes = 0;
274 pVCpu->iem.s.uRexReg = 0;
275 pVCpu->iem.s.uRexB = 0;
276 pVCpu->iem.s.uRexIndex = 0;
277 pVCpu->iem.s.idxPrefix = 0;
278 pVCpu->iem.s.uVex3rdReg = 0;
279 pVCpu->iem.s.uVexLength = 0;
280 pVCpu->iem.s.fEvexStuff = 0;
281 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
282#ifdef IEM_WITH_CODE_TLB
283 if (pVCpu->iem.s.pbInstrBuf)
284 {
285 uint64_t off = (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pVCpu->cpum.GstCtx.rip : pVCpu->cpum.GstCtx.eip + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base)
286 - pVCpu->iem.s.uInstrBufPc;
287 if (off < pVCpu->iem.s.cbInstrBufTotal)
288 {
289 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
290 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
291 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
292 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
293 else
294 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
295 }
296 else
297 {
298 pVCpu->iem.s.pbInstrBuf = NULL;
299 pVCpu->iem.s.offInstrNextByte = 0;
300 pVCpu->iem.s.offCurInstrStart = 0;
301 pVCpu->iem.s.cbInstrBuf = 0;
302 pVCpu->iem.s.cbInstrBufTotal = 0;
303 }
304 }
305 else
306 {
307 pVCpu->iem.s.offInstrNextByte = 0;
308 pVCpu->iem.s.offCurInstrStart = 0;
309 pVCpu->iem.s.cbInstrBuf = 0;
310 pVCpu->iem.s.cbInstrBufTotal = 0;
311 }
312#else
313 pVCpu->iem.s.cbOpcode = 0;
314 pVCpu->iem.s.offOpcode = 0;
315#endif
316 pVCpu->iem.s.offModRm = 0;
317 Assert(pVCpu->iem.s.cActiveMappings == 0);
318 pVCpu->iem.s.iNextMapping = 0;
319 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
320 Assert(pVCpu->iem.s.fBypassHandlers == false);
321
322#ifdef DBGFTRACE_ENABLED
323 switch (enmMode)
324 {
325 case IEMMODE_64BIT:
326 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.rip);
327 break;
328 case IEMMODE_32BIT:
329 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
330 break;
331 case IEMMODE_16BIT:
332 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
333 break;
334 }
335#endif
336}
337
338
339
340/**
341 * Prefetch opcodes the first time when starting executing.
342 *
343 * @returns Strict VBox status code.
344 * @param pVCpu The cross context virtual CPU structure of the
345 * calling thread.
346 * @param fBypassHandlers Whether to bypass access handlers.
347 * @param fDisregardLock Whether to disregard LOCK prefixes.
348 *
349 * @todo Combine fDisregardLock and fBypassHandlers into a flag parameter and
350 * store them as such.
351 */
352static VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPUCC pVCpu, bool fBypassHandlers, bool fDisregardLock) RT_NOEXCEPT
353{
354 iemInitDecoder(pVCpu, fBypassHandlers, fDisregardLock);
355
356#ifdef IEM_WITH_CODE_TLB
357 /** @todo Do ITLB lookup here. */
358
359#else /* !IEM_WITH_CODE_TLB */
360
361 /*
362 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
363 *
364 * First translate CS:rIP to a physical address.
365 */
366 uint32_t cbToTryRead;
367 RTGCPTR GCPtrPC;
368 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
369 {
370 cbToTryRead = GUEST_PAGE_SIZE;
371 GCPtrPC = pVCpu->cpum.GstCtx.rip;
372 if (IEM_IS_CANONICAL(GCPtrPC))
373 cbToTryRead = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
374 else
375 return iemRaiseGeneralProtectionFault0(pVCpu);
376 }
377 else
378 {
379 uint32_t GCPtrPC32 = pVCpu->cpum.GstCtx.eip;
380 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
381 if (GCPtrPC32 <= pVCpu->cpum.GstCtx.cs.u32Limit)
382 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrPC32 + 1;
383 else
384 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
385 if (cbToTryRead) { /* likely */ }
386 else /* overflowed */
387 {
388 Assert(GCPtrPC32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
389 cbToTryRead = UINT32_MAX;
390 }
391 GCPtrPC = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrPC32;
392 Assert(GCPtrPC <= UINT32_MAX);
393 }
394
395 PGMPTWALK Walk;
396 int rc = PGMGstGetPage(pVCpu, GCPtrPC, &Walk);
397 if (RT_SUCCESS(rc))
398 Assert(Walk.fSucceeded); /* probable. */
399 else
400 {
401 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
402#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
403 if (Walk.fFailed & PGM_WALKFAIL_EPT)
404 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
405#endif
406 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
407 }
408 if ((Walk.fEffective & X86_PTE_US) || pVCpu->iem.s.uCpl != 3) { /* likely */ }
409 else
410 {
411 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
412#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
413 if (Walk.fFailed & PGM_WALKFAIL_EPT)
414 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
415#endif
416 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
417 }
418 if (!(Walk.fEffective & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
419 else
420 {
421 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
422#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
423 if (Walk.fFailed & PGM_WALKFAIL_EPT)
424 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
425#endif
426 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
427 }
428 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
429 /** @todo Check reserved bits and such stuff. PGM is better at doing
430 * that, so do it when implementing the guest virtual address
431 * TLB... */
432
433 /*
434 * Read the bytes at this address.
435 */
436 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
437 if (cbToTryRead > cbLeftOnPage)
438 cbToTryRead = cbLeftOnPage;
439 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
440 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
441
442 if (!pVCpu->iem.s.fBypassHandlers)
443 {
444 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
445 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
446 { /* likely */ }
447 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
448 {
449 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
450 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
451 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
452 }
453 else
454 {
455 Log((RT_SUCCESS(rcStrict)
456 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
457 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
458 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
459 return rcStrict;
460 }
461 }
462 else
463 {
464 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
465 if (RT_SUCCESS(rc))
466 { /* likely */ }
467 else
468 {
469 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
470 GCPtrPC, GCPhys, cbToTryRead, rc));
471 return rc;
472 }
473 }
474 pVCpu->iem.s.cbOpcode = cbToTryRead;
475#endif /* !IEM_WITH_CODE_TLB */
476 return VINF_SUCCESS;
477}
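/*
 * Usage sketch (illustrative, not part of the original file): a caller that
 * wants to interpret at the current CS:RIP would typically do:
 *
 *     VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict; // e.g. #GP(0), #PF or an informational PGM status
 */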
478
479
480/**
481 * Invalidates the IEM TLBs.
482 *
483 * This is called internally as well as by PGM when moving GC mappings.
484 *
485 *
486 * @param pVCpu The cross context virtual CPU structure of the calling
487 * thread.
488 */
489VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPUCC pVCpu)
490{
491#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
492 Log10(("IEMTlbInvalidateAll\n"));
493# ifdef IEM_WITH_CODE_TLB
494 pVCpu->iem.s.cbInstrBufTotal = 0;
495 pVCpu->iem.s.CodeTlb.uTlbRevision += IEMTLB_REVISION_INCR;
496 if (pVCpu->iem.s.CodeTlb.uTlbRevision != 0)
497 { /* very likely */ }
498 else
499 {
500 pVCpu->iem.s.CodeTlb.uTlbRevision = IEMTLB_REVISION_INCR;
501 unsigned i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
502 while (i-- > 0)
503 pVCpu->iem.s.CodeTlb.aEntries[i].uTag = 0;
504 }
505# endif
506
507# ifdef IEM_WITH_DATA_TLB
508 pVCpu->iem.s.DataTlb.uTlbRevision += IEMTLB_REVISION_INCR;
509 if (pVCpu->iem.s.DataTlb.uTlbRevision != 0)
510 { /* very likely */ }
511 else
512 {
513 pVCpu->iem.s.DataTlb.uTlbRevision = IEMTLB_REVISION_INCR;
514 unsigned i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
515 while (i-- > 0)
516 pVCpu->iem.s.DataTlb.aEntries[i].uTag = 0;
517 }
518# endif
519#else
520 RT_NOREF(pVCpu);
521#endif
522}
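/*
 * Illustrative note (not part of the original file): entries are not scrubbed
 * on the fast path; a lookup only counts as a hit when the entry tag carries
 * the current revision, so bumping uTlbRevision above invalidates everything
 * at once:
 *
 *     bool const fHit = pTlb->aEntries[IEMTLB_TAG_TO_INDEX(uTag)].uTag == (uTag | pTlb->uTlbRevision);
 *
 * (pTlb and uTag are hypothetical locals; the real lookups use
 * IEMTLB_CALC_TAG / IEMTLB_TAG_TO_ENTRY as in iemOpcodeFetchBytesJmp below.)
 */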
523
524
525/**
526 * Invalidates a page in the TLBs.
527 *
528 * @param pVCpu The cross context virtual CPU structure of the calling
529 * thread.
530 * @param GCPtr The address of the page to invalidate.
531 * @thread EMT(pVCpu)
532 */
533VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPUCC pVCpu, RTGCPTR GCPtr)
534{
535#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
536 GCPtr = IEMTLB_CALC_TAG_NO_REV(GCPtr);
537 Assert(!(GCPtr >> (48 - X86_PAGE_SHIFT)));
538 uintptr_t const idx = IEMTLB_TAG_TO_INDEX(GCPtr);
539
540# ifdef IEM_WITH_CODE_TLB
541 if (pVCpu->iem.s.CodeTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.CodeTlb.uTlbRevision))
542 {
543 pVCpu->iem.s.CodeTlb.aEntries[idx].uTag = 0;
544 if (GCPtr == IEMTLB_CALC_TAG_NO_REV(pVCpu->iem.s.uInstrBufPc))
545 pVCpu->iem.s.cbInstrBufTotal = 0;
546 }
547# endif
548
549# ifdef IEM_WITH_DATA_TLB
550 if (pVCpu->iem.s.DataTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.DataTlb.uTlbRevision))
551 pVCpu->iem.s.DataTlb.aEntries[idx].uTag = 0;
552# endif
553#else
554 NOREF(pVCpu); NOREF(GCPtr);
555#endif
556}
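/*
 * Usage sketch (illustrative, not part of the original file): INVLPG-style
 * flushing of a single guest linear address, assuming GCPtrPage holds the
 * address the guest asked to invalidate:
 *
 *     IEMTlbInvalidatePage(pVCpu, GCPtrPage);
 */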
557
558
559#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
560/**
561 * Invalidates both TLBs the slow way following a physical revision rollover.
562 *
563 * Worker for IEMTlbInvalidateAllPhysical,
564 * IEMTlbInvalidateAllPhysicalAllCpus, iemOpcodeFetchBytesJmp, iemMemMap,
565 * iemMemMapJmp and others.
566 *
567 * @thread EMT(pVCpu)
568 */
569static void IEMTlbInvalidateAllPhysicalSlow(PVMCPUCC pVCpu)
570{
571 Log10(("IEMTlbInvalidateAllPhysicalSlow\n"));
572 ASMAtomicWriteU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
573 ASMAtomicWriteU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
574
575 unsigned i;
576# ifdef IEM_WITH_CODE_TLB
577 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
578 while (i-- > 0)
579 {
580 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
581 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~( IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
582 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
583 }
584# endif
585# ifdef IEM_WITH_DATA_TLB
586 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
587 while (i-- > 0)
588 {
589 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
590 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~( IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
591 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
592 }
593# endif
594
595}
596#endif
597
598
599/**
600 * Invalidates the host physical aspects of the IEM TLBs.
601 *
602 * This is called internally as well as by PGM when moving GC mappings.
603 *
604 * @param pVCpu The cross context virtual CPU structure of the calling
605 * thread.
606 * @note Currently not used.
607 */
608VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPUCC pVCpu)
609{
610#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
611 /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
612 Log10(("IEMTlbInvalidateAllPhysical\n"));
613
614# ifdef IEM_WITH_CODE_TLB
615 pVCpu->iem.s.cbInstrBufTotal = 0;
616# endif
617 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
618 if (RT_LIKELY(uTlbPhysRev > IEMTLB_PHYS_REV_INCR * 2))
619 {
620 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
621 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
622 }
623 else
624 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
625#else
626 NOREF(pVCpu);
627#endif
628}
629
630
631/**
632 * Invalidates the host physical aspects of the IEM TLBs.
633 *
634 * This is called internally as well as by PGM when moving GC mappings.
635 *
636 * @param pVM The cross context VM structure.
637 * @param idCpuCaller The ID of the calling EMT if available to the caller,
638 * otherwise NIL_VMCPUID.
639 *
640 * @remarks Caller holds the PGM lock.
641 */
642VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVMCC pVM, VMCPUID idCpuCaller)
643{
644#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
645 PVMCPUCC const pVCpuCaller = idCpuCaller >= pVM->cCpus ? VMMGetCpu(pVM) : VMMGetCpuById(pVM, idCpuCaller);
646 if (pVCpuCaller)
647 VMCPU_ASSERT_EMT(pVCpuCaller);
648 Log10(("IEMTlbInvalidateAllPhysicalAllCpus\n"));
649
650 VMCC_FOR_EACH_VMCPU(pVM)
651 {
652# ifdef IEM_WITH_CODE_TLB
653 if (pVCpuCaller == pVCpu)
654 pVCpu->iem.s.cbInstrBufTotal = 0;
655# endif
656
657 uint64_t const uTlbPhysRevPrev = ASMAtomicUoReadU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev);
658 uint64_t uTlbPhysRevNew = uTlbPhysRevPrev + IEMTLB_PHYS_REV_INCR;
659 if (RT_LIKELY(uTlbPhysRevNew > IEMTLB_PHYS_REV_INCR * 2))
660 { /* likely */}
661 else if (pVCpuCaller == pVCpu)
662 uTlbPhysRevNew = IEMTLB_PHYS_REV_INCR;
663 else
664 {
665 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
666 continue;
667 }
668 ASMAtomicCmpXchgU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev);
669 ASMAtomicCmpXchgU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev);
670 }
671 VMCC_FOR_EACH_VMCPU_END(pVM);
672
673#else
674 RT_NOREF(pVM, idCpuCaller);
675#endif
676}
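/*
 * Illustrative note (not part of the original file): the compare-exchange
 * above only publishes the new physical revision if the remote EMT has not
 * already advanced it itself, i.e. conceptually:
 *
 *     if (!ASMAtomicCmpXchgU64(&pTlb->uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev))
 *     {
 *         // lost the race: the owning EMT bumped it, which is just as good
 *     }
 *
 * (pTlb is a hypothetical shorthand for the per-VCpu code/data TLB members.)
 */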
677
678#ifdef IEM_WITH_CODE_TLB
679
680/**
681 * Tries to fetch @a cbDst opcode bytes, raising the appropriate exception on
682 * failure and jumping.
683 *
684 * We end up here for a number of reasons:
685 * - pbInstrBuf isn't yet initialized.
686 * - Advancing beyond the buffer boundary (e.g. cross page).
687 * - Advancing beyond the CS segment limit.
688 * - Fetching from a non-mappable page (e.g. MMIO).
689 *
690 * @param pVCpu The cross context virtual CPU structure of the
691 * calling thread.
692 * @param pvDst Where to return the bytes.
693 * @param cbDst Number of bytes to read.
694 *
695 * @todo Make cbDst = 0 a way of initializing pbInstrBuf?
696 */
697void iemOpcodeFetchBytesJmp(PVMCPUCC pVCpu, size_t cbDst, void *pvDst) RT_NOEXCEPT
698{
699#ifdef IN_RING3
700 for (;;)
701 {
702 Assert(cbDst <= 8);
703 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
704
705 /*
706 * We might have a partial buffer match, deal with that first to make the
707 * rest simpler. This is the first part of the cross page/buffer case.
708 */
709 if (pVCpu->iem.s.pbInstrBuf != NULL)
710 {
711 if (offBuf < pVCpu->iem.s.cbInstrBuf)
712 {
713 Assert(offBuf + cbDst > pVCpu->iem.s.cbInstrBuf);
714 uint32_t const cbCopy = pVCpu->iem.s.cbInstrBuf - pVCpu->iem.s.offInstrNextByte;
715 memcpy(pvDst, &pVCpu->iem.s.pbInstrBuf[offBuf], cbCopy);
716
717 cbDst -= cbCopy;
718 pvDst = (uint8_t *)pvDst + cbCopy;
719 offBuf += cbCopy;
720 pVCpu->iem.s.offInstrNextByte += cbCopy;
721 }
722 }
723
724 /*
725 * Check segment limit, figuring how much we're allowed to access at this point.
726 *
727 * We will fault immediately if RIP is past the segment limit / in non-canonical
728 * territory. If we do continue, there are one or more bytes to read before we
729 * end up in trouble and we need to do that first before faulting.
730 */
731 RTGCPTR GCPtrFirst;
732 uint32_t cbMaxRead;
733 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
734 {
735 GCPtrFirst = pVCpu->cpum.GstCtx.rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
736 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
737 { /* likely */ }
738 else
739 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
740 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
741 }
742 else
743 {
744 GCPtrFirst = pVCpu->cpum.GstCtx.eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
745 Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
746 if (RT_LIKELY((uint32_t)GCPtrFirst <= pVCpu->cpum.GstCtx.cs.u32Limit))
747 { /* likely */ }
748 else
749 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
750 cbMaxRead = pVCpu->cpum.GstCtx.cs.u32Limit - (uint32_t)GCPtrFirst + 1;
751 if (cbMaxRead != 0)
752 { /* likely */ }
753 else
754 {
755 /* Overflowed because address is 0 and limit is max. */
756 Assert(GCPtrFirst == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
757 cbMaxRead = X86_PAGE_SIZE;
758 }
759 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base;
760 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
761 if (cbMaxRead2 < cbMaxRead)
762 cbMaxRead = cbMaxRead2;
763 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
764 }
765
766 /*
767 * Get the TLB entry for this piece of code.
768 */
769 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.CodeTlb, GCPtrFirst);
770 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.CodeTlb, uTag);
771 if (pTlbe->uTag == uTag)
772 {
773 /* likely when executing lots of code, otherwise unlikely */
774# ifdef VBOX_WITH_STATISTICS
775 pVCpu->iem.s.CodeTlb.cTlbHits++;
776# endif
777 }
778 else
779 {
780 pVCpu->iem.s.CodeTlb.cTlbMisses++;
781 PGMPTWALK Walk;
782 int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &Walk);
783 if (RT_FAILURE(rc))
784 {
785#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
786 /** @todo Nested VMX: Need to handle EPT violation/misconfig here? */
787 Assert(!(Walk.fFailed & PGM_WALKFAIL_EPT));
788#endif
789 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
790 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, rc);
791 }
792
793 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
794 Assert(Walk.fSucceeded);
795 pTlbe->uTag = uTag;
796 pTlbe->fFlagsAndPhysRev = (~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A))
797 | (Walk.fEffective >> X86_PTE_PAE_BIT_NX);
798 pTlbe->GCPhys = Walk.GCPhys;
799 pTlbe->pbMappingR3 = NULL;
800 }
801
802 /*
803 * Check TLB page table level access flags.
804 */
805 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
806 {
807 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && pVCpu->iem.s.uCpl == 3)
808 {
809 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
810 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
811 }
812 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
813 {
814 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrFirst));
815 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
816 }
817 }
818
819 /*
820 * Look up the physical page info if necessary.
821 */
822 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
823 { /* not necessary */ }
824 else
825 {
826 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
827 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
828 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
829 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
830 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
831 { /* likely */ }
832 else
833 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
834 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
835 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_UNASSIGNED);
836 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
837 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
838 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
839 }
840
841# if defined(IN_RING3) || defined(IN_RING0) /** @todo fixme */
842 /*
843 * Try do a direct read using the pbMappingR3 pointer.
844 */
845 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
846 == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
847 {
848 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
849 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
850 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
851 {
852 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
853 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
854 }
855 else
856 {
857 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
858 Assert(cbInstr < cbMaxRead);
859 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
860 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
861 }
862 if (cbDst <= cbMaxRead)
863 {
864 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
865 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
866 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
867 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
868 return;
869 }
870 pVCpu->iem.s.pbInstrBuf = NULL;
871
872 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
873 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
874 }
875 else
876# endif
877#if 0
878 /*
879 * If there is no special read handling, we can read a bit more and
880 * put it in the prefetch buffer.
881 */
882 if ( cbDst < cbMaxRead
883 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
884 {
885 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
886 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
887 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
888 { /* likely */ }
889 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
890 {
891 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
892 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
893 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
894 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
895 }
896 else
897 {
898 Log((RT_SUCCESS(rcStrict)
899 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
900 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
901 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
902 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
903 }
904 }
905 /*
906 * Special read handling, so only read exactly what's needed.
907 * This is a highly unlikely scenario.
908 */
909 else
910#endif
911 {
912 pVCpu->iem.s.CodeTlb.cTlbSlowReadPath++;
913 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
914 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
915 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
916 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
917 { /* likely */ }
918 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
919 {
920 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
921 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
922 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
923 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
924 }
925 else
926 {
927 Log((RT_SUCCESS(rcStrict)
928 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
929 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
930 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
931 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
932 }
933 pVCpu->iem.s.offInstrNextByte = offBuf + cbToRead;
934 if (cbToRead == cbDst)
935 return;
936 }
937
938 /*
939 * More to read, loop.
940 */
941 cbDst -= cbMaxRead;
942 pvDst = (uint8_t *)pvDst + cbMaxRead;
943 }
944#else
945 RT_NOREF(pvDst, cbDst);
946 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_INTERNAL_ERROR);
947#endif
948}
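/*
 * Usage sketch (illustrative, not part of the original file): the slow-path
 * opcode getters below simply request the exact number of bytes they need,
 * e.g. for a word:
 *
 *     uint16_t u16;
 *     iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
 *     return u16;
 */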
949
950#else
951
952/**
953 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
954 * exception if it fails.
955 *
956 * @returns Strict VBox status code.
957 * @param pVCpu The cross context virtual CPU structure of the
958 * calling thread.
959 * @param cbMin The minimum number of bytes relative to offOpcode
960 * that must be read.
961 */
962VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPUCC pVCpu, size_t cbMin) RT_NOEXCEPT
963{
964 /*
965 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
966 *
967 * First translate CS:rIP to a physical address.
968 */
969 uint8_t cbLeft = pVCpu->iem.s.cbOpcode - pVCpu->iem.s.offOpcode; Assert(cbLeft < cbMin);
970 uint32_t cbToTryRead;
971 RTGCPTR GCPtrNext;
972 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
973 {
974 cbToTryRead = GUEST_PAGE_SIZE;
975 GCPtrNext = pVCpu->cpum.GstCtx.rip + pVCpu->iem.s.cbOpcode;
976 if (!IEM_IS_CANONICAL(GCPtrNext))
977 return iemRaiseGeneralProtectionFault0(pVCpu);
978 }
979 else
980 {
981 uint32_t GCPtrNext32 = pVCpu->cpum.GstCtx.eip;
982 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
983 GCPtrNext32 += pVCpu->iem.s.cbOpcode;
984 if (GCPtrNext32 > pVCpu->cpum.GstCtx.cs.u32Limit)
985 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
986 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrNext32 + 1;
987 if (!cbToTryRead) /* overflowed */
988 {
989 Assert(GCPtrNext32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
990 cbToTryRead = UINT32_MAX;
991 /** @todo check out wrapping around the code segment. */
992 }
993 if (cbToTryRead < cbMin - cbLeft)
994 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
995 GCPtrNext = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrNext32;
996 }
997
998 /* Only read up to the end of the page, and make sure we don't read more
999 than the opcode buffer can hold. */
1000 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1001 if (cbToTryRead > cbLeftOnPage)
1002 cbToTryRead = cbLeftOnPage;
1003 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode)
1004 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode;
1005/** @todo r=bird: Convert assertion into undefined opcode exception? */
1006 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
1007
1008 PGMPTWALK Walk;
1009 int rc = PGMGstGetPage(pVCpu, GCPtrNext, &Walk);
1010 if (RT_FAILURE(rc))
1011 {
1012 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
1013#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1014 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1015 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
1016#endif
1017 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
1018 }
1019 if (!(Walk.fEffective & X86_PTE_US) && pVCpu->iem.s.uCpl == 3)
1020 {
1021 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
1022#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1023 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1024 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
1025#endif
1026 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1027 }
1028 if ((Walk.fEffective & X86_PTE_PAE_NX) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
1029 {
1030 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
1031#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1032 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1033 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
1034#endif
1035 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1036 }
1037 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1038 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pVCpu->iem.s.cbOpcode));
1039 /** @todo Check reserved bits and such stuff. PGM is better at doing
1040 * that, so do it when implementing the guest virtual address
1041 * TLB... */
1042
1043 /*
1044 * Read the bytes at this address.
1045 *
1046 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
1047 * and since PATM should only patch the start of an instruction there
1048 * should be no need to check again here.
1049 */
1050 if (!pVCpu->iem.s.fBypassHandlers)
1051 {
1052 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode],
1053 cbToTryRead, PGMACCESSORIGIN_IEM);
1054 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1055 { /* likely */ }
1056 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1057 {
1058 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1059 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1060 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1061 }
1062 else
1063 {
1064 Log((RT_SUCCESS(rcStrict)
1065 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1066 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1067 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1068 return rcStrict;
1069 }
1070 }
1071 else
1072 {
1073 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode], GCPhys, cbToTryRead);
1074 if (RT_SUCCESS(rc))
1075 { /* likely */ }
1076 else
1077 {
1078 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
1079 return rc;
1080 }
1081 }
1082 pVCpu->iem.s.cbOpcode += cbToTryRead;
1083 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
1084
1085 return VINF_SUCCESS;
1086}
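/*
 * Usage sketch (illustrative, not part of the original file): the non-TLB
 * slow-path getters below top up the buffer and then read from abOpcode,
 * e.g. for a word (pu16 and offOpcode come from the surrounding getter):
 *
 *     VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
 *     if (rcStrict == VINF_SUCCESS)
 *     {
 *         *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
 *         pVCpu->iem.s.offOpcode = offOpcode + 2;
 *     }
 */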
1087
1088#endif /* !IEM_WITH_CODE_TLB */
1089#ifndef IEM_WITH_SETJMP
1090
1091/**
1092 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
1093 *
1094 * @returns Strict VBox status code.
1095 * @param pVCpu The cross context virtual CPU structure of the
1096 * calling thread.
1097 * @param pb Where to return the opcode byte.
1098 */
1099VBOXSTRICTRC iemOpcodeGetNextU8Slow(PVMCPUCC pVCpu, uint8_t *pb) RT_NOEXCEPT
1100{
1101 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1102 if (rcStrict == VINF_SUCCESS)
1103 {
1104 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1105 *pb = pVCpu->iem.s.abOpcode[offOpcode];
1106 pVCpu->iem.s.offOpcode = offOpcode + 1;
1107 }
1108 else
1109 *pb = 0;
1110 return rcStrict;
1111}
1112
1113#else /* IEM_WITH_SETJMP */
1114
1115/**
1116 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
1117 *
1118 * @returns The opcode byte.
1119 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1120 */
1121uint8_t iemOpcodeGetNextU8SlowJmp(PVMCPUCC pVCpu) RT_NOEXCEPT
1122{
1123# ifdef IEM_WITH_CODE_TLB
1124 uint8_t u8;
1125 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
1126 return u8;
1127# else
1128 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1129 if (rcStrict == VINF_SUCCESS)
1130 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
1131 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1132# endif
1133}
1134
1135#endif /* IEM_WITH_SETJMP */
1136
1137#ifndef IEM_WITH_SETJMP
1138
1139/**
1140 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
1141 *
1142 * @returns Strict VBox status code.
1143 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1144 * @param pu16 Where to return the opcode word.
1145 */
1146VBOXSTRICTRC iemOpcodeGetNextS8SxU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
1147{
1148 uint8_t u8;
1149 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1150 if (rcStrict == VINF_SUCCESS)
1151 *pu16 = (int8_t)u8;
1152 return rcStrict;
1153}
1154
1155
1156/**
1157 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
1158 *
1159 * @returns Strict VBox status code.
1160 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1161 * @param pu32 Where to return the opcode dword.
1162 */
1163VBOXSTRICTRC iemOpcodeGetNextS8SxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1164{
1165 uint8_t u8;
1166 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1167 if (rcStrict == VINF_SUCCESS)
1168 *pu32 = (int8_t)u8;
1169 return rcStrict;
1170}
1171
1172
1173/**
1174 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
1175 *
1176 * @returns Strict VBox status code.
1177 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1178 * @param pu64 Where to return the opcode qword.
1179 */
1180VBOXSTRICTRC iemOpcodeGetNextS8SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1181{
1182 uint8_t u8;
1183 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1184 if (rcStrict == VINF_SUCCESS)
1185 *pu64 = (int8_t)u8;
1186 return rcStrict;
1187}
1188
1189#endif /* !IEM_WITH_SETJMP */
1190
1191
1192#ifndef IEM_WITH_SETJMP
1193
1194/**
1195 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
1196 *
1197 * @returns Strict VBox status code.
1198 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1199 * @param pu16 Where to return the opcode word.
1200 */
1201VBOXSTRICTRC iemOpcodeGetNextU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
1202{
1203 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1204 if (rcStrict == VINF_SUCCESS)
1205 {
1206 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1207# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1208 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1209# else
1210 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1211# endif
1212 pVCpu->iem.s.offOpcode = offOpcode + 2;
1213 }
1214 else
1215 *pu16 = 0;
1216 return rcStrict;
1217}
1218
1219#else /* IEM_WITH_SETJMP */
1220
1221/**
1222 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error.
1223 *
1224 * @returns The opcode word.
1225 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1226 */
1227uint16_t iemOpcodeGetNextU16SlowJmp(PVMCPUCC pVCpu) RT_NOEXCEPT
1228{
1229# ifdef IEM_WITH_CODE_TLB
1230 uint16_t u16;
1231 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
1232 return u16;
1233# else
1234 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1235 if (rcStrict == VINF_SUCCESS)
1236 {
1237 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1238 pVCpu->iem.s.offOpcode += 2;
1239# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1240 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1241# else
1242 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1243# endif
1244 }
1245 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1246# endif
1247}
1248
1249#endif /* IEM_WITH_SETJMP */
1250
1251#ifndef IEM_WITH_SETJMP
1252
1253/**
1254 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
1255 *
1256 * @returns Strict VBox status code.
1257 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1258 * @param pu32 Where to return the opcode double word.
1259 */
1260VBOXSTRICTRC iemOpcodeGetNextU16ZxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1261{
1262 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1263 if (rcStrict == VINF_SUCCESS)
1264 {
1265 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1266 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1267 pVCpu->iem.s.offOpcode = offOpcode + 2;
1268 }
1269 else
1270 *pu32 = 0;
1271 return rcStrict;
1272}
1273
1274
1275/**
1276 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
1277 *
1278 * @returns Strict VBox status code.
1279 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1280 * @param pu64 Where to return the opcode quad word.
1281 */
1282VBOXSTRICTRC iemOpcodeGetNextU16ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1283{
1284 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1285 if (rcStrict == VINF_SUCCESS)
1286 {
1287 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1288 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1289 pVCpu->iem.s.offOpcode = offOpcode + 2;
1290 }
1291 else
1292 *pu64 = 0;
1293 return rcStrict;
1294}
1295
1296#endif /* !IEM_WITH_SETJMP */
1297
1298#ifndef IEM_WITH_SETJMP
1299
1300/**
1301 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
1302 *
1303 * @returns Strict VBox status code.
1304 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1305 * @param pu32 Where to return the opcode dword.
1306 */
1307VBOXSTRICTRC iemOpcodeGetNextU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1308{
1309 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1310 if (rcStrict == VINF_SUCCESS)
1311 {
1312 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1313# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1314 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1315# else
1316 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1317 pVCpu->iem.s.abOpcode[offOpcode + 1],
1318 pVCpu->iem.s.abOpcode[offOpcode + 2],
1319 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1320# endif
1321 pVCpu->iem.s.offOpcode = offOpcode + 4;
1322 }
1323 else
1324 *pu32 = 0;
1325 return rcStrict;
1326}
1327
1328#else /* IEM_WITH_SETJMP */
1329
1330/**
1331 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
1332 *
1333 * @returns The opcode dword.
1334 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1335 */
1336uint32_t iemOpcodeGetNextU32SlowJmp(PVMCPUCC pVCpu) RT_NOEXCEPT
1337{
1338# ifdef IEM_WITH_CODE_TLB
1339 uint32_t u32;
1340 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
1341 return u32;
1342# else
1343 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1344 if (rcStrict == VINF_SUCCESS)
1345 {
1346 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1347 pVCpu->iem.s.offOpcode = offOpcode + 4;
1348# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1349 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1350# else
1351 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1352 pVCpu->iem.s.abOpcode[offOpcode + 1],
1353 pVCpu->iem.s.abOpcode[offOpcode + 2],
1354 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1355# endif
1356 }
1357 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1358# endif
1359}
1360
1361#endif /* IEM_WITH_SETJMP */
1362
1363#ifndef IEM_WITH_SETJMP
1364
1365/**
1366 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
1367 *
1368 * @returns Strict VBox status code.
1369 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1370 * @param pu64 Where to return the opcode dword, zero extended to a qword.
1371 */
1372VBOXSTRICTRC iemOpcodeGetNextU32ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1373{
1374 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1375 if (rcStrict == VINF_SUCCESS)
1376 {
1377 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1378 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1379 pVCpu->iem.s.abOpcode[offOpcode + 1],
1380 pVCpu->iem.s.abOpcode[offOpcode + 2],
1381 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1382 pVCpu->iem.s.offOpcode = offOpcode + 4;
1383 }
1384 else
1385 *pu64 = 0;
1386 return rcStrict;
1387}
1388
1389
1390/**
1391 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
1392 *
1393 * @returns Strict VBox status code.
1394 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1395 * @param pu64 Where to return the opcode qword.
1396 */
1397VBOXSTRICTRC iemOpcodeGetNextS32SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1398{
1399 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1400 if (rcStrict == VINF_SUCCESS)
1401 {
1402 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1403 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1404 pVCpu->iem.s.abOpcode[offOpcode + 1],
1405 pVCpu->iem.s.abOpcode[offOpcode + 2],
1406 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1407 pVCpu->iem.s.offOpcode = offOpcode + 4;
1408 }
1409 else
1410 *pu64 = 0;
1411 return rcStrict;
1412}
1413
1414#endif /* !IEM_WITH_SETJMP */
1415
1416#ifndef IEM_WITH_SETJMP
1417
1418/**
1419 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
1420 *
1421 * @returns Strict VBox status code.
1422 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1423 * @param pu64 Where to return the opcode qword.
1424 */
1425VBOXSTRICTRC iemOpcodeGetNextU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1426{
1427 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
1428 if (rcStrict == VINF_SUCCESS)
1429 {
1430 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1431# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1432 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1433# else
1434 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1435 pVCpu->iem.s.abOpcode[offOpcode + 1],
1436 pVCpu->iem.s.abOpcode[offOpcode + 2],
1437 pVCpu->iem.s.abOpcode[offOpcode + 3],
1438 pVCpu->iem.s.abOpcode[offOpcode + 4],
1439 pVCpu->iem.s.abOpcode[offOpcode + 5],
1440 pVCpu->iem.s.abOpcode[offOpcode + 6],
1441 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1442# endif
1443 pVCpu->iem.s.offOpcode = offOpcode + 8;
1444 }
1445 else
1446 *pu64 = 0;
1447 return rcStrict;
1448}
1449
1450#else /* IEM_WITH_SETJMP */
1451
1452/**
1453 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
1454 *
1455 * @returns The opcode qword.
1456 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1457 */
1458uint64_t iemOpcodeGetNextU64SlowJmp(PVMCPUCC pVCpu) RT_NOEXCEPT
1459{
1460# ifdef IEM_WITH_CODE_TLB
1461 uint64_t u64;
1462 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
1463 return u64;
1464# else
1465 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
1466 if (rcStrict == VINF_SUCCESS)
1467 {
1468 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1469 pVCpu->iem.s.offOpcode = offOpcode + 8;
1470# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1471 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1472# else
1473 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1474 pVCpu->iem.s.abOpcode[offOpcode + 1],
1475 pVCpu->iem.s.abOpcode[offOpcode + 2],
1476 pVCpu->iem.s.abOpcode[offOpcode + 3],
1477 pVCpu->iem.s.abOpcode[offOpcode + 4],
1478 pVCpu->iem.s.abOpcode[offOpcode + 5],
1479 pVCpu->iem.s.abOpcode[offOpcode + 6],
1480 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1481# endif
1482 }
1483 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1484# endif
1485}
1486
1487#endif /* IEM_WITH_SETJMP */
1488
1489
1490
1491/** @name Misc Worker Functions.
1492 * @{
1493 */
1494
1495/**
1496 * Gets the exception class for the specified exception vector.
1497 *
1498 * @returns The class of the specified exception.
1499 * @param uVector The exception vector.
1500 */
1501static IEMXCPTCLASS iemGetXcptClass(uint8_t uVector) RT_NOEXCEPT
1502{
1503 Assert(uVector <= X86_XCPT_LAST);
1504 switch (uVector)
1505 {
1506 case X86_XCPT_DE:
1507 case X86_XCPT_TS:
1508 case X86_XCPT_NP:
1509 case X86_XCPT_SS:
1510 case X86_XCPT_GP:
1511 case X86_XCPT_SX: /* AMD only */
1512 return IEMXCPTCLASS_CONTRIBUTORY;
1513
1514 case X86_XCPT_PF:
1515 case X86_XCPT_VE: /* Intel only */
1516 return IEMXCPTCLASS_PAGE_FAULT;
1517
1518 case X86_XCPT_DF:
1519 return IEMXCPTCLASS_DOUBLE_FAULT;
1520 }
1521 return IEMXCPTCLASS_BENIGN;
1522}
1523
1524
1525/**
1526 * Evaluates how to handle an exception caused during delivery of another event
1527 * (exception / interrupt).
1528 *
1529 * @returns How to handle the recursive exception.
1530 * @param pVCpu The cross context virtual CPU structure of the
1531 * calling thread.
1532 * @param fPrevFlags The flags of the previous event.
1533 * @param uPrevVector The vector of the previous event.
1534 * @param fCurFlags The flags of the current exception.
1535 * @param uCurVector The vector of the current exception.
1536 * @param pfXcptRaiseInfo Where to store additional information about the
1537 * exception condition. Optional.
1538 */
1539VMM_INT_DECL(IEMXCPTRAISE) IEMEvaluateRecursiveXcpt(PVMCPUCC pVCpu, uint32_t fPrevFlags, uint8_t uPrevVector, uint32_t fCurFlags,
1540 uint8_t uCurVector, PIEMXCPTRAISEINFO pfXcptRaiseInfo)
1541{
1542 /*
1543 * Only CPU exceptions can be raised while delivering other events; software interrupt
1544 * (INTn/INT3/INTO/ICEBP) generated exceptions cannot occur as the current (second) exception.
1545 */
1546 AssertReturn(fCurFlags & IEM_XCPT_FLAGS_T_CPU_XCPT, IEMXCPTRAISE_INVALID);
1547 Assert(pVCpu); RT_NOREF(pVCpu);
1548 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x\n", uPrevVector, uCurVector));
1549
1550 IEMXCPTRAISE enmRaise = IEMXCPTRAISE_CURRENT_XCPT;
1551 IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
1552 if (fPrevFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
1553 {
1554 IEMXCPTCLASS enmPrevXcptClass = iemGetXcptClass(uPrevVector);
1555 if (enmPrevXcptClass != IEMXCPTCLASS_BENIGN)
1556 {
1557 IEMXCPTCLASS enmCurXcptClass = iemGetXcptClass(uCurVector);
1558 if ( enmPrevXcptClass == IEMXCPTCLASS_PAGE_FAULT
1559 && ( enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT
1560 || enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY))
1561 {
1562 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
1563 fRaiseInfo = enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT ? IEMXCPTRAISEINFO_PF_PF
1564 : IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT;
1565 Log2(("IEMEvaluateRecursiveXcpt: Vectoring page fault. uPrevVector=%#x uCurVector=%#x uCr2=%#RX64\n", uPrevVector,
1566 uCurVector, pVCpu->cpum.GstCtx.cr2));
1567 }
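            /* Example: a #NP raised while delivering a #GP (both contributory) takes the #DF
               branch below, whereas a #PF raised while delivering a #GP is simply delivered as
               the current exception (the IEMXCPTRAISE_CURRENT_XCPT default). */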
1568 else if ( enmPrevXcptClass == IEMXCPTCLASS_CONTRIBUTORY
1569 && enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY)
1570 {
1571 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
1572 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x -> #DF\n", uPrevVector, uCurVector));
1573 }
1574 else if ( enmPrevXcptClass == IEMXCPTCLASS_DOUBLE_FAULT
1575 && ( enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY
1576 || enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT))
1577 {
1578 enmRaise = IEMXCPTRAISE_TRIPLE_FAULT;
1579 Log2(("IEMEvaluateRecursiveXcpt: #DF handler raised a %#x exception -> triple fault\n", uCurVector));
1580 }
1581 }
1582 else
1583 {
1584 if (uPrevVector == X86_XCPT_NMI)
1585 {
1586 fRaiseInfo = IEMXCPTRAISEINFO_NMI_XCPT;
1587 if (uCurVector == X86_XCPT_PF)
1588 {
1589 fRaiseInfo |= IEMXCPTRAISEINFO_NMI_PF;
1590 Log2(("IEMEvaluateRecursiveXcpt: NMI delivery caused a page fault\n"));
1591 }
1592 }
1593 else if ( uPrevVector == X86_XCPT_AC
1594 && uCurVector == X86_XCPT_AC)
1595 {
1596 enmRaise = IEMXCPTRAISE_CPU_HANG;
1597 fRaiseInfo = IEMXCPTRAISEINFO_AC_AC;
1598 Log2(("IEMEvaluateRecursiveXcpt: Recursive #AC - Bad guest\n"));
1599 }
1600 }
1601 }
1602 else if (fPrevFlags & IEM_XCPT_FLAGS_T_EXT_INT)
1603 {
1604 fRaiseInfo = IEMXCPTRAISEINFO_EXT_INT_XCPT;
1605 if (uCurVector == X86_XCPT_PF)
1606 fRaiseInfo |= IEMXCPTRAISEINFO_EXT_INT_PF;
1607 }
1608 else
1609 {
1610 Assert(fPrevFlags & IEM_XCPT_FLAGS_T_SOFT_INT);
1611 fRaiseInfo = IEMXCPTRAISEINFO_SOFT_INT_XCPT;
1612 }
1613
1614 if (pfXcptRaiseInfo)
1615 *pfXcptRaiseInfo = fRaiseInfo;
1616 return enmRaise;
1617}
1618
1619
1620/**
1621 * Enters the CPU shutdown state initiated by a triple fault or other
1622 * unrecoverable conditions.
1623 *
1624 * @returns Strict VBox status code.
1625 * @param pVCpu The cross context virtual CPU structure of the
1626 * calling thread.
1627 */
1628static VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPUCC pVCpu) RT_NOEXCEPT
1629{
1630 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
1631 IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(pVCpu, VMX_EXIT_TRIPLE_FAULT, 0 /* u64ExitQual */);
1632
1633 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
1634 {
1635 Log2(("shutdown: Guest intercept -> #VMEXIT\n"));
1636 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
1637 }
1638
1639 RT_NOREF(pVCpu);
1640 return VINF_EM_TRIPLE_FAULT;
1641}
1642
1643
1644/**
1645 * Validates a new SS segment.
1646 *
1647 * @returns VBox strict status code.
1648 * @param pVCpu The cross context virtual CPU structure of the
1649 * calling thread.
1650 * @param NewSS The new SS selector.
1651 * @param uCpl The CPL to load the stack for.
1652 * @param pDesc Where to return the descriptor.
1653 */
1654static VBOXSTRICTRC iemMiscValidateNewSS(PVMCPUCC pVCpu, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc) RT_NOEXCEPT
1655{
1656 /* Null selectors are not allowed (we're not called for dispatching
1657 interrupts with SS=0 in long mode). */
1658 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
1659 {
1660 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
1661 return iemRaiseTaskSwitchFault0(pVCpu);
1662 }
1663
1664 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
1665 if ((NewSS & X86_SEL_RPL) != uCpl)
1666 {
1667 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl));
1668 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1669 }
1670
1671 /*
1672 * Read the descriptor.
1673 */
1674 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
1675 if (rcStrict != VINF_SUCCESS)
1676 return rcStrict;
1677
1678 /*
1679 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
1680 */
1681 if (!pDesc->Legacy.Gen.u1DescType)
1682 {
1683 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
1684 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1685 }
1686
1687 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
1688 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
1689 {
1690 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
1691 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1692 }
1693 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
1694 {
1695 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
1696 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1697 }
1698
1699 /* Is it there? */
1700 /** @todo testcase: Is this checked before the canonical / limit check below? */
1701 if (!pDesc->Legacy.Gen.u1Present)
1702 {
1703 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
1704 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
1705 }
1706
1707 return VINF_SUCCESS;
1708}
1709
1710/** @} */
1711
1712
1713/** @name Raising Exceptions.
1714 *
1715 * @{
1716 */
1717
1718
1719/**
1720 * Loads the specified stack far pointer from the TSS.
1721 *
1722 * @returns VBox strict status code.
1723 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1724 * @param uCpl The CPL to load the stack for.
1725 * @param pSelSS Where to return the new stack segment.
1726 * @param puEsp Where to return the new stack pointer.
1727 */
1728static VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPUCC pVCpu, uint8_t uCpl, PRTSEL pSelSS, uint32_t *puEsp) RT_NOEXCEPT
1729{
1730 VBOXSTRICTRC rcStrict;
1731 Assert(uCpl < 4);
1732
1733 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
1734 switch (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type)
1735 {
1736 /*
1737 * 16-bit TSS (X86TSS16).
1738 */
1739 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
1740 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
1741 {
1742 uint32_t off = uCpl * 4 + 2;
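            /* The 16-bit TSS stores sp0:ss0 at offset 2, sp1:ss1 at 6 and sp2:ss2 at 10, so
               off = uCpl * 4 + 2 addresses the SP word of the pair; the 32-bit read below then
               yields SP in the low word and SS in the high word. */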
1743 if (off + 4 <= pVCpu->cpum.GstCtx.tr.u32Limit)
1744 {
1745 /** @todo check actual access pattern here. */
1746 uint32_t u32Tmp = 0; /* gcc maybe... */
1747 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
1748 if (rcStrict == VINF_SUCCESS)
1749 {
1750 *puEsp = RT_LOWORD(u32Tmp);
1751 *pSelSS = RT_HIWORD(u32Tmp);
1752 return VINF_SUCCESS;
1753 }
1754 }
1755 else
1756 {
1757 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
1758 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
1759 }
1760 break;
1761 }
1762
1763 /*
1764 * 32-bit TSS (X86TSS32).
1765 */
1766 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
1767 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
1768 {
1769 uint32_t off = uCpl * 8 + 4;
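            /* The 32-bit TSS stores esp0 at offset 4, esp1 at 12 and esp2 at 20, each followed
               by its SS selector, so off = uCpl * 8 + 4 addresses the ESP dword; the 64-bit read
               below yields ESP in the low dword and SS in the low word of the high dword. */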
1770 if (off + 7 <= pVCpu->cpum.GstCtx.tr.u32Limit)
1771 {
1772 /** @todo check actual access pattern here. */
1773 uint64_t u64Tmp;
1774 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
1775 if (rcStrict == VINF_SUCCESS)
1776 {
1777 *puEsp = u64Tmp & UINT32_MAX;
1778 *pSelSS = (RTSEL)(u64Tmp >> 32);
1779 return VINF_SUCCESS;
1780 }
1781 }
1782 else
1783 {
1784 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
1785 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
1786 }
1787 break;
1788 }
1789
1790 default:
1791 AssertFailed();
1792 rcStrict = VERR_IEM_IPE_4;
1793 break;
1794 }
1795
1796 *puEsp = 0; /* make gcc happy */
1797 *pSelSS = 0; /* make gcc happy */
1798 return rcStrict;
1799}
1800
1801
1802/**
1803 * Loads the specified stack pointer from the 64-bit TSS.
1804 *
1805 * @returns VBox strict status code.
1806 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1807 * @param uCpl The CPL to load the stack for.
1808 * @param uIst The interrupt stack table index, 0 if to use uCpl.
1809 * @param puRsp Where to return the new stack pointer.
1810 */
1811static VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPUCC pVCpu, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp) RT_NOEXCEPT
1812{
1813 Assert(uCpl < 4);
1814 Assert(uIst < 8);
1815 *puRsp = 0; /* make gcc happy */
1816
1817 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
1818 AssertReturn(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
1819
1820 uint32_t off;
1821 if (uIst)
1822 off = (uIst - 1) * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, ist1);
1823 else
1824 off = uCpl * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, rsp0);
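    /* E.g. uIst=0 with uCpl=0 addresses rsp0 at offset 4, while uIst=1 addresses ist1 at
       offset 36 (0x24); both are 8-byte stack pointers in the 64-bit TSS. */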
1825 if (off + sizeof(uint64_t) > pVCpu->cpum.GstCtx.tr.u32Limit)
1826 {
1827 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pVCpu->cpum.GstCtx.tr.u32Limit));
1828 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
1829 }
1830
1831 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
1832}
1833
1834
1835/**
1836 * Adjust the CPU state according to the exception being raised.
1837 *
1838 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1839 * @param u8Vector The exception that has been raised.
1840 */
1841DECLINLINE(void) iemRaiseXcptAdjustState(PVMCPUCC pVCpu, uint8_t u8Vector)
1842{
1843 switch (u8Vector)
1844 {
1845 case X86_XCPT_DB:
1846 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);
1847 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
1848 break;
1849 /** @todo Read the AMD and Intel exception reference... */
1850 }
1851}
1852
1853
1854/**
1855 * Implements exceptions and interrupts for real mode.
1856 *
1857 * @returns VBox strict status code.
1858 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1859 * @param cbInstr The number of bytes to offset rIP by in the return
1860 * address.
1861 * @param u8Vector The interrupt / exception vector number.
1862 * @param fFlags The flags.
1863 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
1864 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
1865 */
1866static VBOXSTRICTRC
1867iemRaiseXcptOrIntInRealMode(PVMCPUCC pVCpu,
1868 uint8_t cbInstr,
1869 uint8_t u8Vector,
1870 uint32_t fFlags,
1871 uint16_t uErr,
1872 uint64_t uCr2) RT_NOEXCEPT
1873{
1874 NOREF(uErr); NOREF(uCr2);
1875 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
1876
1877 /*
1878 * Read the IDT entry.
1879 */
1880 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
1881 {
1882 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
1883 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
1884 }
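    /* Each real-mode IVT entry is a 4-byte offset:segment pair, so vector N is fetched from
       idtr.pIdt + 4*N below, and the limit check above requires 4*N + 3 to be within cbIdt. */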
1885 RTFAR16 Idte;
1886 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(4) * u8Vector);
1887 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
1888 {
1889 Log(("iemRaiseXcptOrIntInRealMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
1890 return rcStrict;
1891 }
1892
1893 /*
1894 * Push the stack frame.
1895 */
1896 uint16_t *pu16Frame;
1897 uint64_t uNewRsp;
1898 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, (void **)&pu16Frame, &uNewRsp);
1899 if (rcStrict != VINF_SUCCESS)
1900 return rcStrict;
1901
1902 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
1903#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
1904 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
1905 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
1906 fEfl |= UINT16_C(0xf000);
1907#endif
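    /* Real-mode interrupt frame layout (3 words): FLAGS at the highest address, then CS, then
       the return IP at the new top of stack, i.e. the same order the INT instruction pushes them. */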
1908 pu16Frame[2] = (uint16_t)fEfl;
1909 pu16Frame[1] = (uint16_t)pVCpu->cpum.GstCtx.cs.Sel;
1910 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
1911 rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu16Frame, uNewRsp);
1912 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
1913 return rcStrict;
1914
1915 /*
1916 * Load the vector address into cs:ip and make exception specific state
1917 * adjustments.
1918 */
1919 pVCpu->cpum.GstCtx.cs.Sel = Idte.sel;
1920 pVCpu->cpum.GstCtx.cs.ValidSel = Idte.sel;
1921 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
1922 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)Idte.sel << 4;
1923 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
1924 pVCpu->cpum.GstCtx.rip = Idte.off;
1925 fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC);
1926 IEMMISC_SET_EFL(pVCpu, fEfl);
1927
1928 /** @todo do we actually do this in real mode? */
1929 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
1930 iemRaiseXcptAdjustState(pVCpu, u8Vector);
1931
1932 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
1933}
1934
1935
1936/**
1937 * Loads a NULL data selector into a segment register when coming from V8086 mode.
1938 *
1939 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1940 * @param pSReg Pointer to the segment register.
1941 */
1942DECLINLINE(void) iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPUCC pVCpu, PCPUMSELREG pSReg)
1943{
1944 pSReg->Sel = 0;
1945 pSReg->ValidSel = 0;
1946 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
1947 {
1948 /* VT-x (Intel 3960x) doesn't change the base and limit; it clears and sets the following attributes */
1949 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
1950 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
1951 }
1952 else
1953 {
1954 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
1955 /** @todo check this on AMD-V */
1956 pSReg->u64Base = 0;
1957 pSReg->u32Limit = 0;
1958 }
1959}
1960
1961
1962/**
1963 * Loads a segment selector during a task switch in V8086 mode.
1964 *
1965 * @param pSReg Pointer to the segment register.
1966 * @param uSel The selector value to load.
1967 */
1968DECLINLINE(void) iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
1969{
1970 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
1971 pSReg->Sel = uSel;
1972 pSReg->ValidSel = uSel;
1973 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
1974 pSReg->u64Base = uSel << 4;
1975 pSReg->u32Limit = 0xffff;
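    /* 0xf3: present, DPL=3, read/write accessed data segment, i.e. the fixed attributes of a
       virtual-8086 mode segment. */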
1976 pSReg->Attr.u = 0xf3;
1977}
1978
1979
1980/**
1981 * Loads a segment selector during a task switch in protected mode.
1982 *
1983 * In this task switch scenario, we would throw \#TS exceptions rather than
1984 * \#GPs.
1985 *
1986 * @returns VBox strict status code.
1987 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1988 * @param pSReg Pointer to the segment register.
1989 * @param uSel The new selector value.
1990 *
1991 * @remarks This does _not_ handle CS or SS.
1992 * @remarks This expects pVCpu->iem.s.uCpl to be up to date.
1993 */
1994static VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPUCC pVCpu, PCPUMSELREG pSReg, uint16_t uSel) RT_NOEXCEPT
1995{
1996 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
1997
1998 /* Null data selector. */
1999 if (!(uSel & X86_SEL_MASK_OFF_RPL))
2000 {
2001 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
2002 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
2003 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2004 return VINF_SUCCESS;
2005 }
2006
2007 /* Fetch the descriptor. */
2008 IEMSELDESC Desc;
2009 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
2010 if (rcStrict != VINF_SUCCESS)
2011 {
2012 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
2013 VBOXSTRICTRC_VAL(rcStrict)));
2014 return rcStrict;
2015 }
2016
2017 /* Must be a data segment or readable code segment. */
2018 if ( !Desc.Legacy.Gen.u1DescType
2019 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
2020 {
2021 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
2022 Desc.Legacy.Gen.u4Type));
2023 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2024 }
2025
2026 /* Check privileges for data segments and non-conforming code segments. */
2027 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2028 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2029 {
2030 /* The RPL and the new CPL must be less than or equal to the DPL. */
2031 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
2032 || (pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl))
2033 {
2034 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
2035 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
2036 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2037 }
2038 }
2039
2040 /* Is it there? */
2041 if (!Desc.Legacy.Gen.u1Present)
2042 {
2043 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
2044 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2045 }
2046
2047 /* The base and limit. */
2048 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
2049 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
2050
2051 /*
2052 * Ok, everything checked out fine. Now set the accessed bit before
2053 * committing the result into the registers.
2054 */
2055 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2056 {
2057 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
2058 if (rcStrict != VINF_SUCCESS)
2059 return rcStrict;
2060 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2061 }
2062
2063 /* Commit */
2064 pSReg->Sel = uSel;
2065 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
2066 pSReg->u32Limit = cbLimit;
2067 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
2068 pSReg->ValidSel = uSel;
2069 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2070 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2071 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
2072
2073 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
2074 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2075 return VINF_SUCCESS;
2076}
2077
2078
2079/**
2080 * Performs a task switch.
2081 *
2082 * If the task switch is the result of a JMP, CALL or IRET instruction, the
2083 * caller is responsible for performing the necessary checks (like DPL, TSS
2084 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
2085 * reference for JMP, CALL, IRET.
2086 *
2087 * If the task switch is due to a software interrupt or hardware exception,
2088 * the caller is responsible for validating the TSS selector and descriptor. See
2089 * Intel Instruction reference for INT n.
2090 *
2091 * @returns VBox strict status code.
2092 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2093 * @param enmTaskSwitch The cause of the task switch.
2094 * @param uNextEip The EIP effective after the task switch.
2095 * @param fFlags The flags, see IEM_XCPT_FLAGS_XXX.
2096 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2097 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2098 * @param SelTSS The TSS selector of the new task.
2099 * @param pNewDescTSS Pointer to the new TSS descriptor.
2100 */
2101VBOXSTRICTRC
2102iemTaskSwitch(PVMCPUCC pVCpu,
2103 IEMTASKSWITCH enmTaskSwitch,
2104 uint32_t uNextEip,
2105 uint32_t fFlags,
2106 uint16_t uErr,
2107 uint64_t uCr2,
2108 RTSEL SelTSS,
2109 PIEMSELDESC pNewDescTSS) RT_NOEXCEPT
2110{
2111 Assert(!IEM_IS_REAL_MODE(pVCpu));
2112 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
2113 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2114
2115 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
2116 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2117 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2118 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2119 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2120
2121 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2122 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2123
2124 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTSS,
2125 fIsNewTSS386, pVCpu->cpum.GstCtx.eip, uNextEip));
2126
2127 /* Update CR2 in case it's a page-fault. */
2128 /** @todo This should probably be done much earlier in IEM/PGM. See
2129 * @bugref{5653#c49}. */
2130 if (fFlags & IEM_XCPT_FLAGS_CR2)
2131 pVCpu->cpum.GstCtx.cr2 = uCr2;
2132
2133 /*
2134 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
2135 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
2136 */
2137 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
2138 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
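    /* I.e. the architectural minimums: a limit of at least 0x2b (44 bytes) for a 16-bit TSS
       and 0x67 (104 bytes) for a 32-bit TSS. */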
2139 if (uNewTSSLimit < uNewTSSLimitMin)
2140 {
2141 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
2142 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
2143 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
2144 }
2145
2146 /*
2147 * Task switches in VMX non-root mode always cause VM-exits.
2148 * The new TSS must have been read and validated (DPL, limits etc.) before a
2149 * task-switch VM-exit commences.
2150 *
2151 * See Intel spec. 25.4.2 "Treatment of Task Switches".
2152 */
2153 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
2154 {
2155 Log(("iemTaskSwitch: Guest intercept (source=%u, sel=%#x) -> VM-exit.\n", enmTaskSwitch, SelTSS));
2156 IEM_VMX_VMEXIT_TASK_SWITCH_RET(pVCpu, enmTaskSwitch, SelTSS, uNextEip - pVCpu->cpum.GstCtx.eip);
2157 }
2158
2159 /*
2160 * The SVM nested-guest intercept for task-switch takes priority over all exceptions
2161 * after validating the incoming (new) TSS, see AMD spec. 15.14.1 "Task Switch Intercept".
2162 */
2163 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TASK_SWITCH))
2164 {
2165 uint32_t const uExitInfo1 = SelTSS;
2166 uint32_t uExitInfo2 = uErr;
2167 switch (enmTaskSwitch)
2168 {
2169 case IEMTASKSWITCH_JUMP: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_JUMP; break;
2170 case IEMTASKSWITCH_IRET: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_IRET; break;
2171 default: break;
2172 }
2173 if (fFlags & IEM_XCPT_FLAGS_ERR)
2174 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_HAS_ERROR_CODE;
2175 if (pVCpu->cpum.GstCtx.eflags.Bits.u1RF)
2176 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_EFLAGS_RF;
2177
2178 Log(("iemTaskSwitch: Guest intercept -> #VMEXIT. uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uExitInfo1, uExitInfo2));
2179 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_TASK_SWITCH, uExitInfo1, uExitInfo2);
2180 RT_NOREF2(uExitInfo1, uExitInfo2);
2181 }
2182
2183 /*
2184 * Check the current TSS limit. The last written byte to the current TSS during the
2185 * task switch will be 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
2186 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2187 *
2188 * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
2189 * end up with smaller than "legal" TSS limits.
2190 */
2191 uint32_t const uCurTSSLimit = pVCpu->cpum.GstCtx.tr.u32Limit;
2192 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
2193 if (uCurTSSLimit < uCurTSSLimitMin)
2194 {
2195 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
2196 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
2197 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
2198 }
2199
2200 /*
2201 * Verify that the new TSS can be accessed and map it. Map only the required contents
2202 * and not the entire TSS.
2203 */
2204 void *pvNewTSS;
2205 uint32_t const cbNewTSS = uNewTSSLimitMin + 1;
2206 RTGCPTR const GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
2207 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
2208 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
2209 * not perform correct translation if this happens. See Intel spec. 7.2.1
2210 * "Task-State Segment". */
2211 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW);
2212 if (rcStrict != VINF_SUCCESS)
2213 {
2214 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
2215 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
2216 return rcStrict;
2217 }
2218
2219 /*
2220 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
2221 */
2222 uint32_t u32EFlags = pVCpu->cpum.GstCtx.eflags.u32;
2223 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
2224 || enmTaskSwitch == IEMTASKSWITCH_IRET)
2225 {
2226 PX86DESC pDescCurTSS;
2227 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
2228 pVCpu->cpum.GstCtx.gdtr.pGdt + (pVCpu->cpum.GstCtx.tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
2229 if (rcStrict != VINF_SUCCESS)
2230 {
2231 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2232 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2233 return rcStrict;
2234 }
2235
2236 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2237 rcStrict = iemMemCommitAndUnmap(pVCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
2238 if (rcStrict != VINF_SUCCESS)
2239 {
2240 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2241 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2242 return rcStrict;
2243 }
2244
2245 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
2246 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
2247 {
2248 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2249 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2250 u32EFlags &= ~X86_EFL_NT;
2251 }
2252 }
2253
2254 /*
2255 * Save the CPU state into the current TSS.
2256 */
2257 RTGCPTR const GCPtrCurTSS = pVCpu->cpum.GstCtx.tr.u64Base;
2258 if (GCPtrNewTSS == GCPtrCurTSS)
2259 {
2260 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
2261 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
2262 pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.u32, pVCpu->cpum.GstCtx.eax,
2263 pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel,
2264 pVCpu->cpum.GstCtx.ldtr.Sel));
2265 }
2266 if (fIsNewTSS386)
2267 {
2268 /*
2269 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
2270 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2271 */
2272 void *pvCurTSS32;
2273 uint32_t const offCurTSS = RT_UOFFSETOF(X86TSS32, eip);
2274 uint32_t const cbCurTSS = RT_UOFFSETOF(X86TSS32, selLdt) - RT_UOFFSETOF(X86TSS32, eip);
2275 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
2276 rcStrict = iemMemMap(pVCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
2277 if (rcStrict != VINF_SUCCESS)
2278 {
2279 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
2280 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
2281 return rcStrict;
2282 }
2283
2284 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS, offCurTSS + cbCurTSS). */
2285 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
2286 pCurTSS32->eip = uNextEip;
2287 pCurTSS32->eflags = u32EFlags;
2288 pCurTSS32->eax = pVCpu->cpum.GstCtx.eax;
2289 pCurTSS32->ecx = pVCpu->cpum.GstCtx.ecx;
2290 pCurTSS32->edx = pVCpu->cpum.GstCtx.edx;
2291 pCurTSS32->ebx = pVCpu->cpum.GstCtx.ebx;
2292 pCurTSS32->esp = pVCpu->cpum.GstCtx.esp;
2293 pCurTSS32->ebp = pVCpu->cpum.GstCtx.ebp;
2294 pCurTSS32->esi = pVCpu->cpum.GstCtx.esi;
2295 pCurTSS32->edi = pVCpu->cpum.GstCtx.edi;
2296 pCurTSS32->es = pVCpu->cpum.GstCtx.es.Sel;
2297 pCurTSS32->cs = pVCpu->cpum.GstCtx.cs.Sel;
2298 pCurTSS32->ss = pVCpu->cpum.GstCtx.ss.Sel;
2299 pCurTSS32->ds = pVCpu->cpum.GstCtx.ds.Sel;
2300 pCurTSS32->fs = pVCpu->cpum.GstCtx.fs.Sel;
2301 pCurTSS32->gs = pVCpu->cpum.GstCtx.gs.Sel;
2302
2303 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
2304 if (rcStrict != VINF_SUCCESS)
2305 {
2306 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2307 VBOXSTRICTRC_VAL(rcStrict)));
2308 return rcStrict;
2309 }
2310 }
2311 else
2312 {
2313 /*
2314 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
2315 */
2316 void *pvCurTSS16;
2317 uint32_t const offCurTSS = RT_UOFFSETOF(X86TSS16, ip);
2318 uint32_t const cbCurTSS = RT_UOFFSETOF(X86TSS16, selLdt) - RT_UOFFSETOF(X86TSS16, ip);
2319 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
2320 rcStrict = iemMemMap(pVCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
2321 if (rcStrict != VINF_SUCCESS)
2322 {
2323 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
2324 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
2325 return rcStrict;
2326 }
2327
2328 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS, offCurTSS + cbCurTSS). */
2329 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
2330 pCurTSS16->ip = uNextEip;
2331 pCurTSS16->flags = u32EFlags;
2332 pCurTSS16->ax = pVCpu->cpum.GstCtx.ax;
2333 pCurTSS16->cx = pVCpu->cpum.GstCtx.cx;
2334 pCurTSS16->dx = pVCpu->cpum.GstCtx.dx;
2335 pCurTSS16->bx = pVCpu->cpum.GstCtx.bx;
2336 pCurTSS16->sp = pVCpu->cpum.GstCtx.sp;
2337 pCurTSS16->bp = pVCpu->cpum.GstCtx.bp;
2338 pCurTSS16->si = pVCpu->cpum.GstCtx.si;
2339 pCurTSS16->di = pVCpu->cpum.GstCtx.di;
2340 pCurTSS16->es = pVCpu->cpum.GstCtx.es.Sel;
2341 pCurTSS16->cs = pVCpu->cpum.GstCtx.cs.Sel;
2342 pCurTSS16->ss = pVCpu->cpum.GstCtx.ss.Sel;
2343 pCurTSS16->ds = pVCpu->cpum.GstCtx.ds.Sel;
2344
2345 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
2346 if (rcStrict != VINF_SUCCESS)
2347 {
2348 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2349 VBOXSTRICTRC_VAL(rcStrict)));
2350 return rcStrict;
2351 }
2352 }
2353
2354 /*
2355 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
2356 */
2357 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2358 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2359 {
2360 /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */
2361 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
2362 pNewTSS->selPrev = pVCpu->cpum.GstCtx.tr.Sel;
2363 }
2364
2365 /*
2366 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky,
2367 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
2368 */
2369 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
2370 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
2371 bool fNewDebugTrap;
2372 if (fIsNewTSS386)
2373 {
2374 PCX86TSS32 pNewTSS32 = (PCX86TSS32)pvNewTSS;
2375 uNewCr3 = (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
2376 uNewEip = pNewTSS32->eip;
2377 uNewEflags = pNewTSS32->eflags;
2378 uNewEax = pNewTSS32->eax;
2379 uNewEcx = pNewTSS32->ecx;
2380 uNewEdx = pNewTSS32->edx;
2381 uNewEbx = pNewTSS32->ebx;
2382 uNewEsp = pNewTSS32->esp;
2383 uNewEbp = pNewTSS32->ebp;
2384 uNewEsi = pNewTSS32->esi;
2385 uNewEdi = pNewTSS32->edi;
2386 uNewES = pNewTSS32->es;
2387 uNewCS = pNewTSS32->cs;
2388 uNewSS = pNewTSS32->ss;
2389 uNewDS = pNewTSS32->ds;
2390 uNewFS = pNewTSS32->fs;
2391 uNewGS = pNewTSS32->gs;
2392 uNewLdt = pNewTSS32->selLdt;
2393 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
2394 }
2395 else
2396 {
2397 PCX86TSS16 pNewTSS16 = (PCX86TSS16)pvNewTSS;
2398 uNewCr3 = 0;
2399 uNewEip = pNewTSS16->ip;
2400 uNewEflags = pNewTSS16->flags;
2401 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax;
2402 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx;
2403 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx;
2404 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx;
2405 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp;
2406 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp;
2407 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si;
2408 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di;
2409 uNewES = pNewTSS16->es;
2410 uNewCS = pNewTSS16->cs;
2411 uNewSS = pNewTSS16->ss;
2412 uNewDS = pNewTSS16->ds;
2413 uNewFS = 0;
2414 uNewGS = 0;
2415 uNewLdt = pNewTSS16->selLdt;
2416 fNewDebugTrap = false;
2417 }
2418
2419 if (GCPtrNewTSS == GCPtrCurTSS)
2420 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
2421 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
2422
2423 /*
2424 * We're done accessing the new TSS.
2425 */
2426 rcStrict = iemMemCommitAndUnmap(pVCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
2427 if (rcStrict != VINF_SUCCESS)
2428 {
2429 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
2430 return rcStrict;
2431 }
2432
2433 /*
2434 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
2435 */
2436 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
2437 {
2438 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
2439 pVCpu->cpum.GstCtx.gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
2440 if (rcStrict != VINF_SUCCESS)
2441 {
2442 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2443 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2444 return rcStrict;
2445 }
2446
2447 /* Check that the descriptor indicates the new TSS is available (not busy). */
2448 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2449 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
2450 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
2451
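        /* Available and busy TSS types differ only in bit 1 of the type field (avail=1/9,
           busy=3/11), so X86_SEL_TYPE_SYS_TSS_BUSY_MASK is what gets set or cleared to toggle
           the busy state. */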
2452 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2453 rcStrict = iemMemCommitAndUnmap(pVCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
2454 if (rcStrict != VINF_SUCCESS)
2455 {
2456 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2457 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2458 return rcStrict;
2459 }
2460 }
2461
2462 /*
2463 * From this point on, we're technically in the new task. We will defer exceptions
2464 * until the completion of the task switch but before executing any instructions in the new task.
2465 */
2466 pVCpu->cpum.GstCtx.tr.Sel = SelTSS;
2467 pVCpu->cpum.GstCtx.tr.ValidSel = SelTSS;
2468 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
2469 pVCpu->cpum.GstCtx.tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
2470 pVCpu->cpum.GstCtx.tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
2471 pVCpu->cpum.GstCtx.tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy);
2472 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
2473
2474 /* Set the busy bit in TR. */
2475 pVCpu->cpum.GstCtx.tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2476
2477 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
2478 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2479 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2480 {
2481 uNewEflags |= X86_EFL_NT;
2482 }
2483
2484 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
2485 pVCpu->cpum.GstCtx.cr0 |= X86_CR0_TS;
2486 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
2487
2488 pVCpu->cpum.GstCtx.eip = uNewEip;
2489 pVCpu->cpum.GstCtx.eax = uNewEax;
2490 pVCpu->cpum.GstCtx.ecx = uNewEcx;
2491 pVCpu->cpum.GstCtx.edx = uNewEdx;
2492 pVCpu->cpum.GstCtx.ebx = uNewEbx;
2493 pVCpu->cpum.GstCtx.esp = uNewEsp;
2494 pVCpu->cpum.GstCtx.ebp = uNewEbp;
2495 pVCpu->cpum.GstCtx.esi = uNewEsi;
2496 pVCpu->cpum.GstCtx.edi = uNewEdi;
2497
2498 uNewEflags &= X86_EFL_LIVE_MASK;
2499 uNewEflags |= X86_EFL_RA1_MASK;
2500 IEMMISC_SET_EFL(pVCpu, uNewEflags);
2501
2502 /*
2503 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
2504 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
2505 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
2506 */
2507 pVCpu->cpum.GstCtx.es.Sel = uNewES;
2508 pVCpu->cpum.GstCtx.es.Attr.u &= ~X86DESCATTR_P;
2509
2510 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
2511 pVCpu->cpum.GstCtx.cs.Attr.u &= ~X86DESCATTR_P;
2512
2513 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
2514 pVCpu->cpum.GstCtx.ss.Attr.u &= ~X86DESCATTR_P;
2515
2516 pVCpu->cpum.GstCtx.ds.Sel = uNewDS;
2517 pVCpu->cpum.GstCtx.ds.Attr.u &= ~X86DESCATTR_P;
2518
2519 pVCpu->cpum.GstCtx.fs.Sel = uNewFS;
2520 pVCpu->cpum.GstCtx.fs.Attr.u &= ~X86DESCATTR_P;
2521
2522 pVCpu->cpum.GstCtx.gs.Sel = uNewGS;
2523 pVCpu->cpum.GstCtx.gs.Attr.u &= ~X86DESCATTR_P;
2524 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2525
2526 pVCpu->cpum.GstCtx.ldtr.Sel = uNewLdt;
2527 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
2528 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_P;
2529 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
2530
2531 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2532 {
2533 pVCpu->cpum.GstCtx.es.Attr.u |= X86DESCATTR_UNUSABLE;
2534 pVCpu->cpum.GstCtx.cs.Attr.u |= X86DESCATTR_UNUSABLE;
2535 pVCpu->cpum.GstCtx.ss.Attr.u |= X86DESCATTR_UNUSABLE;
2536 pVCpu->cpum.GstCtx.ds.Attr.u |= X86DESCATTR_UNUSABLE;
2537 pVCpu->cpum.GstCtx.fs.Attr.u |= X86DESCATTR_UNUSABLE;
2538 pVCpu->cpum.GstCtx.gs.Attr.u |= X86DESCATTR_UNUSABLE;
2539 pVCpu->cpum.GstCtx.ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
2540 }
2541
2542 /*
2543 * Switch CR3 for the new task.
2544 */
2545 if ( fIsNewTSS386
2546 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG))
2547 {
2548 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
2549 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
2550 AssertRCSuccessReturn(rc, rc);
2551
2552 /* Inform PGM. */
2553 /** @todo Should we raise \#GP(0) here when PAE PDPEs are invalid? */
2554 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PGE));
2555 AssertRCReturn(rc, rc);
2556 /* ignore informational status codes */
2557
2558 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
2559 }
2560
2561 /*
2562 * Switch LDTR for the new task.
2563 */
2564 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
2565 iemHlpLoadNullDataSelectorProt(pVCpu, &pVCpu->cpum.GstCtx.ldtr, uNewLdt);
2566 else
2567 {
2568 Assert(!pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
2569
2570 IEMSELDESC DescNewLdt;
2571 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
2572 if (rcStrict != VINF_SUCCESS)
2573 {
2574 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
2575 uNewLdt, pVCpu->cpum.GstCtx.gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
2576 return rcStrict;
2577 }
2578 if ( !DescNewLdt.Legacy.Gen.u1Present
2579 || DescNewLdt.Legacy.Gen.u1DescType
2580 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
2581 {
2582 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
2583 uNewLdt, DescNewLdt.Legacy.u));
2584 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
2585 }
2586
2587 pVCpu->cpum.GstCtx.ldtr.ValidSel = uNewLdt;
2588 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
2589 pVCpu->cpum.GstCtx.ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
2590 pVCpu->cpum.GstCtx.ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
2591 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
2592 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2593 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
2594 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
2595 }
2596
2597 IEMSELDESC DescSS;
2598 if (IEM_IS_V86_MODE(pVCpu))
2599 {
2600 pVCpu->iem.s.uCpl = 3;
2601 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.es, uNewES);
2602 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.cs, uNewCS);
2603 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ss, uNewSS);
2604 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ds, uNewDS);
2605 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.fs, uNewFS);
2606 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.gs, uNewGS);
2607
2608 /* Quick fix: fake DescSS. */ /** @todo fix the code further down? */
2609 DescSS.Legacy.u = 0;
2610 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u32Limit;
2611 DescSS.Legacy.Gen.u4LimitHigh = pVCpu->cpum.GstCtx.ss.u32Limit >> 16;
2612 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u64Base;
2613 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 16);
2614 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 24);
2615 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
2616 DescSS.Legacy.Gen.u2Dpl = 3;
2617 }
2618 else
2619 {
2620 uint8_t const uNewCpl = (uNewCS & X86_SEL_RPL);
2621
2622 /*
2623 * Load the stack segment for the new task.
2624 */
2625 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
2626 {
2627 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
2628 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2629 }
2630
2631 /* Fetch the descriptor. */
2632 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
2633 if (rcStrict != VINF_SUCCESS)
2634 {
2635 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
2636 VBOXSTRICTRC_VAL(rcStrict)));
2637 return rcStrict;
2638 }
2639
2640 /* SS must be a data segment and writable. */
2641 if ( !DescSS.Legacy.Gen.u1DescType
2642 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
2643 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
2644 {
2645 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
2646 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
2647 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2648 }
2649
2650 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
2651 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
2652 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
2653 {
2654 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
2655 uNewCpl));
2656 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2657 }
2658
2659 /* Is it there? */
2660 if (!DescSS.Legacy.Gen.u1Present)
2661 {
2662 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
2663 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2664 }
2665
2666 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
2667 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
2668
2669 /* Set the accessed bit before committing the result into SS. */
2670 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2671 {
2672 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
2673 if (rcStrict != VINF_SUCCESS)
2674 return rcStrict;
2675 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2676 }
2677
2678 /* Commit SS. */
2679 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
2680 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSS;
2681 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
2682 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimit;
2683 pVCpu->cpum.GstCtx.ss.u64Base = u64Base;
2684 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
2685 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
2686
2687 /* CPL has changed, update IEM before loading rest of segments. */
2688 pVCpu->iem.s.uCpl = uNewCpl;
2689
2690 /*
2691 * Load the data segments for the new task.
2692 */
2693 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.es, uNewES);
2694 if (rcStrict != VINF_SUCCESS)
2695 return rcStrict;
2696 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.ds, uNewDS);
2697 if (rcStrict != VINF_SUCCESS)
2698 return rcStrict;
2699 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.fs, uNewFS);
2700 if (rcStrict != VINF_SUCCESS)
2701 return rcStrict;
2702 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.gs, uNewGS);
2703 if (rcStrict != VINF_SUCCESS)
2704 return rcStrict;
2705
2706 /*
2707 * Load the code segment for the new task.
2708 */
2709 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
2710 {
2711 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
2712 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2713 }
2714
2715 /* Fetch the descriptor. */
2716 IEMSELDESC DescCS;
2717 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
2718 if (rcStrict != VINF_SUCCESS)
2719 {
2720 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
2721 return rcStrict;
2722 }
2723
2724 /* CS must be a code segment. */
2725 if ( !DescCS.Legacy.Gen.u1DescType
2726 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
2727 {
2728 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
2729 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
2730 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2731 }
2732
2733 /* For conforming CS, DPL must be less than or equal to the RPL. */
2734 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2735 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
2736 {
2737 Log(("iemTaskSwitch: conforming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
2738 DescCS.Legacy.Gen.u2Dpl));
2739 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2740 }
2741
2742 /* For non-conforming CS, DPL must match RPL. */
2743 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2744 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
2745 {
2746 Log(("iemTaskSwitch: non-conforming CS DPL RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
2747 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
2748 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2749 }
2750
2751 /* Is it there? */
2752 if (!DescCS.Legacy.Gen.u1Present)
2753 {
2754 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
2755 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2756 }
2757
2758 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
2759 u64Base = X86DESC_BASE(&DescCS.Legacy);
2760
2761 /* Set the accessed bit before committing the result into CS. */
2762 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2763 {
2764 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
2765 if (rcStrict != VINF_SUCCESS)
2766 return rcStrict;
2767 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2768 }
2769
2770 /* Commit CS. */
2771 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
2772 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCS;
2773 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
2774 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
2775 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
2776 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
2777 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
2778 }
2779
2780 /** @todo Debug trap. */
2781 if (fIsNewTSS386 && fNewDebugTrap)
2782 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
2783
2784 /*
2785 * Construct the error code masks based on what caused this task switch.
2786 * See Intel Instruction reference for INT.
2787 */
2788 uint16_t uExt;
2789 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
2790 && ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
2791 || (fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)))
2792 {
2793 uExt = 1;
2794 }
2795 else
2796 uExt = 0;
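    /* uExt becomes the EXT bit (bit 0) of the error code for any #SS/#GP raised while setting
       up the new stack below: set for hardware interrupts, exceptions and ICEBP, clear for
       software INTn. */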
2797
2798 /*
2799 * Push any error code on to the new stack.
2800 */
2801 if (fFlags & IEM_XCPT_FLAGS_ERR)
2802 {
2803 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
2804 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
2805 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
2806
2807 /* Check that there is sufficient space on the stack. */
2808 /** @todo Factor out segment limit checking for normal/expand down segments
2809 * into a separate function. */
2810 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
2811 {
2812 if ( pVCpu->cpum.GstCtx.esp - 1 > cbLimitSS
2813 || pVCpu->cpum.GstCtx.esp < cbStackFrame)
2814 {
2815 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
2816 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
2817 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
2818 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
2819 }
2820 }
2821 else
2822 {
2823 if ( pVCpu->cpum.GstCtx.esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
2824 || pVCpu->cpum.GstCtx.esp - cbStackFrame < cbLimitSS + UINT32_C(1))
2825 {
2826 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
2827 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
2828 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
2829 }
2830 }
2831
2832
2833 if (fIsNewTSS386)
2834 rcStrict = iemMemStackPushU32(pVCpu, uErr);
2835 else
2836 rcStrict = iemMemStackPushU16(pVCpu, uErr);
2837 if (rcStrict != VINF_SUCCESS)
2838 {
2839 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
2840 fIsNewTSS386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
2841 return rcStrict;
2842 }
2843 }
2844
2845 /* Check the new EIP against the new CS limit. */
2846 if (pVCpu->cpum.GstCtx.eip > pVCpu->cpum.GstCtx.cs.u32Limit)
2847 {
2848 Log(("iemTaskSwitch: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
2849 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.cs.u32Limit));
2850 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
2851 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
2852 }
2853
2854 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip,
2855 pVCpu->cpum.GstCtx.ss.Sel));
2856 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
2857}
2858
2859
2860/**
2861 * Implements exceptions and interrupts for protected mode.
2862 *
2863 * @returns VBox strict status code.
2864 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2865 * @param cbInstr The number of bytes to offset rIP by in the return
2866 * address.
2867 * @param u8Vector The interrupt / exception vector number.
2868 * @param fFlags The flags.
2869 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2870 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2871 */
2872static VBOXSTRICTRC
2873iemRaiseXcptOrIntInProtMode(PVMCPUCC pVCpu,
2874 uint8_t cbInstr,
2875 uint8_t u8Vector,
2876 uint32_t fFlags,
2877 uint16_t uErr,
2878 uint64_t uCr2) RT_NOEXCEPT
2879{
2880 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2881
2882 /*
2883 * Read the IDT entry.
2884 */
2885 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
2886 {
2887 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
2888 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2889 }
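    /* Protected-mode IDT entries are 8-byte gate descriptors, so vector N lives at
       idtr.pIdt + 8*N and the limit check above requires 8*N + 7 to be within cbIdt. */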
2890 X86DESC Idte;
2891 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
2892 pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(8) * u8Vector);
2893 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2894 {
2895 Log(("iemRaiseXcptOrIntInProtMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
2896 return rcStrict;
2897 }
2898 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
2899 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
2900 Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
2901
2902 /*
2903 * Check the descriptor type, DPL and such.
2904 * ASSUMES this is done in the same order as described for call-gate calls.
2905 */
2906 if (Idte.Gate.u1DescType)
2907 {
2908 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
2909 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2910 }
2911 bool fTaskGate = false;
2912 uint8_t f32BitGate = true;
2913 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
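    /* TF, NT, RF and VM are cleared for every gate type; interrupt gates additionally clear IF
       below, while trap gates leave IF untouched. */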
2914 switch (Idte.Gate.u4Type)
2915 {
2916 case X86_SEL_TYPE_SYS_UNDEFINED:
2917 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
2918 case X86_SEL_TYPE_SYS_LDT:
2919 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
2920 case X86_SEL_TYPE_SYS_286_CALL_GATE:
2921 case X86_SEL_TYPE_SYS_UNDEFINED2:
2922 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
2923 case X86_SEL_TYPE_SYS_UNDEFINED3:
2924 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
2925 case X86_SEL_TYPE_SYS_386_CALL_GATE:
2926 case X86_SEL_TYPE_SYS_UNDEFINED4:
2927 {
2928 /** @todo check what actually happens when the type is wrong...
2929 * esp. call gates. */
2930 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
2931 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2932 }
2933
2934 case X86_SEL_TYPE_SYS_286_INT_GATE:
2935 f32BitGate = false;
2936 RT_FALL_THRU();
2937 case X86_SEL_TYPE_SYS_386_INT_GATE:
2938 fEflToClear |= X86_EFL_IF;
2939 break;
2940
2941 case X86_SEL_TYPE_SYS_TASK_GATE:
2942 fTaskGate = true;
2943#ifndef IEM_IMPLEMENTS_TASKSWITCH
2944 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
2945#endif
2946 break;
2947
2948 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
2949 f32BitGate = false; RT_FALL_THRU();
2950 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
2951 break;
2952
2953 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2954 }
2955
2956 /* Check DPL against CPL if applicable. */
2957 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
2958 {
2959 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
2960 {
2961 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
2962 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2963 }
2964 }
2965
2966 /* Is it there? */
2967 if (!Idte.Gate.u1Present)
2968 {
2969 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
2970 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2971 }
2972
2973 /* Is it a task-gate? */
2974 if (fTaskGate)
2975 {
2976 /*
2977 * Construct the error code masks based on what caused this task switch.
2978 * See Intel Instruction reference for INT.
2979 */
2980 uint16_t const uExt = ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
2981 && !(fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)) ? 0 : 1;
2982 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
2983 RTSEL SelTSS = Idte.Gate.u16Sel;
2984
2985 /*
2986 * Fetch the TSS descriptor in the GDT.
2987 */
2988 IEMSELDESC DescTSS;
2989 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
2990 if (rcStrict != VINF_SUCCESS)
2991 {
2992 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
2993 VBOXSTRICTRC_VAL(rcStrict)));
2994 return rcStrict;
2995 }
2996
2997 /* The TSS descriptor must be a system segment and be available (not busy). */
2998 if ( DescTSS.Legacy.Gen.u1DescType
2999 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
3000 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
3001 {
3002 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
3003 u8Vector, SelTSS, DescTSS.Legacy.au64));
3004 return iemRaiseGeneralProtectionFault(pVCpu, (SelTSS & uSelMask) | uExt);
3005 }
3006
3007 /* The TSS must be present. */
3008 if (!DescTSS.Legacy.Gen.u1Present)
3009 {
3010 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
3011 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTSS & uSelMask) | uExt);
3012 }
3013
3014 /* Do the actual task switch. */
3015 return iemTaskSwitch(pVCpu, IEMTASKSWITCH_INT_XCPT,
3016 (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip,
3017 fFlags, uErr, uCr2, SelTSS, &DescTSS);
3018 }
3019
3020 /* A null CS is bad. */
3021 RTSEL NewCS = Idte.Gate.u16Sel;
3022 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3023 {
3024 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3025 return iemRaiseGeneralProtectionFault0(pVCpu);
3026 }
3027
3028 /* Fetch the descriptor for the new CS. */
3029 IEMSELDESC DescCS;
3030 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
3031 if (rcStrict != VINF_SUCCESS)
3032 {
3033 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3034 return rcStrict;
3035 }
3036
3037 /* Must be a code segment. */
3038 if (!DescCS.Legacy.Gen.u1DescType)
3039 {
3040 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3041 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3042 }
3043 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3044 {
3045 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3046 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3047 }
3048
3049 /* Don't allow lowering the privilege level. */
3050 /** @todo Does the lowering of privileges apply to software interrupts
3051 * only? This has bearings on the more-privileged or
3052 * same-privilege stack behavior further down. A testcase would
3053 * be nice. */
3054 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
3055 {
3056 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3057 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
3058 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3059 }
3060
3061 /* Make sure the selector is present. */
3062 if (!DescCS.Legacy.Gen.u1Present)
3063 {
3064 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3065 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
3066 }
3067
3068 /* Check the new EIP against the new CS limit. */
3069 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
3070 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
3071 ? Idte.Gate.u16OffsetLow
3072 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
3073 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
3074 if (uNewEip > cbLimitCS)
3075 {
3076 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
3077 u8Vector, uNewEip, cbLimitCS, NewCS));
3078 return iemRaiseGeneralProtectionFault(pVCpu, 0);
3079 }
3080 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
3081
3082 /* Calc the flag image to push. */
3083 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3084 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3085 fEfl &= ~X86_EFL_RF;
3086 else
3087 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3088
3089 /* From V8086 mode only go to CPL 0. */
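    /* Conforming code segments keep the current CPL; for non-conforming ones the handler runs at CS.DPL. */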
3090 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3091 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
3092 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
3093 {
3094 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
3095 return iemRaiseGeneralProtectionFault(pVCpu, 0);
3096 }
3097
3098 /*
3099 * If the privilege level changes, we need to get a new stack from the TSS.
3100      * This in turn means validating the new SS and ESP...
3101 */
3102 if (uNewCpl != pVCpu->iem.s.uCpl)
3103 {
3104 RTSEL NewSS;
3105 uint32_t uNewEsp;
3106 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, uNewCpl, &NewSS, &uNewEsp);
3107 if (rcStrict != VINF_SUCCESS)
3108 return rcStrict;
3109
3110 IEMSELDESC DescSS;
3111 rcStrict = iemMiscValidateNewSS(pVCpu, NewSS, uNewCpl, &DescSS);
3112 if (rcStrict != VINF_SUCCESS)
3113 return rcStrict;
3114 /* If the new SS is 16-bit, we are only going to use SP, not ESP. */
3115 if (!DescSS.Legacy.Gen.u1DefBig)
3116 {
3117 Log(("iemRaiseXcptOrIntInProtMode: Forcing ESP=%#x to 16 bits\n", uNewEsp));
3118 uNewEsp = (uint16_t)uNewEsp;
3119 }
3120
3121 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
3122
3123 /* Check that there is sufficient space for the stack frame. */
3124 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
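        /* Frame, low to high: [error code,] IP, CS, FLAGS, SP, SS and, in V8086 mode, also ES, DS, FS, GS;
           32-bit gates double these sizes. */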
3125 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
3126 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
3127 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
3128
3129 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3130 {
3131 if ( uNewEsp - 1 > cbLimitSS
3132 || uNewEsp < cbStackFrame)
3133 {
3134 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
3135 u8Vector, NewSS, uNewEsp, cbStackFrame));
3136 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
3137 }
3138 }
3139 else
3140 {
3141 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT16_MAX)
3142 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
3143 {
3144 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
3145 u8Vector, NewSS, uNewEsp, cbStackFrame));
3146 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
3147 }
3148 }
3149
3150 /*
3151 * Start making changes.
3152 */
3153
3154 /* Set the new CPL so that stack accesses use it. */
3155 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
3156 pVCpu->iem.s.uCpl = uNewCpl;
3157
3158 /* Create the stack frame. */
3159 RTPTRUNION uStackFrame;
3160 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
3161 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
3162 if (rcStrict != VINF_SUCCESS)
3163 return rcStrict;
3164 void * const pvStackFrame = uStackFrame.pv;
3165 if (f32BitGate)
3166 {
3167 if (fFlags & IEM_XCPT_FLAGS_ERR)
3168 *uStackFrame.pu32++ = uErr;
3169 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3170 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
3171 uStackFrame.pu32[2] = fEfl;
3172 uStackFrame.pu32[3] = pVCpu->cpum.GstCtx.esp;
3173 uStackFrame.pu32[4] = pVCpu->cpum.GstCtx.ss.Sel;
3174 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
3175 if (fEfl & X86_EFL_VM)
3176 {
3177 uStackFrame.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel;
3178 uStackFrame.pu32[5] = pVCpu->cpum.GstCtx.es.Sel;
3179 uStackFrame.pu32[6] = pVCpu->cpum.GstCtx.ds.Sel;
3180 uStackFrame.pu32[7] = pVCpu->cpum.GstCtx.fs.Sel;
3181 uStackFrame.pu32[8] = pVCpu->cpum.GstCtx.gs.Sel;
3182 }
3183 }
3184 else
3185 {
3186 if (fFlags & IEM_XCPT_FLAGS_ERR)
3187 *uStackFrame.pu16++ = uErr;
3188 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
3189 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
3190 uStackFrame.pu16[2] = fEfl;
3191 uStackFrame.pu16[3] = pVCpu->cpum.GstCtx.sp;
3192 uStackFrame.pu16[4] = pVCpu->cpum.GstCtx.ss.Sel;
3193 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.sp));
3194 if (fEfl & X86_EFL_VM)
3195 {
3196 uStackFrame.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
3197 uStackFrame.pu16[5] = pVCpu->cpum.GstCtx.es.Sel;
3198 uStackFrame.pu16[6] = pVCpu->cpum.GstCtx.ds.Sel;
3199 uStackFrame.pu16[7] = pVCpu->cpum.GstCtx.fs.Sel;
3200 uStackFrame.pu16[8] = pVCpu->cpum.GstCtx.gs.Sel;
3201 }
3202 }
3203 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
3204 if (rcStrict != VINF_SUCCESS)
3205 return rcStrict;
3206
3207 /* Mark the selectors 'accessed' (hope this is the correct time). */
3208         /** @todo testcase: exactly _when_ are the accessed bits set - before or
3209 * after pushing the stack frame? (Write protect the gdt + stack to
3210 * find out.) */
3211 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3212 {
3213 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3214 if (rcStrict != VINF_SUCCESS)
3215 return rcStrict;
3216 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3217 }
3218
3219 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3220 {
3221 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
3222 if (rcStrict != VINF_SUCCESS)
3223 return rcStrict;
3224 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3225 }
3226
3227 /*
3228          * Start committing the register changes (joins with the DPL=CPL branch).
3229 */
3230 pVCpu->cpum.GstCtx.ss.Sel = NewSS;
3231 pVCpu->cpum.GstCtx.ss.ValidSel = NewSS;
3232 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3233 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimitSS;
3234 pVCpu->cpum.GstCtx.ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
3235 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3236 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
3237 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
3238 * SP is loaded).
3239 * Need to check the other combinations too:
3240 * - 16-bit TSS, 32-bit handler
3241 * - 32-bit TSS, 16-bit handler */
3242 if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
3243 pVCpu->cpum.GstCtx.sp = (uint16_t)(uNewEsp - cbStackFrame);
3244 else
3245 pVCpu->cpum.GstCtx.rsp = uNewEsp - cbStackFrame;
3246
3247 if (fEfl & X86_EFL_VM)
3248 {
3249 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.gs);
3250 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.fs);
3251 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.es);
3252 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.ds);
3253 }
3254 }
3255 /*
3256 * Same privilege, no stack change and smaller stack frame.
3257 */
3258 else
3259 {
3260 uint64_t uNewRsp;
3261 RTPTRUNION uStackFrame;
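        /* Frame, low to high: [error code,] IP, CS, FLAGS; there is no stack switch here, so SS:SP isn't pushed;
           32-bit gates double the size. */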
3262 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
3263 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, &uStackFrame.pv, &uNewRsp);
3264 if (rcStrict != VINF_SUCCESS)
3265 return rcStrict;
3266 void * const pvStackFrame = uStackFrame.pv;
3267
3268 if (f32BitGate)
3269 {
3270 if (fFlags & IEM_XCPT_FLAGS_ERR)
3271 *uStackFrame.pu32++ = uErr;
3272 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3273 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
3274 uStackFrame.pu32[2] = fEfl;
3275 }
3276 else
3277 {
3278 if (fFlags & IEM_XCPT_FLAGS_ERR)
3279 *uStackFrame.pu16++ = uErr;
3280 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3281 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
3282 uStackFrame.pu16[2] = fEfl;
3283 }
3284 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
3285 if (rcStrict != VINF_SUCCESS)
3286 return rcStrict;
3287
3288 /* Mark the CS selector as 'accessed'. */
3289 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3290 {
3291 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3292 if (rcStrict != VINF_SUCCESS)
3293 return rcStrict;
3294 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3295 }
3296
3297 /*
3298 * Start committing the register changes (joins with the other branch).
3299 */
3300 pVCpu->cpum.GstCtx.rsp = uNewRsp;
3301 }
3302
3303 /* ... register committing continues. */
3304 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3305 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3306 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3307 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCS;
3308 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3309 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3310
3311 pVCpu->cpum.GstCtx.rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
3312 fEfl &= ~fEflToClear;
3313 IEMMISC_SET_EFL(pVCpu, fEfl);
3314
3315 if (fFlags & IEM_XCPT_FLAGS_CR2)
3316 pVCpu->cpum.GstCtx.cr2 = uCr2;
3317
3318 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3319 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3320
3321 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3322}
3323
3324
3325/**
3326 * Implements exceptions and interrupts for long mode.
3327 *
3328 * @returns VBox strict status code.
3329 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3330 * @param cbInstr The number of bytes to offset rIP by in the return
3331 * address.
3332 * @param u8Vector The interrupt / exception vector number.
3333 * @param fFlags The flags.
3334 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3335 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3336 */
3337static VBOXSTRICTRC
3338iemRaiseXcptOrIntInLongMode(PVMCPUCC pVCpu,
3339 uint8_t cbInstr,
3340 uint8_t u8Vector,
3341 uint32_t fFlags,
3342 uint16_t uErr,
3343 uint64_t uCr2) RT_NOEXCEPT
3344{
3345 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3346
3347 /*
3348 * Read the IDT entry.
3349 */
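    /* Each IDT entry is 16 bytes wide in long mode. */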
3350 uint16_t offIdt = (uint16_t)u8Vector << 4;
3351 if (pVCpu->cpum.GstCtx.idtr.cbIdt < offIdt + 7)
3352 {
3353 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3354 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3355 }
3356 X86DESC64 Idte;
3357#ifdef _MSC_VER /* Shut up silly compiler warning. */
3358 Idte.au64[0] = 0;
3359 Idte.au64[1] = 0;
3360#endif
3361 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt);
3362 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3363 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt + 8);
3364 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3365 {
3366 Log(("iemRaiseXcptOrIntInLongMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3367 return rcStrict;
3368 }
3369 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
3370 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3371 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
3372
3373 /*
3374 * Check the descriptor type, DPL and such.
3375 * ASSUMES this is done in the same order as described for call-gate calls.
3376 */
3377 if (Idte.Gate.u1DescType)
3378 {
3379 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3380 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3381 }
3382 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
3383 switch (Idte.Gate.u4Type)
3384 {
3385 case AMD64_SEL_TYPE_SYS_INT_GATE:
3386 fEflToClear |= X86_EFL_IF;
3387 break;
3388 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
3389 break;
3390
3391 default:
3392 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3393 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3394 }
3395
3396 /* Check DPL against CPL if applicable. */
3397 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
3398 {
3399 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
3400 {
3401 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
3402 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3403 }
3404 }
3405
3406 /* Is it there? */
3407 if (!Idte.Gate.u1Present)
3408 {
3409 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
3410 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3411 }
3412
3413 /* A null CS is bad. */
3414 RTSEL NewCS = Idte.Gate.u16Sel;
3415 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3416 {
3417 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3418 return iemRaiseGeneralProtectionFault0(pVCpu);
3419 }
3420
3421 /* Fetch the descriptor for the new CS. */
3422 IEMSELDESC DescCS;
3423 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
3424 if (rcStrict != VINF_SUCCESS)
3425 {
3426 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3427 return rcStrict;
3428 }
3429
3430 /* Must be a 64-bit code segment. */
3431 if (!DescCS.Long.Gen.u1DescType)
3432 {
3433 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3434 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3435 }
3436 if ( !DescCS.Long.Gen.u1Long
3437 || DescCS.Long.Gen.u1DefBig
3438 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
3439 {
3440 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
3441 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
3442 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3443 }
3444
3445 /* Don't allow lowering the privilege level. For non-conforming CS
3446 selectors, the CS.DPL sets the privilege level the trap/interrupt
3447 handler runs at. For conforming CS selectors, the CPL remains
3448 unchanged, but the CS.DPL must be <= CPL. */
3449 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
3450 * when CPU in Ring-0. Result \#GP? */
3451 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
3452 {
3453 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3454 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
3455 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3456 }
3457
3458
3459 /* Make sure the selector is present. */
3460 if (!DescCS.Legacy.Gen.u1Present)
3461 {
3462 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3463 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
3464 }
3465
3466 /* Check that the new RIP is canonical. */
3467 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
3468 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
3469 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
3470 if (!IEM_IS_CANONICAL(uNewRip))
3471 {
3472 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
3473 return iemRaiseGeneralProtectionFault0(pVCpu);
3474 }
3475
3476 /*
3477 * If the privilege level changes or if the IST isn't zero, we need to get
3478 * a new stack from the TSS.
3479 */
3480 uint64_t uNewRsp;
3481 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3482 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
3483 if ( uNewCpl != pVCpu->iem.s.uCpl
3484 || Idte.Gate.u3IST != 0)
3485 {
3486 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
3487 if (rcStrict != VINF_SUCCESS)
3488 return rcStrict;
3489 }
3490 else
3491 uNewRsp = pVCpu->cpum.GstCtx.rsp;
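    /* In 64-bit mode the stack is aligned on a 16-byte boundary before the frame is pushed. */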
3492 uNewRsp &= ~(uint64_t)0xf;
3493
3494 /*
3495 * Calc the flag image to push.
3496 */
3497 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3498 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3499 fEfl &= ~X86_EFL_RF;
3500 else
3501 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3502
3503 /*
3504 * Start making changes.
3505 */
3506 /* Set the new CPL so that stack accesses use it. */
3507 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
3508 pVCpu->iem.s.uCpl = uNewCpl;
3509
3510 /* Create the stack frame. */
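    /* Five qwords: RIP, CS, RFLAGS, RSP and SS, plus one more when an error code is pushed. */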
3511 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
3512 RTPTRUNION uStackFrame;
3513 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
3514 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
3515 if (rcStrict != VINF_SUCCESS)
3516 return rcStrict;
3517 void * const pvStackFrame = uStackFrame.pv;
3518
3519 if (fFlags & IEM_XCPT_FLAGS_ERR)
3520 *uStackFrame.pu64++ = uErr;
3521 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.rip + cbInstr : pVCpu->cpum.GstCtx.rip;
3522 uStackFrame.pu64[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
3523 uStackFrame.pu64[2] = fEfl;
3524 uStackFrame.pu64[3] = pVCpu->cpum.GstCtx.rsp;
3525 uStackFrame.pu64[4] = pVCpu->cpum.GstCtx.ss.Sel;
3526 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
3527 if (rcStrict != VINF_SUCCESS)
3528 return rcStrict;
3529
3530     /* Mark the CS selector as 'accessed' (hope this is the correct time). */
3531     /** @todo testcase: exactly _when_ are the accessed bits set - before or
3532 * after pushing the stack frame? (Write protect the gdt + stack to
3533 * find out.) */
3534 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3535 {
3536 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3537 if (rcStrict != VINF_SUCCESS)
3538 return rcStrict;
3539 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3540 }
3541
3542 /*
3543      * Start committing the register changes.
3544 */
3545 /** @todo research/testcase: Figure out what VT-x and AMD-V loads into the
3546 * hidden registers when interrupting 32-bit or 16-bit code! */
3547 if (uNewCpl != uOldCpl)
3548 {
3549 pVCpu->cpum.GstCtx.ss.Sel = 0 | uNewCpl;
3550 pVCpu->cpum.GstCtx.ss.ValidSel = 0 | uNewCpl;
3551 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3552 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
3553 pVCpu->cpum.GstCtx.ss.u64Base = 0;
3554 pVCpu->cpum.GstCtx.ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
3555 }
3556 pVCpu->cpum.GstCtx.rsp = uNewRsp - cbStackFrame;
3557 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3558 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3559 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3560 pVCpu->cpum.GstCtx.cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
3561 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3562 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3563 pVCpu->cpum.GstCtx.rip = uNewRip;
3564
3565 fEfl &= ~fEflToClear;
3566 IEMMISC_SET_EFL(pVCpu, fEfl);
3567
3568 if (fFlags & IEM_XCPT_FLAGS_CR2)
3569 pVCpu->cpum.GstCtx.cr2 = uCr2;
3570
3571 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3572 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3573
3574 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3575}
3576
3577
3578/**
3579 * Implements exceptions and interrupts.
3580 *
3581 * All exceptions and interrupts go through this function!
3582 *
3583 * @returns VBox strict status code.
3584 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3585 * @param cbInstr The number of bytes to offset rIP by in the return
3586 * address.
3587 * @param u8Vector The interrupt / exception vector number.
3588 * @param fFlags The flags.
3589 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3590 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3591 */
3592VBOXSTRICTRC
3593iemRaiseXcptOrInt(PVMCPUCC pVCpu,
3594 uint8_t cbInstr,
3595 uint8_t u8Vector,
3596 uint32_t fFlags,
3597 uint16_t uErr,
3598 uint64_t uCr2) RT_NOEXCEPT
3599{
3600 /*
3601 * Get all the state that we might need here.
3602 */
3603 IEM_CTX_IMPORT_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3604 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3605
3606#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
3607 /*
3608 * Flush prefetch buffer
3609 */
3610 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
3611#endif
3612
3613 /*
3614 * Perform the V8086 IOPL check and upgrade the fault without nesting.
3615 */
3616 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1VM
3617 && pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL != 3
3618 && (fFlags & ( IEM_XCPT_FLAGS_T_SOFT_INT
3619 | IEM_XCPT_FLAGS_BP_INSTR
3620 | IEM_XCPT_FLAGS_ICEBP_INSTR
3621 | IEM_XCPT_FLAGS_OF_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
3622 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) )
3623 {
3624 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
3625 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
3626 u8Vector = X86_XCPT_GP;
3627 uErr = 0;
3628 }
3629#ifdef DBGFTRACE_ENABLED
3630 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
3631 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
3632 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp);
3633#endif
3634
3635 /*
3636 * Evaluate whether NMI blocking should be in effect.
3637 * Normally, NMI blocking is in effect whenever we inject an NMI.
3638 */
3639 bool fBlockNmi;
3640 if ( u8Vector == X86_XCPT_NMI
3641 && (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT))
3642 fBlockNmi = true;
3643 else
3644 fBlockNmi = false;
3645
3646#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3647 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3648 {
3649 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEvent(pVCpu, u8Vector, fFlags, uErr, uCr2, cbInstr);
3650 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
3651 return rcStrict0;
3652
3653 /* If virtual-NMI blocking is in effect for the nested-guest, guest NMIs are not blocked. */
3654 if (pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking)
3655 {
3656 Assert(CPUMIsGuestVmxPinCtlsSet(&pVCpu->cpum.GstCtx, VMX_PIN_CTLS_VIRT_NMI));
3657 fBlockNmi = false;
3658 }
3659 }
3660#endif
3661
3662#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
3663 if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
3664 {
3665 /*
3666 * If the event is being injected as part of VMRUN, it isn't subject to event
3667 * intercepts in the nested-guest. However, secondary exceptions that occur
3668 * during injection of any event -are- subject to exception intercepts.
3669 *
3670 * See AMD spec. 15.20 "Event Injection".
3671 */
3672 if (!pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents)
3673 pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents = true;
3674 else
3675 {
3676 /*
3677 * Check and handle if the event being raised is intercepted.
3678 */
3679 VBOXSTRICTRC rcStrict0 = iemHandleSvmEventIntercept(pVCpu, u8Vector, fFlags, uErr, uCr2);
3680 if (rcStrict0 != VINF_SVM_INTERCEPT_NOT_ACTIVE)
3681 return rcStrict0;
3682 }
3683 }
3684#endif
3685
3686 /*
3687 * Set NMI blocking if necessary.
3688 */
3689 if ( fBlockNmi
3690 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
3691 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
3692
3693 /*
3694 * Do recursion accounting.
3695 */
3696 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
3697 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
3698 if (pVCpu->iem.s.cXcptRecursions == 0)
3699 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
3700 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2));
3701 else
3702 {
3703 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
3704 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt,
3705 pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
3706
3707 if (pVCpu->iem.s.cXcptRecursions >= 4)
3708 {
3709#ifdef DEBUG_bird
3710 AssertFailed();
3711#endif
3712 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
3713 }
3714
3715 /*
3716 * Evaluate the sequence of recurring events.
3717 */
3718 IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevXcpt, uPrevXcpt, fFlags, u8Vector,
3719 NULL /* pXcptRaiseInfo */);
3720 if (enmRaise == IEMXCPTRAISE_CURRENT_XCPT)
3721 { /* likely */ }
3722 else if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
3723 {
3724 Log2(("iemRaiseXcptOrInt: Raising double fault. uPrevXcpt=%#x\n", uPrevXcpt));
3725 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
3726 u8Vector = X86_XCPT_DF;
3727 uErr = 0;
3728#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3729 /* VMX nested-guest #DF intercept needs to be checked here. */
3730 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3731 {
3732 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEventDoubleFault(pVCpu);
3733 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
3734 return rcStrict0;
3735 }
3736#endif
3737 /* SVM nested-guest #DF intercepts need to be checked now. See AMD spec. 15.12 "Exception Intercepts". */
3738 if (IEM_SVM_IS_XCPT_INTERCEPT_SET(pVCpu, X86_XCPT_DF))
3739 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
3740 }
3741 else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)
3742 {
3743 Log2(("iemRaiseXcptOrInt: Raising triple fault. uPrevXcpt=%#x\n", uPrevXcpt));
3744 return iemInitiateCpuShutdown(pVCpu);
3745 }
3746 else if (enmRaise == IEMXCPTRAISE_CPU_HANG)
3747 {
3748 /* If a nested-guest enters an endless CPU loop condition, we'll emulate it; otherwise guru. */
3749 Log2(("iemRaiseXcptOrInt: CPU hang condition detected\n"));
3750 if ( !CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu))
3751 && !CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
3752 return VERR_EM_GUEST_CPU_HANG;
3753 }
3754 else
3755 {
3756 AssertMsgFailed(("Unexpected condition! enmRaise=%#x uPrevXcpt=%#x fPrevXcpt=%#x, u8Vector=%#x fFlags=%#x\n",
3757 enmRaise, uPrevXcpt, fPrevXcpt, u8Vector, fFlags));
3758 return VERR_IEM_IPE_9;
3759 }
3760
3761 /*
3762          * The 'EXT' bit is set when an exception occurs during delivery of an external
3763          * event (such as an interrupt or an earlier exception)[1]. The privileged software
3764          * exception (INT1) also sets the EXT bit[2]. For exceptions generated by software
3765          * interrupts and the INTO and INT3 instructions, the 'EXT' bit will not be set.
3766 *
3767 * [1] - Intel spec. 6.13 "Error Code"
3768 * [2] - Intel spec. 26.5.1.1 "Details of Vectored-Event Injection".
3769 * [3] - Intel Instruction reference for INT n.
3770 */
3771 if ( (fPrevXcpt & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR))
3772 && (fFlags & IEM_XCPT_FLAGS_ERR)
3773 && u8Vector != X86_XCPT_PF
3774 && u8Vector != X86_XCPT_DF)
3775 {
3776 uErr |= X86_TRAP_ERR_EXTERNAL;
3777 }
3778 }
3779
3780 pVCpu->iem.s.cXcptRecursions++;
3781 pVCpu->iem.s.uCurXcpt = u8Vector;
3782 pVCpu->iem.s.fCurXcpt = fFlags;
3783 pVCpu->iem.s.uCurXcptErr = uErr;
3784 pVCpu->iem.s.uCurXcptCr2 = uCr2;
3785
3786 /*
3787 * Extensive logging.
3788 */
3789#if defined(LOG_ENABLED) && defined(IN_RING3)
3790 if (LogIs3Enabled())
3791 {
3792 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR_MASK);
3793 PVM pVM = pVCpu->CTX_SUFF(pVM);
3794 char szRegs[4096];
3795 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
3796 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
3797 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
3798 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
3799 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
3800 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
3801 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
3802 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
3803 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
3804 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
3805 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
3806 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
3807 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
3808 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
3809 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
3810 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
3811 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
3812 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
3813 " efer=%016VR{efer}\n"
3814 " pat=%016VR{pat}\n"
3815 " sf_mask=%016VR{sf_mask}\n"
3816 "krnl_gs_base=%016VR{krnl_gs_base}\n"
3817 " lstar=%016VR{lstar}\n"
3818 " star=%016VR{star} cstar=%016VR{cstar}\n"
3819 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
3820 );
3821
3822 char szInstr[256];
3823 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
3824 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
3825 szInstr, sizeof(szInstr), NULL);
3826 Log3(("%s%s\n", szRegs, szInstr));
3827 }
3828#endif /* LOG_ENABLED */
3829
3830 /*
3831 * Call the mode specific worker function.
3832 */
3833 VBOXSTRICTRC rcStrict;
3834 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
3835 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
3836 else if (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LMA)
3837 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
3838 else
3839 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
3840
3841 /* Flush the prefetch buffer. */
3842#ifdef IEM_WITH_CODE_TLB
3843 pVCpu->iem.s.pbInstrBuf = NULL;
3844#else
3845 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
3846#endif
3847
3848 /*
3849 * Unwind.
3850 */
3851 pVCpu->iem.s.cXcptRecursions--;
3852 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
3853 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
3854 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u depth=%d\n",
3855 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, pVCpu->iem.s.uCpl,
3856 pVCpu->iem.s.cXcptRecursions + 1));
3857 return rcStrict;
3858}
3859
3860#ifdef IEM_WITH_SETJMP
3861/**
3862 * See iemRaiseXcptOrInt. Will not return.
3863 */
3864DECL_NO_RETURN(void)
3865iemRaiseXcptOrIntJmp(PVMCPUCC pVCpu,
3866 uint8_t cbInstr,
3867 uint8_t u8Vector,
3868 uint32_t fFlags,
3869 uint16_t uErr,
3870 uint64_t uCr2) RT_NOEXCEPT
3871{
3872 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
3873 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
3874}
3875#endif
3876
3877
3878/** \#DE - 00. */
3879VBOXSTRICTRC iemRaiseDivideError(PVMCPUCC pVCpu) RT_NOEXCEPT
3880{
3881 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
3882}
3883
3884
3885/** \#DB - 01.
3886 * @note This automatically clears DR7.GD. */
3887VBOXSTRICTRC iemRaiseDebugException(PVMCPUCC pVCpu) RT_NOEXCEPT
3888{
3889 /** @todo set/clear RF. */
3890 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
3891 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
3892}
3893
3894
3895/** \#BR - 05. */
3896VBOXSTRICTRC iemRaiseBoundRangeExceeded(PVMCPUCC pVCpu) RT_NOEXCEPT
3897{
3898 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_BR, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
3899}
3900
3901
3902/** \#UD - 06. */
3903VBOXSTRICTRC iemRaiseUndefinedOpcode(PVMCPUCC pVCpu) RT_NOEXCEPT
3904{
3905 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
3906}
3907
3908
3909/** \#NM - 07. */
3910VBOXSTRICTRC iemRaiseDeviceNotAvailable(PVMCPUCC pVCpu) RT_NOEXCEPT
3911{
3912 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
3913}
3914
3915
3916/** \#TS(err) - 0a. */
3917VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
3918{
3919 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
3920}
3921
3922
3923/** \#TS(tr) - 0a. */
3924VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPUCC pVCpu) RT_NOEXCEPT
3925{
3926 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
3927 pVCpu->cpum.GstCtx.tr.Sel, 0);
3928}
3929
3930
3931/** \#TS(0) - 0a. */
3932VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPUCC pVCpu) RT_NOEXCEPT
3933{
3934 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
3935 0, 0);
3936}
3937
3938
3939/** \#TS(sel) - 0a. */
3940VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
3941{
3942 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
3943 uSel & X86_SEL_MASK_OFF_RPL, 0);
3944}
3945
3946
3947/** \#NP(err) - 0b. */
3948VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
3949{
3950 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
3951}
3952
3953
3954/** \#NP(sel) - 0b. */
3955VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
3956{
3957 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
3958 uSel & ~X86_SEL_RPL, 0);
3959}
3960
3961
3962/** \#SS(seg) - 0c. */
3963VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
3964{
3965 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
3966 uSel & ~X86_SEL_RPL, 0);
3967}
3968
3969
3970/** \#SS(err) - 0c. */
3971VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
3972{
3973 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
3974}
3975
3976
3977/** \#GP(n) - 0d. */
3978VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
3979{
3980 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
3981}
3982
3983
3984/** \#GP(0) - 0d. */
3985VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPUCC pVCpu) RT_NOEXCEPT
3986{
3987 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
3988}
3989
3990#ifdef IEM_WITH_SETJMP
3991/** \#GP(0) - 0d. */
3992DECL_NO_RETURN(void) iemRaiseGeneralProtectionFault0Jmp(PVMCPUCC pVCpu) RT_NOEXCEPT
3993{
3994 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
3995}
3996#endif
3997
3998
3999/** \#GP(sel) - 0d. */
4000VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
4001{
4002 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4003 Sel & ~X86_SEL_RPL, 0);
4004}
4005
4006
4007/** \#GP(0) - 0d. */
4008VBOXSTRICTRC iemRaiseNotCanonical(PVMCPUCC pVCpu) RT_NOEXCEPT
4009{
4010 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4011}
4012
4013
4014/** \#GP(sel) - 0d. */
4015VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4016{
4017 NOREF(iSegReg); NOREF(fAccess);
4018 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4019 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4020}
4021
4022#ifdef IEM_WITH_SETJMP
4023/** \#GP(sel) - 0d, longjmp. */
4024DECL_NO_RETURN(void) iemRaiseSelectorBoundsJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4025{
4026 NOREF(iSegReg); NOREF(fAccess);
4027 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4028 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4029}
4030#endif
4031
4032/** \#GP(sel) - 0d. */
4033VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
4034{
4035 NOREF(Sel);
4036 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4037}
4038
4039#ifdef IEM_WITH_SETJMP
4040/** \#GP(sel) - 0d, longjmp. */
4041DECL_NO_RETURN(void) iemRaiseSelectorBoundsBySelectorJmp(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
4042{
4043 NOREF(Sel);
4044 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4045}
4046#endif
4047
4048
4049/** \#GP(sel) - 0d. */
4050VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4051{
4052 NOREF(iSegReg); NOREF(fAccess);
4053 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4054}
4055
4056#ifdef IEM_WITH_SETJMP
4057/** \#GP(sel) - 0d, longjmp. */
4058DECL_NO_RETURN(void) iemRaiseSelectorInvalidAccessJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4059{
4060 NOREF(iSegReg); NOREF(fAccess);
4061 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4062}
4063#endif
4064
4065
4066/** \#PF(n) - 0e. */
4067VBOXSTRICTRC iemRaisePageFault(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc) RT_NOEXCEPT
4068{
4069 uint16_t uErr;
4070 switch (rc)
4071 {
4072 case VERR_PAGE_NOT_PRESENT:
4073 case VERR_PAGE_TABLE_NOT_PRESENT:
4074 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
4075 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
4076 uErr = 0;
4077 break;
4078
4079 default:
4080 AssertMsgFailed(("%Rrc\n", rc));
4081 RT_FALL_THRU();
4082 case VERR_ACCESS_DENIED:
4083 uErr = X86_TRAP_PF_P;
4084 break;
4085
4086 /** @todo reserved */
4087 }
4088
4089 if (pVCpu->iem.s.uCpl == 3)
4090 uErr |= X86_TRAP_PF_US;
4091
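    /* The instruction fetch (I/D) bit is only reported for code accesses when both PAE and EFER.NXE are enabled. */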
4092 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
4093 && ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
4094 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) ) )
4095 uErr |= X86_TRAP_PF_ID;
4096
4097#if 0 /* This is so much nonsense, really. Why was it done like that? */
4098     /* Note! RW access callers reporting a WRITE protection fault will clear
4099 the READ flag before calling. So, read-modify-write accesses (RW)
4100 can safely be reported as READ faults. */
4101 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
4102 uErr |= X86_TRAP_PF_RW;
4103#else
4104 if (fAccess & IEM_ACCESS_TYPE_WRITE)
4105 {
4106 /// @todo r=bird: bs3-cpu-basic-2 wants X86_TRAP_PF_RW for xchg and cmpxchg
4107 /// (regardless of outcome of the comparison in the latter case).
4108 //if (!(fAccess & IEM_ACCESS_TYPE_READ))
4109 uErr |= X86_TRAP_PF_RW;
4110 }
4111#endif
4112
4113 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
4114 uErr, GCPtrWhere);
4115}
4116
4117#ifdef IEM_WITH_SETJMP
4118/** \#PF(n) - 0e, longjmp. */
4119DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc) RT_NOEXCEPT
4120{
4121 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, fAccess, rc)));
4122}
4123#endif
4124
4125
4126/** \#MF(0) - 10. */
4127VBOXSTRICTRC iemRaiseMathFault(PVMCPUCC pVCpu)
4128{
4129 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4130}
4131
4132
4133/** \#AC(0) - 11. */
4134VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPUCC pVCpu)
4135{
4136 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4137}
4138
4139#ifdef IEM_WITH_SETJMP
4140/** \#AC(0) - 11, longjmp. */
4141DECL_NO_RETURN(void) iemRaiseAlignmentCheckExceptionJmp(PVMCPUCC pVCpu) RT_NOEXCEPT
4142{
4143 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(iemRaiseAlignmentCheckException(pVCpu)));
4144}
4145#endif
4146
4147
4148/** Accessed via IEMOP_RAISE_DIVIDE_ERROR. */
4149IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
4150{
4151 NOREF(cbInstr);
4152 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4153}
4154
4155
4156/** Accessed via IEMOP_RAISE_INVALID_LOCK_PREFIX. */
4157IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
4158{
4159 NOREF(cbInstr);
4160 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4161}
4162
4163
4164/** Accessed via IEMOP_RAISE_INVALID_OPCODE. */
4165IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
4166{
4167 NOREF(cbInstr);
4168 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4169}
4170
4171
4172/** @} */
4173
4174/** @name Common opcode decoders.
4175 * @{
4176 */
4177//#include <iprt/mem.h>
4178
4179/**
4180 * Used to add extra details about a stub case.
4181 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4182 */
4183void iemOpStubMsg2(PVMCPUCC pVCpu) RT_NOEXCEPT
4184{
4185#if defined(LOG_ENABLED) && defined(IN_RING3)
4186 PVM pVM = pVCpu->CTX_SUFF(pVM);
4187 char szRegs[4096];
4188 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
4189 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
4190 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
4191 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
4192 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
4193 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
4194 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
4195 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
4196 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
4197 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
4198 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
4199 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4200 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4201 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4202 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4203 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4204 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4205 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4206 " efer=%016VR{efer}\n"
4207 " pat=%016VR{pat}\n"
4208 " sf_mask=%016VR{sf_mask}\n"
4209 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4210 " lstar=%016VR{lstar}\n"
4211 " star=%016VR{star} cstar=%016VR{cstar}\n"
4212 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4213 );
4214
4215 char szInstr[256];
4216 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4217 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4218 szInstr, sizeof(szInstr), NULL);
4219
4220 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
4221#else
4222     RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip);
4223#endif
4224}
4225
4226/** @} */
4227
4228
4229
4230/** @name Register Access.
4231 * @{
4232 */
4233
4234/**
4235 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
4236 *
4237 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4238 * segment limit.
4239 *
 * @returns Strict VBox status code.
4240 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4241 * @param offNextInstr The offset of the next instruction.
4242 */
4243VBOXSTRICTRC iemRegRipRelativeJumpS8(PVMCPUCC pVCpu, int8_t offNextInstr) RT_NOEXCEPT
4244{
4245 switch (pVCpu->iem.s.enmEffOpSize)
4246 {
4247 case IEMMODE_16BIT:
4248 {
4249 uint16_t uNewIp = pVCpu->cpum.GstCtx.ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
4250 if ( uNewIp > pVCpu->cpum.GstCtx.cs.u32Limit
4251 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
4252 return iemRaiseGeneralProtectionFault0(pVCpu);
4253 pVCpu->cpum.GstCtx.rip = uNewIp;
4254 break;
4255 }
4256
4257 case IEMMODE_32BIT:
4258 {
4259 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
4260 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
4261
4262 uint32_t uNewEip = pVCpu->cpum.GstCtx.eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
4263 if (uNewEip > pVCpu->cpum.GstCtx.cs.u32Limit)
4264 return iemRaiseGeneralProtectionFault0(pVCpu);
4265 pVCpu->cpum.GstCtx.rip = uNewEip;
4266 break;
4267 }
4268
4269 case IEMMODE_64BIT:
4270 {
4271 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
4272
4273 uint64_t uNewRip = pVCpu->cpum.GstCtx.rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
4274 if (!IEM_IS_CANONICAL(uNewRip))
4275 return iemRaiseGeneralProtectionFault0(pVCpu);
4276 pVCpu->cpum.GstCtx.rip = uNewRip;
4277 break;
4278 }
4279
4280 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4281 }
4282
4283 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
4284
4285#ifndef IEM_WITH_CODE_TLB
4286 /* Flush the prefetch buffer. */
4287 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4288#endif
4289
4290 return VINF_SUCCESS;
4291}
4292
4293
4294/**
4295 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
4296 *
4297 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4298 * segment limit.
4299 *
4300 * @returns Strict VBox status code.
4301 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4302 * @param offNextInstr The offset of the next instruction.
4303 */
4304VBOXSTRICTRC iemRegRipRelativeJumpS16(PVMCPUCC pVCpu, int16_t offNextInstr) RT_NOEXCEPT
4305{
4306 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
4307
4308 uint16_t uNewIp = pVCpu->cpum.GstCtx.ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
4309 if ( uNewIp > pVCpu->cpum.GstCtx.cs.u32Limit
4310 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
4311 return iemRaiseGeneralProtectionFault0(pVCpu);
4312 /** @todo Test 16-bit jump in 64-bit mode. possible? */
4313 pVCpu->cpum.GstCtx.rip = uNewIp;
4314 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
4315
4316#ifndef IEM_WITH_CODE_TLB
4317 /* Flush the prefetch buffer. */
4318 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4319#endif
4320
4321 return VINF_SUCCESS;
4322}
4323
4324
4325/**
4326 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
4327 *
4328 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4329 * segment limit.
4330 *
4331 * @returns Strict VBox status code.
4332 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4333 * @param offNextInstr The offset of the next instruction.
4334 */
4335VBOXSTRICTRC iemRegRipRelativeJumpS32(PVMCPUCC pVCpu, int32_t offNextInstr) RT_NOEXCEPT
4336{
4337 Assert(pVCpu->iem.s.enmEffOpSize != IEMMODE_16BIT);
4338
4339 if (pVCpu->iem.s.enmEffOpSize == IEMMODE_32BIT)
4340 {
4341 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX); Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
4342
4343 uint32_t uNewEip = pVCpu->cpum.GstCtx.eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
4344 if (uNewEip > pVCpu->cpum.GstCtx.cs.u32Limit)
4345 return iemRaiseGeneralProtectionFault0(pVCpu);
4346 pVCpu->cpum.GstCtx.rip = uNewEip;
4347 }
4348 else
4349 {
4350 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
4351
4352 uint64_t uNewRip = pVCpu->cpum.GstCtx.rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
4353 if (!IEM_IS_CANONICAL(uNewRip))
4354 return iemRaiseGeneralProtectionFault0(pVCpu);
4355 pVCpu->cpum.GstCtx.rip = uNewRip;
4356 }
4357 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
4358
4359#ifndef IEM_WITH_CODE_TLB
4360 /* Flush the prefetch buffer. */
4361 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4362#endif
4363
4364 return VINF_SUCCESS;
4365}
4366
4367
4368/**
4369 * Performs a near jump to the specified address.
4370 *
4371 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4372 * segment limit.
4373 *
 * @returns Strict VBox status code.
4374 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4375 * @param uNewRip The new RIP value.
4376 */
4377VBOXSTRICTRC iemRegRipJump(PVMCPUCC pVCpu, uint64_t uNewRip) RT_NOEXCEPT
4378{
4379 switch (pVCpu->iem.s.enmEffOpSize)
4380 {
4381 case IEMMODE_16BIT:
4382 {
4383 Assert(uNewRip <= UINT16_MAX);
4384 if ( uNewRip > pVCpu->cpum.GstCtx.cs.u32Limit
4385 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
4386 return iemRaiseGeneralProtectionFault0(pVCpu);
4387 /** @todo Test 16-bit jump in 64-bit mode. */
4388 pVCpu->cpum.GstCtx.rip = uNewRip;
4389 break;
4390 }
4391
4392 case IEMMODE_32BIT:
4393 {
4394 Assert(uNewRip <= UINT32_MAX);
4395 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
4396 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
4397
4398 if (uNewRip > pVCpu->cpum.GstCtx.cs.u32Limit)
4399 return iemRaiseGeneralProtectionFault0(pVCpu);
4400 pVCpu->cpum.GstCtx.rip = uNewRip;
4401 break;
4402 }
4403
4404 case IEMMODE_64BIT:
4405 {
4406 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
4407
4408 if (!IEM_IS_CANONICAL(uNewRip))
4409 return iemRaiseGeneralProtectionFault0(pVCpu);
4410 pVCpu->cpum.GstCtx.rip = uNewRip;
4411 break;
4412 }
4413
4414 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4415 }
4416
4417 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
4418
4419#ifndef IEM_WITH_CODE_TLB
4420 /* Flush the prefetch buffer. */
4421 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4422#endif
4423
4424 return VINF_SUCCESS;
4425}
4426
4427/** @} */
4428
4429
4430/** @name FPU access and helpers.
4431 *
4432 * @{
4433 */
4434
4435/**
4436 * Updates the x87.DS and FPUDP registers.
4437 *
4438 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4439 * @param pFpuCtx The FPU context.
4440 * @param iEffSeg The effective segment register.
4441 * @param GCPtrEff The effective address relative to @a iEffSeg.
4442 */
4443DECLINLINE(void) iemFpuUpdateDP(PVMCPUCC pVCpu, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
4444{
4445 RTSEL sel;
4446 switch (iEffSeg)
4447 {
4448 case X86_SREG_DS: sel = pVCpu->cpum.GstCtx.ds.Sel; break;
4449 case X86_SREG_SS: sel = pVCpu->cpum.GstCtx.ss.Sel; break;
4450 case X86_SREG_CS: sel = pVCpu->cpum.GstCtx.cs.Sel; break;
4451 case X86_SREG_ES: sel = pVCpu->cpum.GstCtx.es.Sel; break;
4452 case X86_SREG_FS: sel = pVCpu->cpum.GstCtx.fs.Sel; break;
4453 case X86_SREG_GS: sel = pVCpu->cpum.GstCtx.gs.Sel; break;
4454 default:
4455 AssertMsgFailed(("%d\n", iEffSeg));
4456 sel = pVCpu->cpum.GstCtx.ds.Sel;
4457 }
4458     /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
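    /* In real and V86 mode the linear address (selector * 16 + offset) is stored and the selector field is zeroed. */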
4459 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
4460 {
4461 pFpuCtx->DS = 0;
4462 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
4463 }
4464 else if (!IEM_IS_LONG_MODE(pVCpu))
4465 {
4466 pFpuCtx->DS = sel;
4467 pFpuCtx->FPUDP = GCPtrEff;
4468 }
4469 else
4470 *(uint64_t *)&pFpuCtx->FPUDP = GCPtrEff;
4471}
4472
4473
4474/**
4475 * Rotates the stack registers in the push direction.
4476 *
4477 * @param pFpuCtx The FPU context.
4478 * @remarks This is a complete waste of time, but fxsave stores the registers in
4479 * stack order.
4480 */
4481DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
4482{
4483 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
4484 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
4485 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
4486 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
4487 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
4488 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
4489 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
4490 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
4491 pFpuCtx->aRegs[0].r80 = r80Tmp;
4492}
4493
4494
4495/**
4496 * Rotates the stack registers in the pop direction.
4497 *
4498 * @param pFpuCtx The FPU context.
4499 * @remarks This is a complete waste of time, but fxsave stores the registers in
4500 * stack order.
4501 */
4502DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
4503{
4504 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
4505 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
4506 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
4507 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
4508 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
4509 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
4510 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
4511 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
4512 pFpuCtx->aRegs[7].r80 = r80Tmp;
4513}
4514
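/*
 * Illustrative sketch, not part of IEMAll.cpp: the two unrolled rotations
 * above simply shift every register one slot and wrap the displaced one
 * around.  A memmove-based equivalent of the pop direction, using a
 * hypothetical 80-bit value type and helper name, compiled out:
 */
#if 0
# include <string.h>

typedef struct EXAMPLEF80 { unsigned char ab[10]; } EXAMPLEF80;

static void exampleRotateStackPop(EXAMPLEF80 aRegs[8])
{
    EXAMPLEF80 const Tmp = aRegs[0];                        /* old ST0 */
    memmove(&aRegs[0], &aRegs[1], 7 * sizeof(aRegs[0]));    /* aRegs[i] = aRegs[i + 1] */
    aRegs[7] = Tmp;                                         /* old ST0 wraps to the last slot */
}
#endif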
4515
4516/**
4517 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
4518 * exception prevents it.
4519 *
4520 * @param pResult The FPU operation result to push.
4521 * @param pFpuCtx The FPU context.
4522 */
4523static void iemFpuMaybePushResult(PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx) RT_NOEXCEPT
4524{
4525 /* Update FSW and bail if there are pending exceptions afterwards. */
4526 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
4527 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
4528 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4529 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4530 {
4531 pFpuCtx->FSW = fFsw;
4532 return;
4533 }
4534
4535 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
4536 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
4537 {
4538 /* All is fine, push the actual value. */
4539 pFpuCtx->FTW |= RT_BIT(iNewTop);
4540 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
4541 }
4542 else if (pFpuCtx->FCW & X86_FCW_IM)
4543 {
4544 /* Masked stack overflow, push QNaN. */
4545 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
4546 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
4547 }
4548 else
4549 {
4550 /* Raise stack overflow, don't push anything. */
4551 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
4552 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
4553 return;
4554 }
4555
4556 fFsw &= ~X86_FSW_TOP_MASK;
4557 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
4558 pFpuCtx->FSW = fFsw;
4559
4560 iemFpuRotateStackPush(pFpuCtx);
4561}
4562
4563
4564/**
4565 * Stores a result in a FPU register and updates the FSW and FTW.
4566 *
4567 * @param pFpuCtx The FPU context.
4568 * @param pResult The result to store.
4569 * @param iStReg Which FPU register to store it in.
4570 */
4571static void iemFpuStoreResultOnly(PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg) RT_NOEXCEPT
4572{
4573 Assert(iStReg < 8);
4574 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
4575 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
4576 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_TOP_MASK;
4577 pFpuCtx->FTW |= RT_BIT(iReg);
4578 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
4579}
4580
4581
4582/**
4583 * Only updates the FPU status word (FSW) with the result of the current
4584 * instruction.
4585 *
4586 * @param pFpuCtx The FPU context.
4587 * @param u16FSW The FSW output of the current instruction.
4588 */
4589static void iemFpuUpdateFSWOnly(PX86FXSTATE pFpuCtx, uint16_t u16FSW) RT_NOEXCEPT
4590{
4591 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
4592 pFpuCtx->FSW |= u16FSW & ~X86_FSW_TOP_MASK;
4593}
4594
4595
4596/**
4597 * Pops one item off the FPU stack if no pending exception prevents it.
4598 *
4599 * @param pFpuCtx The FPU context.
4600 */
4601static void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx) RT_NOEXCEPT
4602{
4603 /* Check pending exceptions. */
4604 uint16_t uFSW = pFpuCtx->FSW;
4605 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4606 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4607 return;
4608
4609 /* TOP--. */
4610 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
4611 uFSW &= ~X86_FSW_TOP_MASK;
4612 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
4613 pFpuCtx->FSW = uFSW;
4614
4615 /* Mark the previous ST0 as empty. */
4616 iOldTop >>= X86_FSW_TOP_SHIFT;
4617 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
4618
4619 /* Rotate the registers. */
4620 iemFpuRotateStackPop(pFpuCtx);
4621}
4622
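/*
 * Illustrative sketch, not part of IEMAll.cpp: the TOP arithmetic used by the
 * push and pop helpers above works modulo 8 (the TOP field is 3 bits wide),
 * so adding 7 decrements TOP (push) and adding 9, which is congruent to 1
 * modulo 8, increments it (pop).  Hypothetical helper names, compiled out:
 */
#if 0
# include <stdint.h>

static uint16_t exampleTopAfterPush(uint16_t uTop)
{
    return (uint16_t)((uTop + 7) & 7);   /* TOP = (TOP - 1) mod 8 */
}

static uint16_t exampleTopAfterPop(uint16_t uTop)
{
    return (uint16_t)((uTop + 9) & 7);   /* TOP = (TOP + 1) mod 8, matching the +9 above */
}
#endif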
4623
4624/**
4625 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
4626 *
4627 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4628 * @param pResult The FPU operation result to push.
4629 */
4630void iemFpuPushResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult) RT_NOEXCEPT
4631{
4632 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4633 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4634 iemFpuMaybePushResult(pResult, pFpuCtx);
4635}
4636
4637
4638/**
4639 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
4640 * and sets FPUDP and FPUDS.
4641 *
4642 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4643 * @param pResult The FPU operation result to push.
4644 * @param iEffSeg The effective segment register.
4645 * @param GCPtrEff The effective address relative to @a iEffSeg.
4646 */
4647void iemFpuPushResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
4648{
4649 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4650 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
4651 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4652 iemFpuMaybePushResult(pResult, pFpuCtx);
4653}
4654
4655
4656/**
4657 * Replaces ST0 with the first value and pushes the second onto the FPU stack,
4658 * unless a pending exception prevents it.
4659 *
4660 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4661 * @param pResult The FPU operation result to store and push.
4662 */
4663void iemFpuPushResultTwo(PVMCPUCC pVCpu, PIEMFPURESULTTWO pResult) RT_NOEXCEPT
4664{
4665 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4666 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4667
4668 /* Update FSW and bail if there are pending exceptions afterwards. */
4669 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
4670 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
4671 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4672 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4673 {
4674 pFpuCtx->FSW = fFsw;
4675 return;
4676 }
4677
4678 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
4679 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
4680 {
4681 /* All is fine, push the actual value. */
4682 pFpuCtx->FTW |= RT_BIT(iNewTop);
4683 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
4684 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
4685 }
4686 else if (pFpuCtx->FCW & X86_FCW_IM)
4687 {
4688 /* Masked stack overflow, push QNaN. */
4689 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
4690 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
4691 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
4692 }
4693 else
4694 {
4695 /* Raise stack overflow, don't push anything. */
4696 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
4697 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
4698 return;
4699 }
4700
4701 fFsw &= ~X86_FSW_TOP_MASK;
4702 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
4703 pFpuCtx->FSW = fFsw;
4704
4705 iemFpuRotateStackPush(pFpuCtx);
4706}
4707
4708
4709/**
4710 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
4711 * FOP.
4712 *
4713 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4714 * @param pResult The result to store.
4715 * @param iStReg Which FPU register to store it in.
4716 */
4717void iemFpuStoreResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg) RT_NOEXCEPT
4718{
4719 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4720 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4721 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
4722}
4723
4724
4725/**
4726 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
4727 * FOP, and then pops the stack.
4728 *
4729 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4730 * @param pResult The result to store.
4731 * @param iStReg Which FPU register to store it in.
4732 */
4733void iemFpuStoreResultThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg) RT_NOEXCEPT
4734{
4735 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4736 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4737 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
4738 iemFpuMaybePopOne(pFpuCtx);
4739}
4740
4741
4742/**
4743 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
4744 * FPUDP, and FPUDS.
4745 *
4746 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4747 * @param pResult The result to store.
4748 * @param iStReg Which FPU register to store it in.
4749 * @param iEffSeg The effective memory operand selector register.
4750 * @param GCPtrEff The effective memory operand offset.
4751 */
4752void iemFpuStoreResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
4753 uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
4754{
4755 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4756 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
4757 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4758 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
4759}
4760
4761
4762/**
4763 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
4764 * FPUDP, and FPUDS, and then pops the stack.
4765 *
4766 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4767 * @param pResult The result to store.
4768 * @param iStReg Which FPU register to store it in.
4769 * @param iEffSeg The effective memory operand selector register.
4770 * @param GCPtrEff The effective memory operand offset.
4771 */
4772void iemFpuStoreResultWithMemOpThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult,
4773 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
4774{
4775 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4776 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
4777 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4778 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
4779 iemFpuMaybePopOne(pFpuCtx);
4780}
4781
4782
4783/**
4784 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
4785 *
4786 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4787 */
4788void iemFpuUpdateOpcodeAndIp(PVMCPUCC pVCpu) RT_NOEXCEPT
4789{
4790 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4791 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4792}
4793
4794
4795/**
4796 * Updates the FSW, FOP, FPUIP, and FPUCS.
4797 *
4798 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4799 * @param u16FSW The FSW from the current instruction.
4800 */
4801void iemFpuUpdateFSW(PVMCPUCC pVCpu, uint16_t u16FSW) RT_NOEXCEPT
4802{
4803 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4804 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4805 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
4806}
4807
4808
4809/**
4810 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
4811 *
4812 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4813 * @param u16FSW The FSW from the current instruction.
4814 */
4815void iemFpuUpdateFSWThenPop(PVMCPUCC pVCpu, uint16_t u16FSW) RT_NOEXCEPT
4816{
4817 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4818 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4819 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
4820 iemFpuMaybePopOne(pFpuCtx);
4821}
4822
4823
4824/**
4825 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
4826 *
4827 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4828 * @param u16FSW The FSW from the current instruction.
4829 * @param iEffSeg The effective memory operand selector register.
4830 * @param GCPtrEff The effective memory operand offset.
4831 */
4832void iemFpuUpdateFSWWithMemOp(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
4833{
4834 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4835 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
4836 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4837 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
4838}
4839
4840
4841/**
4842 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
4843 *
4844 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4845 * @param u16FSW The FSW from the current instruction.
4846 */
4847void iemFpuUpdateFSWThenPopPop(PVMCPUCC pVCpu, uint16_t u16FSW) RT_NOEXCEPT
4848{
4849 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4850 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4851 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
4852 iemFpuMaybePopOne(pFpuCtx);
4853 iemFpuMaybePopOne(pFpuCtx);
4854}
4855
4856
4857/**
4858 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
4859 *
4860 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4861 * @param u16FSW The FSW from the current instruction.
4862 * @param iEffSeg The effective memory operand selector register.
4863 * @param GCPtrEff The effective memory operand offset.
4864 */
4865void iemFpuUpdateFSWWithMemOpThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
4866{
4867 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4868 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
4869 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4870 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
4871 iemFpuMaybePopOne(pFpuCtx);
4872}
4873
4874
4875/**
4876 * Worker routine for raising an FPU stack underflow exception.
4877 *
4878 * @param pFpuCtx The FPU context.
4879 * @param iStReg The stack register being accessed.
4880 */
4881static void iemFpuStackUnderflowOnly(PX86FXSTATE pFpuCtx, uint8_t iStReg)
4882{
4883 Assert(iStReg < 8 || iStReg == UINT8_MAX);
4884 if (pFpuCtx->FCW & X86_FCW_IM)
4885 {
4886 /* Masked underflow. */
4887 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
4888 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
4889 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
4890 if (iStReg != UINT8_MAX)
4891 {
4892 pFpuCtx->FTW |= RT_BIT(iReg);
4893 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
4894 }
4895 }
4896 else
4897 {
4898 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
4899 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
4900 }
4901}
4902
4903
4904/**
4905 * Raises a FPU stack underflow exception.
4906 *
4907 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4908 * @param iStReg The destination register that should be loaded
4909 * with QNaN if \#IS is not masked. Specify
4910 * UINT8_MAX if none (like for fcom).
4911 */
4912void iemFpuStackUnderflow(PVMCPUCC pVCpu, uint8_t iStReg) RT_NOEXCEPT
4913{
4914 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4915 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4916 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
4917}
4918
4919
4920void iemFpuStackUnderflowWithMemOp(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
4921{
4922 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4923 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
4924 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4925 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
4926}
4927
4928
4929void iemFpuStackUnderflowThenPop(PVMCPUCC pVCpu, uint8_t iStReg) RT_NOEXCEPT
4930{
4931 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4932 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4933 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
4934 iemFpuMaybePopOne(pFpuCtx);
4935}
4936
4937
4938void iemFpuStackUnderflowWithMemOpThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
4939{
4940 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4941 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
4942 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4943 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
4944 iemFpuMaybePopOne(pFpuCtx);
4945}
4946
4947
4948void iemFpuStackUnderflowThenPopPop(PVMCPUCC pVCpu) RT_NOEXCEPT
4949{
4950 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4951 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4952 iemFpuStackUnderflowOnly(pFpuCtx, UINT8_MAX);
4953 iemFpuMaybePopOne(pFpuCtx);
4954 iemFpuMaybePopOne(pFpuCtx);
4955}
4956
4957
4958void iemFpuStackPushUnderflow(PVMCPUCC pVCpu) RT_NOEXCEPT
4959{
4960 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4961 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4962
4963 if (pFpuCtx->FCW & X86_FCW_IM)
4964 {
4965        /* Masked underflow - Push QNaN. */
4966 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
4967 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
4968 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
4969 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
4970 pFpuCtx->FTW |= RT_BIT(iNewTop);
4971 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
4972 iemFpuRotateStackPush(pFpuCtx);
4973 }
4974 else
4975 {
4976 /* Exception pending - don't change TOP or the register stack. */
4977 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
4978 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
4979 }
4980}
4981
4982
4983void iemFpuStackPushUnderflowTwo(PVMCPUCC pVCpu) RT_NOEXCEPT
4984{
4985 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4986 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4987
4988 if (pFpuCtx->FCW & X86_FCW_IM)
4989 {
4990        /* Masked underflow - Push QNaN. */
4991 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
4992 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
4993 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
4994 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
4995 pFpuCtx->FTW |= RT_BIT(iNewTop);
4996 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
4997 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
4998 iemFpuRotateStackPush(pFpuCtx);
4999 }
5000 else
5001 {
5002 /* Exception pending - don't change TOP or the register stack. */
5003 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5004 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5005 }
5006}
5007
5008
5009/**
5010 * Worker routine for raising an FPU stack overflow exception on a push.
5011 *
5012 * @param pFpuCtx The FPU context.
5013 */
5014static void iemFpuStackPushOverflowOnly(PX86FXSTATE pFpuCtx) RT_NOEXCEPT
5015{
5016 if (pFpuCtx->FCW & X86_FCW_IM)
5017 {
5018 /* Masked overflow. */
5019 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5020 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5021 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
5022 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5023 pFpuCtx->FTW |= RT_BIT(iNewTop);
5024 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5025 iemFpuRotateStackPush(pFpuCtx);
5026 }
5027 else
5028 {
5029 /* Exception pending - don't change TOP or the register stack. */
5030 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5031 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5032 }
5033}
5034
5035
5036/**
5037 * Raises a FPU stack overflow exception on a push.
5038 *
5039 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5040 */
5041void iemFpuStackPushOverflow(PVMCPUCC pVCpu) RT_NOEXCEPT
5042{
5043 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5044 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5045 iemFpuStackPushOverflowOnly(pFpuCtx);
5046}
5047
5048
5049/**
5050 * Raises a FPU stack overflow exception on a push with a memory operand.
5051 *
5052 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5053 * @param iEffSeg The effective memory operand selector register.
5054 * @param GCPtrEff The effective memory operand offset.
5055 */
5056void iemFpuStackPushOverflowWithMemOp(PVMCPUCC pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
5057{
5058 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5059 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5060 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5061 iemFpuStackPushOverflowOnly(pFpuCtx);
5062}
5063
5064/** @} */
5065
5066
5067/** @name Memory access.
5068 *
5069 * @{
5070 */
5071
5072
5073/**
5074 * Updates the IEMCPU::cbWritten counter if applicable.
5075 *
5076 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5077 * @param fAccess The access being accounted for.
5078 * @param cbMem The access size.
5079 */
5080DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPUCC pVCpu, uint32_t fAccess, size_t cbMem)
5081{
5082 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
5083 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
5084 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
5085}
5086
5087
5088/**
5089 * Applies the segment limit, base and attributes.
5090 *
5091 * This may raise a \#GP or \#SS.
5092 *
5093 * @returns VBox strict status code.
5094 *
5095 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5096 * @param fAccess The kind of access which is being performed.
5097 * @param iSegReg The index of the segment register to apply.
5098 * This is UINT8_MAX if none (for IDT, GDT, LDT,
5099 * TSS, ++).
5100 * @param cbMem The access size.
5101 * @param pGCPtrMem Pointer to the guest memory address to apply
5102 * segmentation to. Input and output parameter.
5103 */
5104VBOXSTRICTRC iemMemApplySegment(PVMCPUCC pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem) RT_NOEXCEPT
5105{
5106 if (iSegReg == UINT8_MAX)
5107 return VINF_SUCCESS;
5108
5109 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
5110 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
5111 switch (pVCpu->iem.s.enmCpuMode)
5112 {
5113 case IEMMODE_16BIT:
5114 case IEMMODE_32BIT:
5115 {
5116 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
5117 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
5118
5119 if ( pSel->Attr.n.u1Present
5120 && !pSel->Attr.n.u1Unusable)
5121 {
5122 Assert(pSel->Attr.n.u1DescType);
5123 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
5124 {
5125 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5126 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
5127 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
5128
5129 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
5130 {
5131 /** @todo CPL check. */
5132 }
5133
5134 /*
5135 * There are two kinds of data selectors, normal and expand down.
5136 */
5137 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
5138 {
5139 if ( GCPtrFirst32 > pSel->u32Limit
5140 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
5141 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5142 }
5143 else
5144 {
5145 /*
5146 * The upper boundary is defined by the B bit, not the G bit!
5147 */
5148 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
5149 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
5150 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5151 }
5152 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
5153 }
5154 else
5155 {
5156 /*
5157                  * Code selectors can usually be used to read through; writing is
5158 * only permitted in real and V8086 mode.
5159 */
5160 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5161 || ( (fAccess & IEM_ACCESS_TYPE_READ)
5162 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
5163 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
5164 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
5165
5166 if ( GCPtrFirst32 > pSel->u32Limit
5167 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
5168 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5169
5170 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
5171 {
5172 /** @todo CPL check. */
5173 }
5174
5175 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
5176 }
5177 }
5178 else
5179 return iemRaiseGeneralProtectionFault0(pVCpu);
5180 return VINF_SUCCESS;
5181 }
5182
5183 case IEMMODE_64BIT:
5184 {
5185 RTGCPTR GCPtrMem = *pGCPtrMem;
5186 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
5187 *pGCPtrMem = GCPtrMem + pSel->u64Base;
5188
5189 Assert(cbMem >= 1);
5190 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
5191 return VINF_SUCCESS;
5192 /** @todo We should probably raise \#SS(0) here if segment is SS; see AMD spec.
5193 * 4.12.2 "Data Limit Checks in 64-bit Mode". */
5194 return iemRaiseGeneralProtectionFault0(pVCpu);
5195 }
5196
5197 default:
5198 AssertFailedReturn(VERR_IEM_IPE_7);
5199 }
5200}
5201
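/*
 * Illustrative sketch, not part of IEMAll.cpp: for an expand-down data
 * segment the valid offsets are limit+1 up to an upper bound that depends on
 * the B (default-big) bit: 0xffffffff when it is set, 0xffff when clear; the
 * G bit only scales the limit itself.  Hypothetical names, compiled out:
 */
#if 0
# include <stdint.h>
# include <stdbool.h>

static bool exampleExpandDownInBounds(uint32_t offFirst, uint32_t offLast, uint32_t uLimit, bool fDefBig)
{
    uint32_t const uUpperBound = fDefBig ? UINT32_MAX : UINT32_C(0xffff);
    return offFirst > uLimit         /* at or below the limit is out of bounds */
        && offLast <= uUpperBound;   /* above the B-bit defined ceiling is too */
}
#endif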
5202
5203/**
5204 * Translates a virtual address to a physical address and checks if we
5205 * can access the page as specified.
5206 *
5207 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5208 * @param GCPtrMem The virtual address.
5209 * @param fAccess The intended access.
5210 * @param pGCPhysMem Where to return the physical address.
5211 */
5212VBOXSTRICTRC iemMemPageTranslateAndCheckAccess(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint32_t fAccess, PRTGCPHYS pGCPhysMem) RT_NOEXCEPT
5213{
5214 /** @todo Need a different PGM interface here. We're currently using
5215 * generic / REM interfaces. This won't cut it for R0. */
5216 /** @todo If/when PGM handles paged real-mode, we can remove the hack in
5217 * iemSvmWorldSwitch/iemVmxWorldSwitch to work around raising a page-fault
5218 * here. */
5219 PGMPTWALK Walk;
5220 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
5221 if (RT_FAILURE(rc))
5222 {
5223 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
5224 /** @todo Check unassigned memory in unpaged mode. */
5225 /** @todo Reserved bits in page tables. Requires new PGM interface. */
5226#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5227 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5228 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
5229#endif
5230 *pGCPhysMem = NIL_RTGCPHYS;
5231 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, rc);
5232 }
5233
5234 /* If the page is writable and does not have the no-exec bit set, all
5235 access is allowed. Otherwise we'll have to check more carefully... */
5236 if ((Walk.fEffective & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
5237 {
5238 /* Write to read only memory? */
5239 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5240 && !(Walk.fEffective & X86_PTE_RW)
5241 && ( ( pVCpu->iem.s.uCpl == 3
5242 && !(fAccess & IEM_ACCESS_WHAT_SYS))
5243 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
5244 {
5245 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
5246 *pGCPhysMem = NIL_RTGCPHYS;
5247#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5248 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5249 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5250#endif
5251 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
5252 }
5253
5254 /* Kernel memory accessed by userland? */
5255 if ( !(Walk.fEffective & X86_PTE_US)
5256 && pVCpu->iem.s.uCpl == 3
5257 && !(fAccess & IEM_ACCESS_WHAT_SYS))
5258 {
5259 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
5260 *pGCPhysMem = NIL_RTGCPHYS;
5261#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5262 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5263 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5264#endif
5265 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
5266 }
5267
5268 /* Executing non-executable memory? */
5269 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
5270 && (Walk.fEffective & X86_PTE_PAE_NX)
5271 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) )
5272 {
5273 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
5274 *pGCPhysMem = NIL_RTGCPHYS;
5275#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5276 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5277 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5278#endif
5279 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
5280 VERR_ACCESS_DENIED);
5281 }
5282 }
5283
5284 /*
5285 * Set the dirty / access flags.
5286 * ASSUMES this is set when the address is translated rather than on commit...
5287 */
5288 /** @todo testcase: check when A and D bits are actually set by the CPU. */
5289 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
5290 if ((Walk.fEffective & fAccessedDirty) != fAccessedDirty)
5291 {
5292 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
5293 AssertRC(rc2);
5294 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
5295 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
5296 }
5297
5298 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
5299 *pGCPhysMem = GCPhys;
5300 return VINF_SUCCESS;
5301}
5302
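/*
 * Illustrative sketch, not part of IEMAll.cpp: a condensed version of the
 * page-level permission rules applied above.  Supervisor writes honour
 * CR0.WP, ring-3 may neither write read-only pages nor touch supervisor
 * pages (system accesses excepted), and instruction fetches fail on NX pages
 * when EFER.NXE is set.  Names and parameters are hypothetical, compiled out:
 */
#if 0
# include <stdbool.h>

static bool examplePagePermissionsOk(bool fUserPage, bool fWritablePage, bool fNxPage,
                                     bool fWrite, bool fExec, bool fSysAccess,
                                     unsigned uCpl, bool fCr0Wp, bool fEferNxe)
{
    if (fWrite && !fWritablePage && ((uCpl == 3 && !fSysAccess) || fCr0Wp))
        return false;   /* write to read-only page -> #PF */
    if (!fUserPage && uCpl == 3 && !fSysAccess)
        return false;   /* user access to supervisor page -> #PF */
    if (fExec && fNxPage && fEferNxe)
        return false;   /* instruction fetch from no-execute page -> #PF */
    return true;
}
#endif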
5303
5304/**
5305 * Looks up a memory mapping entry.
5306 *
5307 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
5308 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5309 * @param pvMem The memory address.
5310 * @param pvMem The memory address.
5311 * @param fAccess The access flags to match.
5311 */
5312DECLINLINE(int) iemMapLookup(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess)
5313{
5314 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
5315 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
5316 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
5317 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5318 return 0;
5319 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
5320 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5321 return 1;
5322 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
5323 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5324 return 2;
5325 return VERR_NOT_FOUND;
5326}
5327
5328
5329/**
5330 * Finds a free memmap entry when using iNextMapping doesn't work.
5331 *
5332 * @returns Memory mapping index, 1024 on failure.
5333 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5334 */
5335static unsigned iemMemMapFindFree(PVMCPUCC pVCpu)
5336{
5337 /*
5338 * The easy case.
5339 */
5340 if (pVCpu->iem.s.cActiveMappings == 0)
5341 {
5342 pVCpu->iem.s.iNextMapping = 1;
5343 return 0;
5344 }
5345
5346 /* There should be enough mappings for all instructions. */
5347 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
5348
5349 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
5350 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
5351 return i;
5352
5353 AssertFailedReturn(1024);
5354}
5355
5356
5357/**
5358 * Commits a bounce buffer that needs writing back and unmaps it.
5359 *
5360 * @returns Strict VBox status code.
5361 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5362 * @param iMemMap The index of the buffer to commit.
5363 * @param fPostponeFail Whether we can postpone writer failures to ring-3.
5364 * Always false in ring-3, obviously.
5365 */
5366static VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPUCC pVCpu, unsigned iMemMap, bool fPostponeFail)
5367{
5368 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
5369 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
5370#ifdef IN_RING3
5371 Assert(!fPostponeFail);
5372 RT_NOREF_PV(fPostponeFail);
5373#endif
5374
5375 /*
5376 * Do the writing.
5377 */
5378 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
5379 if (!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned)
5380 {
5381 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
5382 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
5383 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
5384 if (!pVCpu->iem.s.fBypassHandlers)
5385 {
5386 /*
5387 * Carefully and efficiently dealing with access handler return
5388 * codes makes this a little bloated.
5389 */
5390 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
5391 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
5392 pbBuf,
5393 cbFirst,
5394 PGMACCESSORIGIN_IEM);
5395 if (rcStrict == VINF_SUCCESS)
5396 {
5397 if (cbSecond)
5398 {
5399 rcStrict = PGMPhysWrite(pVM,
5400 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5401 pbBuf + cbFirst,
5402 cbSecond,
5403 PGMACCESSORIGIN_IEM);
5404 if (rcStrict == VINF_SUCCESS)
5405 { /* nothing */ }
5406 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5407 {
5408 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
5409 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5410 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5411 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5412 }
5413#ifndef IN_RING3
5414 else if (fPostponeFail)
5415 {
5416 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5417 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5418 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5419 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
5420 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5421 return iemSetPassUpStatus(pVCpu, rcStrict);
5422 }
5423#endif
5424 else
5425 {
5426 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5427 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5428 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5429 return rcStrict;
5430 }
5431 }
5432 }
5433 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5434 {
5435 if (!cbSecond)
5436 {
5437 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
5438 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
5439 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5440 }
5441 else
5442 {
5443 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
5444 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5445 pbBuf + cbFirst,
5446 cbSecond,
5447 PGMACCESSORIGIN_IEM);
5448 if (rcStrict2 == VINF_SUCCESS)
5449 {
5450 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
5451 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5452 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5453 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5454 }
5455 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
5456 {
5457 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
5458 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5459 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
5460 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
5461 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5462 }
5463#ifndef IN_RING3
5464 else if (fPostponeFail)
5465 {
5466 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5467 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5468 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5469 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
5470 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5471 return iemSetPassUpStatus(pVCpu, rcStrict);
5472 }
5473#endif
5474 else
5475 {
5476 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5477 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5478 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
5479 return rcStrict2;
5480 }
5481 }
5482 }
5483#ifndef IN_RING3
5484 else if (fPostponeFail)
5485 {
5486 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5487 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5488 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5489 if (!cbSecond)
5490 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
5491 else
5492 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
5493 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5494 return iemSetPassUpStatus(pVCpu, rcStrict);
5495 }
5496#endif
5497 else
5498 {
5499 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
5500 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5501 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5502 return rcStrict;
5503 }
5504 }
5505 else
5506 {
5507 /*
5508 * No access handlers, much simpler.
5509 */
5510 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
5511 if (RT_SUCCESS(rc))
5512 {
5513 if (cbSecond)
5514 {
5515 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
5516 if (RT_SUCCESS(rc))
5517 { /* likely */ }
5518 else
5519 {
5520 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5521 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5522 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
5523 return rc;
5524 }
5525 }
5526 }
5527 else
5528 {
5529 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
5530 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
5531 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5532 return rc;
5533 }
5534 }
5535 }
5536
5537#if defined(IEM_LOG_MEMORY_WRITES)
5538 Log(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
5539 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
5540 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
5541 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5542 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
5543 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
5544
5545 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
5546 g_cbIemWrote = cbWrote;
5547 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
5548#endif
5549
5550 /*
5551 * Free the mapping entry.
5552 */
5553 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
5554 Assert(pVCpu->iem.s.cActiveMappings != 0);
5555 pVCpu->iem.s.cActiveMappings--;
5556 return VINF_SUCCESS;
5557}
5558
5559
5560/**
5561 * iemMemMap worker that deals with a request crossing pages.
5562 */
5563static VBOXSTRICTRC
5564iemMemBounceBufferMapCrossPage(PVMCPUCC pVCpu, int iMemMap, void **ppvMem, size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
5565{
5566 /*
5567 * Do the address translations.
5568 */
5569 RTGCPHYS GCPhysFirst;
5570 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, fAccess, &GCPhysFirst);
5571 if (rcStrict != VINF_SUCCESS)
5572 return rcStrict;
5573
5574 RTGCPHYS GCPhysSecond;
5575 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK,
5576 fAccess, &GCPhysSecond);
5577 if (rcStrict != VINF_SUCCESS)
5578 return rcStrict;
5579 GCPhysSecond &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
5580
5581 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
5582
5583 /*
5584 * Read in the current memory content if it's a read, execute or partial
5585 * write access.
5586 */
5587 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
5588 uint32_t const cbFirstPage = GUEST_PAGE_SIZE - (GCPhysFirst & GUEST_PAGE_OFFSET_MASK);
5589 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
5590
5591 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
5592 {
5593 if (!pVCpu->iem.s.fBypassHandlers)
5594 {
5595 /*
5596 * Must carefully deal with access handler status codes here; this
5597 * makes the code a bit bloated.
5598 */
5599 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
5600 if (rcStrict == VINF_SUCCESS)
5601 {
5602 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
5603 if (rcStrict == VINF_SUCCESS)
5604 { /*likely */ }
5605 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5606 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5607 else
5608 {
5609 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (!!)\n",
5610 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5611 return rcStrict;
5612 }
5613 }
5614 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5615 {
5616 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
5617 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
5618 {
5619 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
5620 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5621 }
5622 else
5623 {
5624 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
5625 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
5626 return rcStrict2;
5627 }
5628 }
5629 else
5630 {
5631 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
5632 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
5633 return rcStrict;
5634 }
5635 }
5636 else
5637 {
5638 /*
5639 * No informational status codes here, much more straightforward.
5640 */
5641 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
5642 if (RT_SUCCESS(rc))
5643 {
5644 Assert(rc == VINF_SUCCESS);
5645 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
5646 if (RT_SUCCESS(rc))
5647 Assert(rc == VINF_SUCCESS);
5648 else
5649 {
5650 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
5651 return rc;
5652 }
5653 }
5654 else
5655 {
5656 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
5657 return rc;
5658 }
5659 }
5660 }
5661#ifdef VBOX_STRICT
5662 else
5663 memset(pbBuf, 0xcc, cbMem);
5664 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
5665 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
5666#endif
5667
5668 /*
5669 * Commit the bounce buffer entry.
5670 */
5671 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
5672 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
5673 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
5674 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
5675 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
5676 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
5677 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
5678 pVCpu->iem.s.iNextMapping = iMemMap + 1;
5679 pVCpu->iem.s.cActiveMappings++;
5680
5681 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
5682 *ppvMem = pbBuf;
5683 return VINF_SUCCESS;
5684}
5685
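/*
 * Illustrative sketch, not part of IEMAll.cpp: how a straddling access is
 * split into the two chunks the bounce buffer above reads in and later
 * writes back.  Hypothetical names; 0x1000 stands in for GUEST_PAGE_SIZE.
 * Compiled out:
 */
#if 0
# include <stddef.h>
# include <stdint.h>

static void exampleSplitCrossPageAccess(uint64_t GCPtr, size_t cbMem, size_t *pcbFirst, size_t *pcbSecond)
{
    size_t const cbFirst = (size_t)(0x1000 - (GCPtr & 0xfff)); /* bytes left on the first page */
    *pcbFirst  = cbFirst;
    *pcbSecond = cbMem - cbFirst;                              /* the remainder lands on the next page */
}
#endif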
5686
5687/**
5688 * iemMemMap worker that deals with iemMemPageMap failures.
5689 */
5690static VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPUCC pVCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
5691 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
5692{
5693 /*
5694 * Filter out conditions we can handle and the ones which shouldn't happen.
5695 */
5696 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
5697 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
5698 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
5699 {
5700 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
5701 return rcMap;
5702 }
5703 pVCpu->iem.s.cPotentialExits++;
5704
5705 /*
5706 * Read in the current memory content if it's a read, execute or partial
5707 * write access.
5708 */
5709 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
5710 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
5711 {
5712 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
5713 memset(pbBuf, 0xff, cbMem);
5714 else
5715 {
5716 int rc;
5717 if (!pVCpu->iem.s.fBypassHandlers)
5718 {
5719 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
5720 if (rcStrict == VINF_SUCCESS)
5721 { /* nothing */ }
5722 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5723 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5724 else
5725 {
5726 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
5727 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
5728 return rcStrict;
5729 }
5730 }
5731 else
5732 {
5733 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
5734 if (RT_SUCCESS(rc))
5735 { /* likely */ }
5736 else
5737 {
5738 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
5739 GCPhysFirst, rc));
5740 return rc;
5741 }
5742 }
5743 }
5744 }
5745#ifdef VBOX_STRICT
5746 else
5747 memset(pbBuf, 0xcc, cbMem);
5748#endif
5749#ifdef VBOX_STRICT
5750 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
5751 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
5752#endif
5753
5754 /*
5755 * Commit the bounce buffer entry.
5756 */
5757 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
5758 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
5759 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
5760 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
5761 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
5762 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
5763 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
5764 pVCpu->iem.s.iNextMapping = iMemMap + 1;
5765 pVCpu->iem.s.cActiveMappings++;
5766
5767 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
5768 *ppvMem = pbBuf;
5769 return VINF_SUCCESS;
5770}
5771
5772
5773
5774/**
5775 * Maps the specified guest memory for the given kind of access.
5776 *
5777 * This may be using bounce buffering of the memory if it's crossing a page
5778 * boundary or if there is an access handler installed for any of it. Because
5779 * of lock prefix guarantees, we're in for some extra clutter when this
5780 * happens.
5781 *
5782 * This may raise a \#GP, \#SS, \#PF or \#AC.
5783 *
5784 * @returns VBox strict status code.
5785 *
5786 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5787 * @param ppvMem Where to return the pointer to the mapped
5788 * memory.
5789 * @param cbMem The number of bytes to map. This is usually 1,
5790 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
5791 * string operations it can be up to a page.
5792 * @param iSegReg The index of the segment register to use for
5793 * this access. The base and limits are checked.
5794 * Use UINT8_MAX to indicate that no segmentation
5795 * is required (for IDT, GDT and LDT accesses).
5796 * @param GCPtrMem The address of the guest memory.
5797 * @param fAccess How the memory is being accessed. The
5798 * IEM_ACCESS_TYPE_XXX bit is used to figure out
5799 * how to map the memory, while the
5800 * IEM_ACCESS_WHAT_XXX bit is used when raising
5801 * exceptions.
5802 */
5803VBOXSTRICTRC iemMemMap(PVMCPUCC pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess) RT_NOEXCEPT
5804{
5805 /*
5806 * Check the input and figure out which mapping entry to use.
5807 */
5808 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 256 || cbMem == 108 || cbMem == 104 || cbMem == 102 || cbMem == 94); /* 512 is the max! */
5809 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
5810 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
5811
5812 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
5813 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
5814 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
5815 {
5816 iMemMap = iemMemMapFindFree(pVCpu);
5817 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
5818 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
5819 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
5820 pVCpu->iem.s.aMemMappings[2].fAccess),
5821 VERR_IEM_IPE_9);
5822 }
5823
5824 /*
5825 * Map the memory, checking that we can actually access it. If something
5826 * slightly complicated happens, fall back on bounce buffering.
5827 */
5828 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
5829 if (rcStrict == VINF_SUCCESS)
5830 { /* likely */ }
5831 else
5832 return rcStrict;
5833
5834 if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE) /* Crossing a page boundary? */
5835 { /* likely */ }
5836 else
5837 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
5838
5839#ifdef IEM_WITH_DATA_TLB
5840 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
5841
5842 /*
5843 * Get the TLB entry for this page.
5844 */
5845 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
5846 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
5847 if (pTlbe->uTag == uTag)
5848 {
5849# ifdef VBOX_WITH_STATISTICS
5850 pVCpu->iem.s.DataTlb.cTlbHits++;
5851# endif
5852 }
5853 else
5854 {
5855 pVCpu->iem.s.DataTlb.cTlbMisses++;
5856 PGMPTWALK Walk;
5857 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
5858 if (RT_FAILURE(rc))
5859 {
5860 Log(("iemMemMap: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
5861# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5862 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5863 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
5864# endif
5865 iemRaisePageFaultJmp(pVCpu, GCPtrMem, fAccess, rc);
5866 }
5867
5868 Assert(Walk.fSucceeded);
5869 pTlbe->uTag = uTag;
5870 pTlbe->fFlagsAndPhysRev = ~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A); /* skipping NX */
5871 pTlbe->GCPhys = Walk.GCPhys;
5872 pTlbe->pbMappingR3 = NULL;
5873 }
5874
5875 /*
5876 * Check TLB page table level access flags.
5877 */
5878 /* If the page is either supervisor only or non-writable, we need to do
5879 more careful access checks. */
5880 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_WRITE))
5881 {
5882 /* Write to read only memory? */
5883 if ( (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_WRITE)
5884 && (fAccess & IEM_ACCESS_TYPE_WRITE)
5885 && ( ( pVCpu->iem.s.uCpl == 3
5886 && !(fAccess & IEM_ACCESS_WHAT_SYS))
5887 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
5888 {
5889 Log(("iemMemMap: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
5890# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5891 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5892 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5893# endif
5894 iemRaisePageFaultJmp(pVCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
5895 }
5896
5897 /* Kernel memory accessed by userland? */
5898 if ( (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER)
5899 && pVCpu->iem.s.uCpl == 3
5900 && !(fAccess & IEM_ACCESS_WHAT_SYS))
5901 {
5902 Log(("iemMemMap: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
5903# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5904 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5905 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5906# endif
5907 iemRaisePageFaultJmp(pVCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
5908 }
5909 }
5910
5911 /*
5912 * Set the dirty / access flags.
5913 * ASSUMES this is set when the address is translated rather than on commit...
5914 */
5915 /** @todo testcase: check when A and D bits are actually set by the CPU. */
5916 uint64_t const fTlbAccessedDirty = (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PT_NO_DIRTY : 0) | IEMTLBE_F_PT_NO_ACCESSED;
5917 if (pTlbe->fFlagsAndPhysRev & fTlbAccessedDirty)
5918 {
5919 uint32_t const fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
5920 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
5921 AssertRC(rc2);
5922 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
5923 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
5924 pTlbe->fFlagsAndPhysRev &= ~fTlbAccessedDirty;
5925 }
5926
5927 /*
5928 * Look up the physical page info if necessary.
5929 */
5930 uint8_t *pbMem = NULL;
5931 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)
5932# ifdef IN_RING3
5933 pbMem = pTlbe->pbMappingR3;
5934# else
5935 pbMem = NULL;
5936# endif
5937 else
5938 {
5939 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
5940 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
5941 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
5942 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
5943        if (RT_LIKELY(pVCpu->iem.s.DataTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
5944 { /* likely */ }
5945 else
5946 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
5947 pTlbe->pbMappingR3 = NULL;
5948 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
5949 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_UNASSIGNED);
5950 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
5951 &pbMem, &pTlbe->fFlagsAndPhysRev);
5952 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
5953# ifdef IN_RING3
5954 pTlbe->pbMappingR3 = pbMem;
5955# endif
5956 }
5957
5958 /*
5959 * Check the physical page level access and mapping.
5960 */
5961 if ( !(pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ))
5962 || !(pTlbe->fFlagsAndPhysRev & ( (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PG_NO_WRITE : 0)
5963 | (fAccess & IEM_ACCESS_TYPE_READ ? IEMTLBE_F_PG_NO_READ : 0))) )
5964 { /* probably likely */ }
5965 else
5966 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem,
5967 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,
5968 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED
5969 : pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? VERR_PGM_PHYS_TLB_CATCH_ALL
5970 : VERR_PGM_PHYS_TLB_CATCH_WRITE);
5971 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3)); /* ASSUMPTIONS about PGMPhysIemGCPhys2PtrNoLock behaviour. */
5972
5973 if (pbMem)
5974 {
5975 Assert(!((uintptr_t)pbMem & GUEST_PAGE_OFFSET_MASK));
5976 pbMem = pbMem + (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
5977 fAccess |= IEM_ACCESS_NOT_LOCKED;
5978 }
5979 else
5980 {
5981 Assert(!(fAccess & IEM_ACCESS_NOT_LOCKED));
5982 RTGCPHYS const GCPhysFirst = pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
5983 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
5984 if (rcStrict != VINF_SUCCESS)
5985 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
5986 }
5987
5988 void * const pvMem = pbMem;
5989
5990 if (fAccess & IEM_ACCESS_TYPE_WRITE)
5991 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
5992 if (fAccess & IEM_ACCESS_TYPE_READ)
5993 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
5994
5995#else /* !IEM_WITH_DATA_TLB */
5996
5997 RTGCPHYS GCPhysFirst;
5998 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
5999 if (rcStrict != VINF_SUCCESS)
6000 return rcStrict;
6001
6002 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6003 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6004 if (fAccess & IEM_ACCESS_TYPE_READ)
6005 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6006
6007 void *pvMem;
6008 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6009 if (rcStrict != VINF_SUCCESS)
6010 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
6011
6012#endif /* !IEM_WITH_DATA_TLB */
6013
6014 /*
6015 * Fill in the mapping table entry.
6016 */
6017 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
6018 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
6019 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6020 pVCpu->iem.s.cActiveMappings += 1;
6021
6022 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6023 *ppvMem = pvMem;
6024
6025 return VINF_SUCCESS;
6026}
6027
6028
6029/**
6030 * Commits the guest memory if bounce buffered and unmaps it.
6031 *
6032 * @returns Strict VBox status code.
6033 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6034 * @param pvMem The mapping.
6035 * @param fAccess The kind of access.
6036 */
6037VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess) RT_NOEXCEPT
6038{
6039 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
6040 AssertReturn(iMemMap >= 0, iMemMap);
6041
6042 /* If it's bounce buffered, we may need to write back the buffer. */
6043 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6044 {
6045 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6046 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
6047 }
6048 /* Otherwise unlock it. */
6049 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6050 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6051
6052 /* Free the entry. */
6053 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6054 Assert(pVCpu->iem.s.cActiveMappings != 0);
6055 pVCpu->iem.s.cActiveMappings--;
6056 return VINF_SUCCESS;
6057}
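
/*
 * Illustrative sketch of the calling pattern expected by iemMemMap and
 * iemMemCommitAndUnmap: map, access the returned pointer, then commit and
 * unmap with the same access flags.  The helper name below is made up for
 * exposition only and the block is compiled out.
 */
#if 0
static VBOXSTRICTRC iemExampleStoreWord(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
{
    uint16_t *pu16Dst;
    VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
    if (rcStrict == VINF_SUCCESS)
    {
        *pu16Dst = u16Value;    /* write through the mapping (may be a bounce buffer) */
        rcStrict = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
    }
    return rcStrict;
}
#endif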
6058
6059#ifdef IEM_WITH_SETJMP
6060
6061/**
6062 * Maps the specified guest memory for the given kind of access, longjmp on
6063 * error.
6064 *
6065 * This may be using bounce buffering of the memory if it's crossing a page
6066 * boundary or if there is an access handler installed for any of it. Because
6067 * of lock prefix guarantees, we're in for some extra clutter when this
6068 * happens.
6069 *
6070 * This may raise a \#GP, \#SS, \#PF or \#AC.
6071 *
6072 * @returns Pointer to the mapped memory.
6073 *
6074 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6075 * @param cbMem The number of bytes to map. This is usually 1,
6076 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
6077 * string operations it can be up to a page.
6078 * @param iSegReg The index of the segment register to use for
6079 * this access. The base and limits are checked.
6080 * Use UINT8_MAX to indicate that no segmentation
6081 * is required (for IDT, GDT and LDT accesses).
6082 * @param GCPtrMem The address of the guest memory.
6083 * @param fAccess How the memory is being accessed. The
6084 * IEM_ACCESS_TYPE_XXX bit is used to figure out
6085 * how to map the memory, while the
6086 * IEM_ACCESS_WHAT_XXX bit is used when raising
6087 * exceptions.
6088 */
6089void *iemMemMapJmp(PVMCPUCC pVCpu, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess) RT_NOEXCEPT
6090{
6091 /*
6092 * Check the input, check segment access and adjust address
6093 * with segment base.
6094 */
6095 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
6096 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
6097 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
6098
6099 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
6100 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
6101 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
6102
6103 /*
6104 * Figure out which mapping entry to use.
6105 */
6106 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
6107 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6108 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
6109 {
6110 iMemMap = iemMemMapFindFree(pVCpu);
6111 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
6112 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
6113 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
6114 pVCpu->iem.s.aMemMappings[2].fAccess),
6115 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_9));
6116 }
6117
6118 /*
6119 * Crossing a page boundary?
6120 */
6121 if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE)
6122 { /* No (likely). */ }
6123 else
6124 {
6125 void *pvMem;
6126 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, cbMem, GCPtrMem, fAccess);
6127 if (rcStrict == VINF_SUCCESS)
6128 return pvMem;
6129 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
6130 }
6131
6132#ifdef IEM_WITH_DATA_TLB
6133 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
6134
6135 /*
6136 * Get the TLB entry for this page.
6137 */
6138 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
6139 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
6140 if (pTlbe->uTag == uTag)
6141 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
6142 else
6143 {
6144 pVCpu->iem.s.DataTlb.cTlbMisses++;
6145 PGMPTWALK Walk;
6146 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
6147 if (RT_FAILURE(rc))
6148 {
6149 Log(("iemMemMapJmp: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
6150# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6151 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6152 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
6153# endif
6154 iemRaisePageFaultJmp(pVCpu, GCPtrMem, fAccess, rc);
6155 }
6156
6157 Assert(Walk.fSucceeded);
6158 pTlbe->uTag = uTag;
6159 pTlbe->fFlagsAndPhysRev = ~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A); /* skipping NX */
6160 pTlbe->GCPhys = Walk.GCPhys;
6161 pTlbe->pbMappingR3 = NULL;
6162 }
6163
6164 /*
6165 * Check the flags and physical revision.
6166 */
6167 /** @todo make the caller pass these in with fAccess. */
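    /* Compose the TLB flags that must be clear for this access to go the
       fast way:
         - fNoUser:         CPL-3, non-system accesses require a user page;
         - fNoWriteNoDirty: writes require the physical page to be writable,
                            the dirty bit to be set, and - when CR0.WP is set
                            or this is a user-mode access - the PTE to permit
                            writing;
         - fNoRead:         reads require the physical page to be readable. */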
6168 uint64_t const fNoUser = (fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS && pVCpu->iem.s.uCpl == 3
6169 ? IEMTLBE_F_PT_NO_USER : 0;
6170 uint64_t const fNoWriteNoDirty = fAccess & IEM_ACCESS_TYPE_WRITE
6171 ? IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PT_NO_DIRTY
6172 | ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)
6173 || (pVCpu->iem.s.uCpl == 3 && (fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6174 ? IEMTLBE_F_PT_NO_WRITE : 0)
6175 : 0;
6176 uint64_t const fNoRead = fAccess & IEM_ACCESS_TYPE_READ ? IEMTLBE_F_PG_NO_READ : 0;
6177 uint8_t *pbMem = NULL;
6178 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PT_NO_ACCESSED | fNoRead | fNoWriteNoDirty | fNoUser))
6179 == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6180# ifdef IN_RING3
6181 pbMem = pTlbe->pbMappingR3;
6182# else
6183 pbMem = NULL;
6184# endif
6185 else
6186 {
6187 /*
6188 * Okay, something isn't quite right or needs refreshing.
6189 */
6190 /* Write to read only memory? */
6191 if (pTlbe->fFlagsAndPhysRev & fNoWriteNoDirty & IEMTLBE_F_PT_NO_WRITE)
6192 {
6193 Log(("iemMemMapJmp: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
6194# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6195 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6196 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6197# endif
6198 iemRaisePageFaultJmp(pVCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
6199 }
6200
6201 /* Kernel memory accessed by userland? */
6202 if (pTlbe->fFlagsAndPhysRev & fNoUser & IEMTLBE_F_PT_NO_USER)
6203 {
6204 Log(("iemMemMapJmp: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
6205# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6206 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6207 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6208# endif
6209 iemRaisePageFaultJmp(pVCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
6210 }
6211
6212 /* Set the dirty / access flags.
6213 ASSUMES this is set when the address is translated rather than on commit... */
6214 /** @todo testcase: check when A and D bits are actually set by the CPU. */
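    /* PGMGstModifyPage sets the accessed bit (and the dirty bit for writes)
       in the guest PTE; the corresponding TLB flags are cleared afterwards so
       later accesses to this page can take the fast path. */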
6215 if (pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY) | IEMTLBE_F_PT_NO_ACCESSED))
6216 {
6217 uint32_t const fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
6218 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
6219 AssertRC(rc2);
6220 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
6221 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
6222 pTlbe->fFlagsAndPhysRev &= ~((fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY) | IEMTLBE_F_PT_NO_ACCESSED);
6223 }
6224
6225 /*
6226 * Check if the physical page info needs updating.
6227 */
6228 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6229# ifdef IN_RING3
6230 pbMem = pTlbe->pbMappingR3;
6231# else
6232 pbMem = NULL;
6233# endif
6234 else
6235 {
6236 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
6237 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
6238 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
6239 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
6240 pTlbe->pbMappingR3 = NULL;
6241 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
6242 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_UNASSIGNED);
6243 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
6244 &pbMem, &pTlbe->fFlagsAndPhysRev);
6245 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
6246# ifdef IN_RING3
6247 pTlbe->pbMappingR3 = pbMem;
6248# endif
6249 }
6250
6251 /*
6252 * Check the physical page level access and mapping.
6253 */
6254 if (!(pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty | fNoRead) & (IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ))))
6255 { /* probably likely */ }
6256 else
6257 {
6258 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, (void **)&pbMem, cbMem,
6259 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,
6260 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED
6261 : pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? VERR_PGM_PHYS_TLB_CATCH_ALL
6262 : VERR_PGM_PHYS_TLB_CATCH_WRITE);
6263 if (rcStrict == VINF_SUCCESS)
6264 return pbMem;
6265 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
6266 }
6267 }
6268 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3)); /* ASSUMPTIONS about PGMPhysIemGCPhys2PtrNoLock behaviour. */
6269
6270 if (pbMem)
6271 {
6272 Assert(!((uintptr_t)pbMem & GUEST_PAGE_OFFSET_MASK));
6273 pbMem = pbMem + (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6274 fAccess |= IEM_ACCESS_NOT_LOCKED;
6275 }
6276 else
6277 {
6278 Assert(!(fAccess & IEM_ACCESS_NOT_LOCKED));
6279 RTGCPHYS const GCPhysFirst = pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6280 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6281 if (rcStrict == VINF_SUCCESS)
6282 return pbMem;
6283 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
6284 }
6285
6286 void * const pvMem = pbMem;
6287
6288 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6289 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6290 if (fAccess & IEM_ACCESS_TYPE_READ)
6291 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6292
6293#else /* !IEM_WITH_DATA_TLB */
6294
6295
6296 RTGCPHYS GCPhysFirst;
6297 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
6298 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
6299 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
6300
6301 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6302 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6303 if (fAccess & IEM_ACCESS_TYPE_READ)
6304 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6305
6306 void *pvMem;
6307 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6308 if (rcStrict == VINF_SUCCESS)
6309 { /* likely */ }
6310 else
6311 {
6312 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
6313 if (rcStrict == VINF_SUCCESS)
6314 return pvMem;
6315 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
6316 }
6317
6318#endif /* !IEM_WITH_DATA_TLB */
6319
6320 /*
6321 * Fill in the mapping table entry.
6322 */
6323 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
6324 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
6325 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6326 pVCpu->iem.s.cActiveMappings++;
6327
6328 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6329 return pvMem;
6330}
6331
6332
6333/**
6334 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
6335 *
6336 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6337 * @param pvMem The mapping.
6338 * @param fAccess The kind of access.
6339 */
6340void iemMemCommitAndUnmapJmp(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess) RT_NOEXCEPT
6341{
6342 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
6343 AssertStmt(iMemMap >= 0, longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), iMemMap));
6344
6345 /* If it's bounce buffered, we may need to write back the buffer. */
6346 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6347 {
6348 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6349 {
6350 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
6351 if (rcStrict == VINF_SUCCESS)
6352 return;
6353 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
6354 }
6355 }
6356 /* Otherwise unlock it. */
6357 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6358 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6359
6360 /* Free the entry. */
6361 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6362 Assert(pVCpu->iem.s.cActiveMappings != 0);
6363 pVCpu->iem.s.cActiveMappings--;
6364}
6365
6366#endif /* IEM_WITH_SETJMP */
6367
6368#ifndef IN_RING3
6369/**
6370 * Commits the guest memory if bounce buffered and unmaps it; if any bounce
6371 * buffer part shows trouble, the write-back is postponed to ring-3 (sets FF and stuff).
6372 *
6373 * Allows the instruction to be completed and retired, while the IEM user will
6374 * return to ring-3 immediately afterwards and do the postponed writes there.
6375 *
6376 * @returns VBox status code (no strict statuses). Caller must check
6377 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
6378 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6379 * @param pvMem The mapping.
6380 * @param fAccess The kind of access.
6381 */
6382VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess) RT_NOEXCEPT
6383{
6384 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
6385 AssertReturn(iMemMap >= 0, iMemMap);
6386
6387 /* If it's bounce buffered, we may need to write back the buffer. */
6388 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6389 {
6390 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6391 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
6392 }
6393 /* Otherwise unlock it. */
6394 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6395 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6396
6397 /* Free the entry. */
6398 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6399 Assert(pVCpu->iem.s.cActiveMappings != 0);
6400 pVCpu->iem.s.cActiveMappings--;
6401 return VINF_SUCCESS;
6402}
6403#endif
6404
6405
6406/**
6407 * Rolls back mappings, releasing page locks and such.
6408 *
6409 * The caller shall only call this after checking cActiveMappings.
6410 *
6411 * @returns Strict VBox status code to pass up.
6412 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6413 */
6414void iemMemRollback(PVMCPUCC pVCpu) RT_NOEXCEPT
6415{
6416 Assert(pVCpu->iem.s.cActiveMappings > 0);
6417
6418 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
6419 while (iMemMap-- > 0)
6420 {
6421 uint32_t const fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
6422 if (fAccess != IEM_ACCESS_INVALID)
6423 {
6424 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
6425 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6426 if (!(fAccess & (IEM_ACCESS_BOUNCE_BUFFERED | IEM_ACCESS_NOT_LOCKED)))
6427 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6428 AssertMsg(pVCpu->iem.s.cActiveMappings > 0,
6429 ("iMemMap=%u fAccess=%#x pv=%p GCPhysFirst=%RGp GCPhysSecond=%RGp\n",
6430 iMemMap, fAccess, pVCpu->iem.s.aMemMappings[iMemMap].pv,
6431 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond));
6432 pVCpu->iem.s.cActiveMappings--;
6433 }
6434 }
6435}
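
/*
 * Minimal usage sketch (compiled out, for exposition only): callers check
 * cActiveMappings before invoking the rollback, typically on an instruction
 * failure path.
 */
#if 0
    if (pVCpu->iem.s.cActiveMappings > 0)
        iemMemRollback(pVCpu);
#endif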
6436
6437
6438/**
6439 * Fetches a data byte.
6440 *
6441 * @returns Strict VBox status code.
6442 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6443 * @param pu8Dst Where to return the byte.
6444 * @param iSegReg The index of the segment register to use for
6445 * this access. The base and limits are checked.
6446 * @param GCPtrMem The address of the guest memory.
6447 */
6448VBOXSTRICTRC iemMemFetchDataU8(PVMCPUCC pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6449{
6450 /* The lazy approach for now... */
6451 uint8_t const *pu8Src;
6452 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
6453 if (rc == VINF_SUCCESS)
6454 {
6455 *pu8Dst = *pu8Src;
6456 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
6457 }
6458 return rc;
6459}
6460
6461
6462#ifdef IEM_WITH_SETJMP
6463/**
6464 * Fetches a data byte, longjmp on error.
6465 *
6466 * @returns The byte.
6467 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6468 * @param iSegReg The index of the segment register to use for
6469 * this access. The base and limits are checked.
6470 * @param GCPtrMem The address of the guest memory.
6471 */
6472uint8_t iemMemFetchDataU8Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6473{
6474 /* The lazy approach for now... */
6475 uint8_t const *pu8Src = (uint8_t const *)iemMemMapJmp(pVCpu, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
6476 uint8_t const bRet = *pu8Src;
6477 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
6478 return bRet;
6479}
6480#endif /* IEM_WITH_SETJMP */
6481
6482
6483/**
6484 * Fetches a data word.
6485 *
6486 * @returns Strict VBox status code.
6487 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6488 * @param pu16Dst Where to return the word.
6489 * @param iSegReg The index of the segment register to use for
6490 * this access. The base and limits are checked.
6491 * @param GCPtrMem The address of the guest memory.
6492 */
6493VBOXSTRICTRC iemMemFetchDataU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6494{
6495 /* The lazy approach for now... */
6496 uint16_t const *pu16Src;
6497 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
6498 if (rc == VINF_SUCCESS)
6499 {
6500 *pu16Dst = *pu16Src;
6501 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
6502 }
6503 return rc;
6504}
6505
6506
6507#ifdef IEM_WITH_SETJMP
6508/**
6509 * Fetches a data word, longjmp on error.
6510 *
6511 * @returns The word.
6512 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6513 * @param iSegReg The index of the segment register to use for
6514 * this access. The base and limits are checked.
6515 * @param GCPtrMem The address of the guest memory.
6516 */
6517uint16_t iemMemFetchDataU16Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6518{
6519 /* The lazy approach for now... */
6520 uint16_t const *pu16Src = (uint16_t const *)iemMemMapJmp(pVCpu, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
6521 uint16_t const u16Ret = *pu16Src;
6522 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
6523 return u16Ret;
6524}
6525#endif
6526
6527
6528/**
6529 * Fetches a data dword.
6530 *
6531 * @returns Strict VBox status code.
6532 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6533 * @param pu32Dst Where to return the dword.
6534 * @param iSegReg The index of the segment register to use for
6535 * this access. The base and limits are checked.
6536 * @param GCPtrMem The address of the guest memory.
6537 */
6538VBOXSTRICTRC iemMemFetchDataU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6539{
6540 /* The lazy approach for now... */
6541 uint32_t const *pu32Src;
6542 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
6543 if (rc == VINF_SUCCESS)
6544 {
6545 *pu32Dst = *pu32Src;
6546 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
6547 }
6548 return rc;
6549}
6550
6551
6552/**
6553 * Fetches a data dword and zero extends it to a qword.
6554 *
6555 * @returns Strict VBox status code.
6556 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6557 * @param pu64Dst Where to return the qword.
6558 * @param iSegReg The index of the segment register to use for
6559 * this access. The base and limits are checked.
6560 * @param GCPtrMem The address of the guest memory.
6561 */
6562VBOXSTRICTRC iemMemFetchDataU32_ZX_U64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6563{
6564 /* The lazy approach for now... */
6565 uint32_t const *pu32Src;
6566 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
6567 if (rc == VINF_SUCCESS)
6568 {
6569 *pu64Dst = *pu32Src;
6570 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
6571 }
6572 return rc;
6573}
6574
6575
6576#ifdef IEM_WITH_SETJMP
6577
6578/**
6579 * Fetches a data dword, longjmp on error, fallback/safe version.
6580 *
6581 * @returns The dword.
6582 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6583 * @param iSegReg The index of the segment register to use for
6584 * this access. The base and limits are checked.
6585 * @param GCPtrMem The address of the guest memory.
6586 */
6587uint32_t iemMemFetchDataU32SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6588{
6589 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
6590 uint32_t const u32Ret = *pu32Src;
6591 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
6592 return u32Ret;
6593}
6594
6595
6596/**
6597 * Fetches a data dword, longjmp on error.
6598 *
6599 * @returns The dword.
6600 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6601 * @param iSegReg The index of the segment register to use for
6602 * this access. The base and limits are checked.
6603 * @param GCPtrMem The address of the guest memory.
6604 */
6605uint32_t iemMemFetchDataU32Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6606{
6607# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
6608 /*
6609 * Convert from segmented to flat address and check that it doesn't cross a page boundary.
6610 */
6611 RTGCPTR GCPtrEff = iemMemApplySegmentToReadJmp(pVCpu, iSegReg, sizeof(uint32_t), GCPtrMem);
6612 if (RT_LIKELY((GCPtrEff & GUEST_PAGE_OFFSET_MASK) <= GUEST_PAGE_SIZE - sizeof(uint32_t)))
6613 {
6614 /*
6615 * TLB lookup.
6616 */
6617 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrEff);
6618 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
6619 if (pTlbe->uTag == uTag)
6620 {
6621 /*
6622 * Check TLB page table level access flags.
6623 */
6624 uint64_t const fNoUser = pVCpu->iem.s.uCpl == 3 ? IEMTLBE_F_PT_NO_USER : 0;
6625 if ( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_READ
6626 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_NO_MAPPINGR3 | fNoUser))
6627 == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6628 {
6629 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
6630
6631 /*
6632 * Alignment check:
6633 */
6634 /** @todo check priority \#AC vs \#PF */
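                /* #AC is only raised for misaligned accesses at CPL 3 with
                   both CR0.AM and EFLAGS.AC set; otherwise the misaligned
                   access is serviced normally. */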
6635 if ( !(GCPtrEff & (sizeof(uint32_t) - 1))
6636 || !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_AM)
6637 || !pVCpu->cpum.GstCtx.eflags.Bits.u1AC
6638 || pVCpu->iem.s.uCpl != 3)
6639 {
6640 /*
6641 * Fetch and return the dword
6642 */
6643 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
6644 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
6645 return *(uint32_t const *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK];
6646 }
6647 Log10(("iemMemFetchDataU32Jmp: Raising #AC for %RGv\n", GCPtrEff));
6648 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
6649 }
6650 }
6651 }
6652
6653 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
6654 outdated page pointer, or other troubles. */
6655 Log10(("iemMemFetchDataU32Jmp: %u:%RGv fallback\n", iSegReg, GCPtrMem));
6656 return iemMemFetchDataU32SafeJmp(pVCpu, iSegReg, GCPtrMem);
6657
6658# else
6659 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
6660 uint32_t const u32Ret = *pu32Src;
6661 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
6662 return u32Ret;
6663# endif
6664}
6665#endif
6666
6667
6668#ifdef SOME_UNUSED_FUNCTION
6669/**
6670 * Fetches a data dword and sign extends it to a qword.
6671 *
6672 * @returns Strict VBox status code.
6673 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6674 * @param pu64Dst Where to return the sign extended value.
6675 * @param iSegReg The index of the segment register to use for
6676 * this access. The base and limits are checked.
6677 * @param GCPtrMem The address of the guest memory.
6678 */
6679VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6680{
6681 /* The lazy approach for now... */
6682 int32_t const *pi32Src;
6683 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
6684 if (rc == VINF_SUCCESS)
6685 {
6686 *pu64Dst = *pi32Src;
6687 rc = iemMemCommitAndUnmap(pVCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
6688 }
6689#ifdef __GNUC__ /* warning: GCC may be a royal pain */
6690 else
6691 *pu64Dst = 0;
6692#endif
6693 return rc;
6694}
6695#endif
6696
6697
6698/**
6699 * Fetches a data qword.
6700 *
6701 * @returns Strict VBox status code.
6702 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6703 * @param pu64Dst Where to return the qword.
6704 * @param iSegReg The index of the segment register to use for
6705 * this access. The base and limits are checked.
6706 * @param GCPtrMem The address of the guest memory.
6707 */
6708VBOXSTRICTRC iemMemFetchDataU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6709{
6710 /* The lazy approach for now... */
6711 uint64_t const *pu64Src;
6712 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
6713 if (rc == VINF_SUCCESS)
6714 {
6715 *pu64Dst = *pu64Src;
6716 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
6717 }
6718 return rc;
6719}
6720
6721
6722#ifdef IEM_WITH_SETJMP
6723/**
6724 * Fetches a data qword, longjmp on error.
6725 *
6726 * @returns The qword.
6727 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6728 * @param iSegReg The index of the segment register to use for
6729 * this access. The base and limits are checked.
6730 * @param GCPtrMem The address of the guest memory.
6731 */
6732uint64_t iemMemFetchDataU64Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6733{
6734 /* The lazy approach for now... */
6735 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
6736 uint64_t const u64Ret = *pu64Src;
6737 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
6738 return u64Ret;
6739}
6740#endif
6741
6742
6743/**
6744 * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
6745 *
6746 * @returns Strict VBox status code.
6747 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6748 * @param pu64Dst Where to return the qword.
6749 * @param iSegReg The index of the segment register to use for
6750 * this access. The base and limits are checked.
6751 * @param GCPtrMem The address of the guest memory.
6752 */
6753VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6754{
6755 /* The lazy approach for now... */
6756 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
6757 if (RT_UNLIKELY(GCPtrMem & 15))
6758 return iemRaiseGeneralProtectionFault0(pVCpu);
6759
6760 uint64_t const *pu64Src;
6761 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
6762 if (rc == VINF_SUCCESS)
6763 {
6764 *pu64Dst = *pu64Src;
6765 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
6766 }
6767 return rc;
6768}
6769
6770
6771#ifdef IEM_WITH_SETJMP
6772/**
6773 * Fetches a data qword, aligned at a 16 byte boundary (for SSE), longjmp on error.
6774 *
6775 * @returns The qword.
6776 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6777 * @param iSegReg The index of the segment register to use for
6778 * this access. The base and limits are checked.
6779 * @param GCPtrMem The address of the guest memory.
6780 */
6781uint64_t iemMemFetchDataU64AlignedU128Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6782{
6783 /* The lazy approach for now... */
6784 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
6785 if (RT_LIKELY(!(GCPtrMem & 15)))
6786 {
6787 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
6788 uint64_t const u64Ret = *pu64Src;
6789 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
6790 return u64Ret;
6791 }
6792
6793 VBOXSTRICTRC rc = iemRaiseGeneralProtectionFault0(pVCpu);
6794 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rc));
6795}
6796#endif
6797
6798
6799/**
6800 * Fetches a data tword.
6801 *
6802 * @returns Strict VBox status code.
6803 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6804 * @param pr80Dst Where to return the tword.
6805 * @param iSegReg The index of the segment register to use for
6806 * this access. The base and limits are checked.
6807 * @param GCPtrMem The address of the guest memory.
6808 */
6809VBOXSTRICTRC iemMemFetchDataR80(PVMCPUCC pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6810{
6811 /* The lazy approach for now... */
6812 PCRTFLOAT80U pr80Src;
6813 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
6814 if (rc == VINF_SUCCESS)
6815 {
6816 *pr80Dst = *pr80Src;
6817 rc = iemMemCommitAndUnmap(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
6818 }
6819 return rc;
6820}
6821
6822
6823#ifdef IEM_WITH_SETJMP
6824/**
6825 * Fetches a data tword, longjmp on error.
6826 *
6827 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6828 * @param pr80Dst Where to return the tword.
6829 * @param iSegReg The index of the segment register to use for
6830 * this access. The base and limits are checked.
6831 * @param GCPtrMem The address of the guest memory.
6832 */
6833void iemMemFetchDataR80Jmp(PVMCPUCC pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6834{
6835 /* The lazy approach for now... */
6836 PCRTFLOAT80U pr80Src = (PCRTFLOAT80U)iemMemMapJmp(pVCpu, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
6837 *pr80Dst = *pr80Src;
6838 iemMemCommitAndUnmapJmp(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
6839}
6840#endif
6841
6842
6843/**
6844 * Fetches a data tword.
6845 *
6846 * @returns Strict VBox status code.
6847 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6848 * @param pd80Dst Where to return the tword.
6849 * @param iSegReg The index of the segment register to use for
6850 * this access. The base and limits are checked.
6851 * @param GCPtrMem The address of the guest memory.
6852 */
6853VBOXSTRICTRC iemMemFetchDataD80(PVMCPUCC pVCpu, PRTPBCD80U pd80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6854{
6855 /* The lazy approach for now... */
6856 PCRTPBCD80U pd80Src;
6857 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pd80Src, sizeof(*pd80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
6858 if (rc == VINF_SUCCESS)
6859 {
6860 *pd80Dst = *pd80Src;
6861 rc = iemMemCommitAndUnmap(pVCpu, (void *)pd80Src, IEM_ACCESS_DATA_R);
6862 }
6863 return rc;
6864}
6865
6866
6867#ifdef IEM_WITH_SETJMP
6868/**
6869 * Fetches a data tword, longjmp on error.
6870 *
6871 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6872 * @param pd80Dst Where to return the tword.
6873 * @param iSegReg The index of the segment register to use for
6874 * this access. The base and limits are checked.
6875 * @param GCPtrMem The address of the guest memory.
6876 */
6877void iemMemFetchDataD80Jmp(PVMCPUCC pVCpu, PRTPBCD80U pd80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6878{
6879 /* The lazy approach for now... */
6880 PCRTPBCD80U pd80Src = (PCRTPBCD80U)iemMemMapJmp(pVCpu, sizeof(*pd80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
6881 *pd80Dst = *pd80Src;
6882 iemMemCommitAndUnmapJmp(pVCpu, (void *)pd80Src, IEM_ACCESS_DATA_R);
6883}
6884#endif
6885
6886
6887/**
6888 * Fetches a data dqword (double qword), generally SSE related.
6889 *
6890 * @returns Strict VBox status code.
6891 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6892 * @param pu128Dst Where to return the dqword.
6893 * @param iSegReg The index of the segment register to use for
6894 * this access. The base and limits are checked.
6895 * @param GCPtrMem The address of the guest memory.
6896 */
6897VBOXSTRICTRC iemMemFetchDataU128(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6898{
6899 /* The lazy approach for now... */
6900 PCRTUINT128U pu128Src;
6901 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
6902 if (rc == VINF_SUCCESS)
6903 {
6904 pu128Dst->au64[0] = pu128Src->au64[0];
6905 pu128Dst->au64[1] = pu128Src->au64[1];
6906 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
6907 }
6908 return rc;
6909}
6910
6911
6912#ifdef IEM_WITH_SETJMP
6913/**
6914 * Fetches a data dqword (double qword), generally SSE related.
6915 *
6916 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6917 * @param pu128Dst Where to return the dqword.
6918 * @param iSegReg The index of the segment register to use for
6919 * this access. The base and limits are checked.
6920 * @param GCPtrMem The address of the guest memory.
6921 */
6922void iemMemFetchDataU128Jmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6923{
6924 /* The lazy approach for now... */
6925 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
6926 pu128Dst->au64[0] = pu128Src->au64[0];
6927 pu128Dst->au64[1] = pu128Src->au64[1];
6928 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
6929}
6930#endif
6931
6932
6933/**
6934 * Fetches a data dqword (double qword) at an aligned address, generally SSE
6935 * related.
6936 *
6937 * Raises \#GP(0) if not aligned.
6938 *
6939 * @returns Strict VBox status code.
6940 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6941 * @param pu128Dst Where to return the dqword.
6942 * @param iSegReg The index of the segment register to use for
6943 * this access. The base and limits are checked.
6944 * @param GCPtrMem The address of the guest memory.
6945 */
6946VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6947{
6948 /* The lazy approach for now... */
6949 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
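    /* A misaligned 16-byte access normally raises #GP(0); when MXCSR.MM (the
       AMD misaligned SSE mode bit) is set, misaligned accesses are allowed
       and no fault is raised. */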
6950 if ( (GCPtrMem & 15)
6951 && !(pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
6952 return iemRaiseGeneralProtectionFault0(pVCpu);
6953
6954 PCRTUINT128U pu128Src;
6955 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
6956 if (rc == VINF_SUCCESS)
6957 {
6958 pu128Dst->au64[0] = pu128Src->au64[0];
6959 pu128Dst->au64[1] = pu128Src->au64[1];
6960 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
6961 }
6962 return rc;
6963}
6964
6965
6966#ifdef IEM_WITH_SETJMP
6967/**
6968 * Fetches a data dqword (double qword) at an aligned address, generally SSE
6969 * related, longjmp on error.
6970 *
6971 * Raises \#GP(0) if not aligned.
6972 *
6973 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6974 * @param pu128Dst Where to return the dqword.
6975 * @param iSegReg The index of the segment register to use for
6976 * this access. The base and limits are checked.
6977 * @param GCPtrMem The address of the guest memory.
6978 */
6979void iemMemFetchDataU128AlignedSseJmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6980{
6981 /* The lazy approach for now... */
6982 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
6983 if ( (GCPtrMem & 15) == 0
6984 || (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
6985 {
6986 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
6987 pu128Dst->au64[0] = pu128Src->au64[0];
6988 pu128Dst->au64[1] = pu128Src->au64[1];
6989 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
6990 return;
6991 }
6992
6993 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
6994 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
6995}
6996#endif
6997
6998
6999/**
7000 * Fetches a data oword (octo word), generally AVX related.
7001 *
7002 * @returns Strict VBox status code.
7003 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7004 * @param pu256Dst Where to return the oword.
7005 * @param iSegReg The index of the segment register to use for
7006 * this access. The base and limits are checked.
7007 * @param GCPtrMem The address of the guest memory.
7008 */
7009VBOXSTRICTRC iemMemFetchDataU256(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7010{
7011 /* The lazy approach for now... */
7012 PCRTUINT256U pu256Src;
7013 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7014 if (rc == VINF_SUCCESS)
7015 {
7016 pu256Dst->au64[0] = pu256Src->au64[0];
7017 pu256Dst->au64[1] = pu256Src->au64[1];
7018 pu256Dst->au64[2] = pu256Src->au64[2];
7019 pu256Dst->au64[3] = pu256Src->au64[3];
7020 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
7021 }
7022 return rc;
7023}
7024
7025
7026#ifdef IEM_WITH_SETJMP
7027/**
7028 * Fetches a data oword (octo word), generally AVX related.
7029 *
7030 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7031 * @param pu256Dst Where to return the oword.
7032 * @param iSegReg The index of the segment register to use for
7033 * this access. The base and limits are checked.
7034 * @param GCPtrMem The address of the guest memory.
7035 */
7036void iemMemFetchDataU256Jmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7037{
7038 /* The lazy approach for now... */
7039 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7040 pu256Dst->au64[0] = pu256Src->au64[0];
7041 pu256Dst->au64[1] = pu256Src->au64[1];
7042 pu256Dst->au64[2] = pu256Src->au64[2];
7043 pu256Dst->au64[3] = pu256Src->au64[3];
7044 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
7045}
7046#endif
7047
7048
7049/**
7050 * Fetches a data oword (octo word) at an aligned address, generally AVX
7051 * related.
7052 *
7053 * Raises \#GP(0) if not aligned.
7054 *
7055 * @returns Strict VBox status code.
7056 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7057 * @param pu256Dst Where to return the oword.
7058 * @param iSegReg The index of the segment register to use for
7059 * this access. The base and limits are checked.
7060 * @param GCPtrMem The address of the guest memory.
7061 */
7062VBOXSTRICTRC iemMemFetchDataU256AlignedSse(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7063{
7064 /* The lazy approach for now... */
7065 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on AVX stuff. */
7066 if (GCPtrMem & 31)
7067 return iemRaiseGeneralProtectionFault0(pVCpu);
7068
7069 PCRTUINT256U pu256Src;
7070 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7071 if (rc == VINF_SUCCESS)
7072 {
7073 pu256Dst->au64[0] = pu256Src->au64[0];
7074 pu256Dst->au64[1] = pu256Src->au64[1];
7075 pu256Dst->au64[2] = pu256Src->au64[2];
7076 pu256Dst->au64[3] = pu256Src->au64[3];
7077 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
7078 }
7079 return rc;
7080}
7081
7082
7083#ifdef IEM_WITH_SETJMP
7084/**
7085 * Fetches a data oword (octo word) at an aligned address, generally AVX
7086 * related, longjmp on error.
7087 *
7088 * Raises \#GP(0) if not aligned.
7089 *
7090 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7091 * @param pu256Dst Where to return the oword.
7092 * @param iSegReg The index of the segment register to use for
7093 * this access. The base and limits are checked.
7094 * @param GCPtrMem The address of the guest memory.
7095 */
7096void iemMemFetchDataU256AlignedSseJmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7097{
7098 /* The lazy approach for now... */
7099 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on AVX stuff. */
7100 if ((GCPtrMem & 31) == 0)
7101 {
7102 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7103 pu256Dst->au64[0] = pu256Src->au64[0];
7104 pu256Dst->au64[1] = pu256Src->au64[1];
7105 pu256Dst->au64[2] = pu256Src->au64[2];
7106 pu256Dst->au64[3] = pu256Src->au64[3];
7107 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
7108 return;
7109 }
7110
7111 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
7112 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
7113}
7114#endif
7115
7116
7117
7118/**
7119 * Fetches a descriptor register (lgdt, lidt).
7120 *
7121 * @returns Strict VBox status code.
7122 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7123 * @param pcbLimit Where to return the limit.
7124 * @param pGCPtrBase Where to return the base.
7125 * @param iSegReg The index of the segment register to use for
7126 * this access. The base and limits are checked.
7127 * @param GCPtrMem The address of the guest memory.
7128 * @param enmOpSize The effective operand size.
7129 */
7130VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPUCC pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
7131 RTGCPTR GCPtrMem, IEMMODE enmOpSize) RT_NOEXCEPT
7132{
7133 /*
7134 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
7135 * little special:
7136 * - The two reads are done separately.
7137 * - Operand size override works in 16-bit and 32-bit code, but not 64-bit.
7138 * - We suspect the 386 to actually commit the limit before the base in
7139 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
7140 * don't try to emulate this eccentric behavior, because it's not well
7141 * enough understood and rather hard to trigger.
7142 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
7143 */
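    /* The memory operand is a 16-bit limit followed by the base: a 64-bit
       base in long mode, otherwise a 32-bit base of which only the low 24
       bits are used when the operand size is 16-bit. */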
7144 VBOXSTRICTRC rcStrict;
7145 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7146 {
7147 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7148 if (rcStrict == VINF_SUCCESS)
7149 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
7150 }
7151 else
7152 {
7153 uint32_t uTmp = 0; /* (Keeps Visual C++ from complaining about possibly uninitialized use.) */
7154 if (enmOpSize == IEMMODE_32BIT)
7155 {
7156 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
7157 {
7158 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7159 if (rcStrict == VINF_SUCCESS)
7160 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7161 }
7162 else
7163 {
7164 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
7165 if (rcStrict == VINF_SUCCESS)
7166 {
7167 *pcbLimit = (uint16_t)uTmp;
7168 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7169 }
7170 }
7171 if (rcStrict == VINF_SUCCESS)
7172 *pGCPtrBase = uTmp;
7173 }
7174 else
7175 {
7176 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7177 if (rcStrict == VINF_SUCCESS)
7178 {
7179 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7180 if (rcStrict == VINF_SUCCESS)
7181 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
7182 }
7183 }
7184 }
7185 return rcStrict;
7186}
7187
7188
7189
7190/**
7191 * Stores a data byte.
7192 *
7193 * @returns Strict VBox status code.
7194 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7195 * @param iSegReg The index of the segment register to use for
7196 * this access. The base and limits are checked.
7197 * @param GCPtrMem The address of the guest memory.
7198 * @param u8Value The value to store.
7199 */
7200VBOXSTRICTRC iemMemStoreDataU8(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value) RT_NOEXCEPT
7201{
7202 /* The lazy approach for now... */
7203 uint8_t *pu8Dst;
7204 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7205 if (rc == VINF_SUCCESS)
7206 {
7207 *pu8Dst = u8Value;
7208 rc = iemMemCommitAndUnmap(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
7209 }
7210 return rc;
7211}
7212
7213
7214#ifdef IEM_WITH_SETJMP
7215/**
7216 * Stores a data byte, longjmp on error.
7217 *
7218 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7219 * @param iSegReg The index of the segment register to use for
7220 * this access. The base and limits are checked.
7221 * @param GCPtrMem The address of the guest memory.
7222 * @param u8Value The value to store.
7223 */
7224void iemMemStoreDataU8Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value) RT_NOEXCEPT
7225{
7226 /* The lazy approach for now... */
7227 uint8_t *pu8Dst = (uint8_t *)iemMemMapJmp(pVCpu, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7228 *pu8Dst = u8Value;
7229 iemMemCommitAndUnmapJmp(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
7230}
7231#endif
7232
7233
7234/**
7235 * Stores a data word.
7236 *
7237 * @returns Strict VBox status code.
7238 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7239 * @param iSegReg The index of the segment register to use for
7240 * this access. The base and limits are checked.
7241 * @param GCPtrMem The address of the guest memory.
7242 * @param u16Value The value to store.
7243 */
7244VBOXSTRICTRC iemMemStoreDataU16(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value) RT_NOEXCEPT
7245{
7246 /* The lazy approach for now... */
7247 uint16_t *pu16Dst;
7248 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7249 if (rc == VINF_SUCCESS)
7250 {
7251 *pu16Dst = u16Value;
7252 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
7253 }
7254 return rc;
7255}
7256
7257
7258#ifdef IEM_WITH_SETJMP
7259/**
7260 * Stores a data word, longjmp on error.
7261 *
7262 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7263 * @param iSegReg The index of the segment register to use for
7264 * this access. The base and limits are checked.
7265 * @param GCPtrMem The address of the guest memory.
7266 * @param u16Value The value to store.
7267 */
7268void iemMemStoreDataU16Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value) RT_NOEXCEPT
7269{
7270 /* The lazy approach for now... */
7271 uint16_t *pu16Dst = (uint16_t *)iemMemMapJmp(pVCpu, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7272 *pu16Dst = u16Value;
7273 iemMemCommitAndUnmapJmp(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
7274}
7275#endif
7276
7277
7278/**
7279 * Stores a data dword.
7280 *
7281 * @returns Strict VBox status code.
7282 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7283 * @param iSegReg The index of the segment register to use for
7284 * this access. The base and limits are checked.
7285 * @param GCPtrMem The address of the guest memory.
7286 * @param u32Value The value to store.
7287 */
7288VBOXSTRICTRC iemMemStoreDataU32(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value) RT_NOEXCEPT
7289{
7290 /* The lazy approach for now... */
7291 uint32_t *pu32Dst;
7292 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7293 if (rc == VINF_SUCCESS)
7294 {
7295 *pu32Dst = u32Value;
7296 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
7297 }
7298 return rc;
7299}
7300
7301
7302#ifdef IEM_WITH_SETJMP
7303/**
7304 * Stores a data dword, longjmp on error.
7305 *
7307 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7308 * @param iSegReg The index of the segment register to use for
7309 * this access. The base and limits are checked.
7310 * @param GCPtrMem The address of the guest memory.
7311 * @param u32Value The value to store.
7312 */
7313void iemMemStoreDataU32Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value) RT_NOEXCEPT
7314{
7315 /* The lazy approach for now... */
7316 uint32_t *pu32Dst = (uint32_t *)iemMemMapJmp(pVCpu, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7317 *pu32Dst = u32Value;
7318 iemMemCommitAndUnmapJmp(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
7319}
7320#endif
7321
7322
7323/**
7324 * Stores a data qword.
7325 *
7326 * @returns Strict VBox status code.
7327 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7328 * @param iSegReg The index of the segment register to use for
7329 * this access. The base and limits are checked.
7330 * @param GCPtrMem The address of the guest memory.
7331 * @param u64Value The value to store.
7332 */
7333VBOXSTRICTRC iemMemStoreDataU64(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value) RT_NOEXCEPT
7334{
7335 /* The lazy approach for now... */
7336 uint64_t *pu64Dst;
7337 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7338 if (rc == VINF_SUCCESS)
7339 {
7340 *pu64Dst = u64Value;
7341 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
7342 }
7343 return rc;
7344}
7345
7346
7347#ifdef IEM_WITH_SETJMP
7348/**
7349 * Stores a data qword, longjmp on error.
7350 *
7351 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7352 * @param iSegReg The index of the segment register to use for
7353 * this access. The base and limits are checked.
7354 * @param GCPtrMem The address of the guest memory.
7355 * @param u64Value The value to store.
7356 */
7357void iemMemStoreDataU64Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value) RT_NOEXCEPT
7358{
7359 /* The lazy approach for now... */
7360 uint64_t *pu64Dst = (uint64_t *)iemMemMapJmp(pVCpu, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7361 *pu64Dst = u64Value;
7362 iemMemCommitAndUnmapJmp(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
7363}
7364#endif
7365
7366
7367/**
7368 * Stores a data dqword.
7369 *
7370 * @returns Strict VBox status code.
7371 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7372 * @param iSegReg The index of the segment register to use for
7373 * this access. The base and limits are checked.
7374 * @param GCPtrMem The address of the guest memory.
7375 * @param u128Value The value to store.
7376 */
7377VBOXSTRICTRC iemMemStoreDataU128(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT
7378{
7379 /* The lazy approach for now... */
7380 PRTUINT128U pu128Dst;
7381 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7382 if (rc == VINF_SUCCESS)
7383 {
7384 pu128Dst->au64[0] = u128Value.au64[0];
7385 pu128Dst->au64[1] = u128Value.au64[1];
7386 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
7387 }
7388 return rc;
7389}
7390
7391
7392#ifdef IEM_WITH_SETJMP
7393/**
7394 * Stores a data dqword, longjmp on error.
7395 *
7396 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7397 * @param iSegReg The index of the segment register to use for
7398 * this access. The base and limits are checked.
7399 * @param GCPtrMem The address of the guest memory.
7400 * @param u128Value The value to store.
7401 */
7402void iemMemStoreDataU128Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT
7403{
7404 /* The lazy approach for now... */
7405 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7406 pu128Dst->au64[0] = u128Value.au64[0];
7407 pu128Dst->au64[1] = u128Value.au64[1];
7408 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
7409}
7410#endif
7411
7412
7413/**
7414 * Stores a data dqword, SSE aligned.
7415 *
7416 * @returns Strict VBox status code.
7417 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7418 * @param iSegReg The index of the segment register to use for
7419 * this access. The base and limits are checked.
7420 * @param GCPtrMem The address of the guest memory.
7421 * @param u128Value The value to store.
7422 */
7423VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT
7424{
7425 /* The lazy approach for now... */
7426 if ( (GCPtrMem & 15)
7427 && !(pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
7428 return iemRaiseGeneralProtectionFault0(pVCpu);
7429
7430 PRTUINT128U pu128Dst;
7431 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7432 if (rc == VINF_SUCCESS)
7433 {
7434 pu128Dst->au64[0] = u128Value.au64[0];
7435 pu128Dst->au64[1] = u128Value.au64[1];
7436 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
7437 }
7438 return rc;
7439}
7440
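/*
 * Illustrative note on the alignment check above: a 16-byte store through
 * iemMemStoreDataU128AlignedSse to GCPtrMem=0x1008 takes the
 * iemRaiseGeneralProtectionFault0 path since 0x1008 & 15 != 0, while
 * GCPtrMem=0x1010 is accepted.  The X86_MXCSR_MM escape hatch corresponds to
 * AMD's misaligned SSE mode, which suppresses the #GP for such accesses.
 */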
7441
7442#ifdef IEM_WITH_SETJMP
7443/**
7444 * Stores a data dqword, SSE aligned, longjmp on error.
7445 *
7447 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7448 * @param iSegReg The index of the segment register to use for
7449 * this access. The base and limits are checked.
7450 * @param GCPtrMem The address of the guest memory.
7451 * @param u128Value The value to store.
7452 */
7453void iemMemStoreDataU128AlignedSseJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT
7454{
7455 /* The lazy approach for now... */
7456 if ( (GCPtrMem & 15) == 0
7457 || (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
7458 {
7459 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7460 pu128Dst->au64[0] = u128Value.au64[0];
7461 pu128Dst->au64[1] = u128Value.au64[1];
7462 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
7463 return;
7464 }
7465
7466 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
7467 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
7468}
7469#endif
7470
7471
7472/**
7473 * Stores a data qqword (256 bits).
7474 *
7475 * @returns Strict VBox status code.
7476 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7477 * @param iSegReg The index of the segment register to use for
7478 * this access. The base and limits are checked.
7479 * @param GCPtrMem The address of the guest memory.
7480 * @param pu256Value Pointer to the value to store.
7481 */
7482VBOXSTRICTRC iemMemStoreDataU256(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
7483{
7484 /* The lazy approach for now... */
7485 PRTUINT256U pu256Dst;
7486 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7487 if (rc == VINF_SUCCESS)
7488 {
7489 pu256Dst->au64[0] = pu256Value->au64[0];
7490 pu256Dst->au64[1] = pu256Value->au64[1];
7491 pu256Dst->au64[2] = pu256Value->au64[2];
7492 pu256Dst->au64[3] = pu256Value->au64[3];
7493 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
7494 }
7495 return rc;
7496}
7497
7498
7499#ifdef IEM_WITH_SETJMP
7500/**
7501 * Stores a data qqword (256 bits), longjmp on error.
7502 *
7503 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7504 * @param iSegReg The index of the segment register to use for
7505 * this access. The base and limits are checked.
7506 * @param GCPtrMem The address of the guest memory.
7507 * @param pu256Value Pointer to the value to store.
7508 */
7509void iemMemStoreDataU256Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
7510{
7511 /* The lazy approach for now... */
7512 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7513 pu256Dst->au64[0] = pu256Value->au64[0];
7514 pu256Dst->au64[1] = pu256Value->au64[1];
7515 pu256Dst->au64[2] = pu256Value->au64[2];
7516 pu256Dst->au64[3] = pu256Value->au64[3];
7517 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
7518}
7519#endif
7520
7521
7522/**
7523 * Stores a data qqword (256 bits), AVX aligned.
7524 *
7525 * @returns Strict VBox status code.
7526 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7527 * @param iSegReg The index of the segment register to use for
7528 * this access. The base and limits are checked.
7529 * @param GCPtrMem The address of the guest memory.
7530 * @param pu256Value Pointer to the value to store.
7531 */
7532VBOXSTRICTRC iemMemStoreDataU256AlignedAvx(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
7533{
7534 /* The lazy approach for now... */
7535 if (GCPtrMem & 31)
7536 return iemRaiseGeneralProtectionFault0(pVCpu);
7537
7538 PRTUINT256U pu256Dst;
7539 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7540 if (rc == VINF_SUCCESS)
7541 {
7542 pu256Dst->au64[0] = pu256Value->au64[0];
7543 pu256Dst->au64[1] = pu256Value->au64[1];
7544 pu256Dst->au64[2] = pu256Value->au64[2];
7545 pu256Dst->au64[3] = pu256Value->au64[3];
7546 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
7547 }
7548 return rc;
7549}
7550
7551
7552#ifdef IEM_WITH_SETJMP
7553/**
7554 * Stores a data qqword (256 bits), AVX aligned, longjmp on error.
7555 *
7557 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7558 * @param iSegReg The index of the segment register to use for
7559 * this access. The base and limits are checked.
7560 * @param GCPtrMem The address of the guest memory.
7561 * @param pu256Value Pointer to the value to store.
7562 */
7563void iemMemStoreDataU256AlignedAvxJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
7564{
7565 /* The lazy approach for now... */
7566 if ((GCPtrMem & 31) == 0)
7567 {
7568 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7569 pu256Dst->au64[0] = pu256Value->au64[0];
7570 pu256Dst->au64[1] = pu256Value->au64[1];
7571 pu256Dst->au64[2] = pu256Value->au64[2];
7572 pu256Dst->au64[3] = pu256Value->au64[3];
7573 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
7574 return;
7575 }
7576
7577 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
7578 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
7579}
7580#endif
7581
7582
7583/**
7584 * Stores a descriptor register (sgdt, sidt).
7585 *
7586 * @returns Strict VBox status code.
7587 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7588 * @param cbLimit The limit.
7589 * @param GCPtrBase The base address.
7590 * @param iSegReg The index of the segment register to use for
7591 * this access. The base and limits are checked.
7592 * @param GCPtrMem The address of the guest memory.
7593 */
7594VBOXSTRICTRC iemMemStoreDataXdtr(PVMCPUCC pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7595{
7596 /*
7597 * The SIDT and SGDT instructions actually store the data using two
7598 * independent writes. The instructions do not respond to opsize prefixes.
7599 */
7600 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
7601 if (rcStrict == VINF_SUCCESS)
7602 {
7603 if (pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT)
7604 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
7605 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
7606 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
7607 else if (pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT)
7608 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
7609 else
7610 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
7611 }
7612 return rcStrict;
7613}
7614
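/*
 * Worked example for the SGDT/SIDT store above (illustrative values): with
 * GDTR.base=0x00201000 and GDTR.limit=0x03FF, a 32-bit guest doing SGDT [m]
 * ends up with two writes at the effective address:
 *     offset 0..1: FF 03             (16-bit limit)
 *     offset 2..5: 00 10 20 00       (32-bit base, little endian)
 * A hypothetical caller (argument names assumed, not taken from this file)
 * could look like:
 *     rcStrict = iemMemStoreDataXdtr(pVCpu, pVCpu->cpum.GstCtx.gdtr.cbGdt,
 *                                    pVCpu->cpum.GstCtx.gdtr.pGdt, iEffSeg, GCPtrEffDst);
 */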
7615
7616/**
7617 * Pushes a word onto the stack.
7618 *
7619 * @returns Strict VBox status code.
7620 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7621 * @param u16Value The value to push.
7622 */
7623VBOXSTRICTRC iemMemStackPushU16(PVMCPUCC pVCpu, uint16_t u16Value) RT_NOEXCEPT
7624{
7625 /* Decrement the stack pointer. */
7626 uint64_t uNewRsp;
7627 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 2, &uNewRsp);
7628
7629 /* Write the word the lazy way. */
7630 uint16_t *pu16Dst;
7631 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7632 if (rc == VINF_SUCCESS)
7633 {
7634 *pu16Dst = u16Value;
7635 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
7636 }
7637
7638 /* Commit the new RSP value unless an access handler made trouble. */
7639 if (rc == VINF_SUCCESS)
7640 pVCpu->cpum.GstCtx.rsp = uNewRsp;
7641
7642 return rc;
7643}
7644
7645
7646/**
7647 * Pushes a dword onto the stack.
7648 *
7649 * @returns Strict VBox status code.
7650 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7651 * @param u32Value The value to push.
7652 */
7653VBOXSTRICTRC iemMemStackPushU32(PVMCPUCC pVCpu, uint32_t u32Value) RT_NOEXCEPT
7654{
7655 /* Decrement the stack pointer. */
7656 uint64_t uNewRsp;
7657 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
7658
7659 /* Write the dword the lazy way. */
7660 uint32_t *pu32Dst;
7661 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7662 if (rc == VINF_SUCCESS)
7663 {
7664 *pu32Dst = u32Value;
7665 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
7666 }
7667
7668 /* Commit the new RSP value unless an access handler made trouble. */
7669 if (rc == VINF_SUCCESS)
7670 pVCpu->cpum.GstCtx.rsp = uNewRsp;
7671
7672 return rc;
7673}
7674
7675
7676/**
7677 * Pushes a dword segment register value onto the stack.
7678 *
7679 * @returns Strict VBox status code.
7680 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7681 * @param u32Value The value to push.
7682 */
7683VBOXSTRICTRC iemMemStackPushU32SReg(PVMCPUCC pVCpu, uint32_t u32Value) RT_NOEXCEPT
7684{
7685 /* Decrement the stack pointer. */
7686 uint64_t uNewRsp;
7687 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
7688
7689 /* The Intel docs talk about zero extending the selector register
7690 value. My actual Intel CPU here might be zero extending the value,
7691 but it still only writes the lower word... */
7692 /** @todo Test this on newer HW, on AMD, and in 64-bit mode. Also test what
7693 * happens when crossing a page boundary: is the high word checked
7694 * for write accessibility or not? Probably it is. What about segment limits?
7695 * It appears this behavior is also shared with trap error codes.
7696 *
7697 * Docs indicate the behavior changed somewhere around the Pentium or Pentium
7698 * Pro. Check ancient hardware to find out when it actually did change. */
7699 uint16_t *pu16Dst;
7700 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_RW);
7701 if (rc == VINF_SUCCESS)
7702 {
7703 *pu16Dst = (uint16_t)u32Value;
7704 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_RW);
7705 }
7706
7707 /* Commit the new RSP value unless an access handler made trouble. */
7708 if (rc == VINF_SUCCESS)
7709 pVCpu->cpum.GstCtx.rsp = uNewRsp;
7710
7711 return rc;
7712}
7713
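/*
 * Illustrative example for the segment register push above: with ESP=0x1000
 * and FS=0x0030, a 32-bit "push fs" emulated via iemMemStackPushU32SReg
 * leaves ESP=0x0FFC and only writes the low word of the new stack slot:
 *     SS:0x0FFC..0x0FFD = 30 00      (the selector value)
 *     SS:0x0FFE..0x0FFF = unchanged  (hence the read-write mapping above)
 */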
7714
7715/**
7716 * Pushes a qword onto the stack.
7717 *
7718 * @returns Strict VBox status code.
7719 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7720 * @param u64Value The value to push.
7721 */
7722VBOXSTRICTRC iemMemStackPushU64(PVMCPUCC pVCpu, uint64_t u64Value) RT_NOEXCEPT
7723{
7724 /* Decrement the stack pointer. */
7725 uint64_t uNewRsp;
7726 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 8, &uNewRsp);
7727
7728 /* Write the qword the lazy way. */
7729 uint64_t *pu64Dst;
7730 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7731 if (rc == VINF_SUCCESS)
7732 {
7733 *pu64Dst = u64Value;
7734 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
7735 }
7736
7737 /* Commit the new RSP value unless an access handler made trouble. */
7738 if (rc == VINF_SUCCESS)
7739 pVCpu->cpum.GstCtx.rsp = uNewRsp;
7740
7741 return rc;
7742}
7743
7744
7745/**
7746 * Pops a word from the stack.
7747 *
7748 * @returns Strict VBox status code.
7749 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7750 * @param pu16Value Where to store the popped value.
7751 */
7752VBOXSTRICTRC iemMemStackPopU16(PVMCPUCC pVCpu, uint16_t *pu16Value) RT_NOEXCEPT
7753{
7754 /* Increment the stack pointer. */
7755 uint64_t uNewRsp;
7756 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 2, &uNewRsp);
7757
7758 /* Read the word the lazy way. */
7759 uint16_t const *pu16Src;
7760 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7761 if (rc == VINF_SUCCESS)
7762 {
7763 *pu16Value = *pu16Src;
7764 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
7765
7766 /* Commit the new RSP value. */
7767 if (rc == VINF_SUCCESS)
7768 pVCpu->cpum.GstCtx.rsp = uNewRsp;
7769 }
7770
7771 return rc;
7772}
7773
7774
7775/**
7776 * Pops a dword from the stack.
7777 *
7778 * @returns Strict VBox status code.
7779 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7780 * @param pu32Value Where to store the popped value.
7781 */
7782VBOXSTRICTRC iemMemStackPopU32(PVMCPUCC pVCpu, uint32_t *pu32Value) RT_NOEXCEPT
7783{
7784 /* Increment the stack pointer. */
7785 uint64_t uNewRsp;
7786 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 4, &uNewRsp);
7787
7788 /* Read the dword the lazy way. */
7789 uint32_t const *pu32Src;
7790 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7791 if (rc == VINF_SUCCESS)
7792 {
7793 *pu32Value = *pu32Src;
7794 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
7795
7796 /* Commit the new RSP value. */
7797 if (rc == VINF_SUCCESS)
7798 pVCpu->cpum.GstCtx.rsp = uNewRsp;
7799 }
7800
7801 return rc;
7802}
7803
7804
7805/**
7806 * Pops a qword from the stack.
7807 *
7808 * @returns Strict VBox status code.
7809 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7810 * @param pu64Value Where to store the popped value.
7811 */
7812VBOXSTRICTRC iemMemStackPopU64(PVMCPUCC pVCpu, uint64_t *pu64Value) RT_NOEXCEPT
7813{
7814 /* Increment the stack pointer. */
7815 uint64_t uNewRsp;
7816 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 8, &uNewRsp);
7817
7818 /* Read the qword the lazy way. */
7819 uint64_t const *pu64Src;
7820 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7821 if (rc == VINF_SUCCESS)
7822 {
7823 *pu64Value = *pu64Src;
7824 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
7825
7826 /* Commit the new RSP value. */
7827 if (rc == VINF_SUCCESS)
7828 pVCpu->cpum.GstCtx.rsp = uNewRsp;
7829 }
7830
7831 return rc;
7832}
7833
7834
7835/**
7836 * Pushes a word onto the stack, using a temporary stack pointer.
7837 *
7838 * @returns Strict VBox status code.
7839 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7840 * @param u16Value The value to push.
7841 * @param pTmpRsp Pointer to the temporary stack pointer.
7842 */
7843VBOXSTRICTRC iemMemStackPushU16Ex(PVMCPUCC pVCpu, uint16_t u16Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
7844{
7845 /* Decrement the stack pointer. */
7846 RTUINT64U NewRsp = *pTmpRsp;
7847 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 2);
7848
7849 /* Write the word the lazy way. */
7850 uint16_t *pu16Dst;
7851 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7852 if (rc == VINF_SUCCESS)
7853 {
7854 *pu16Dst = u16Value;
7855 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
7856 }
7857
7858 /* Commit the new RSP value unless an access handler made trouble. */
7859 if (rc == VINF_SUCCESS)
7860 *pTmpRsp = NewRsp;
7861
7862 return rc;
7863}
7864
7865
7866/**
7867 * Pushes a dword onto the stack, using a temporary stack pointer.
7868 *
7869 * @returns Strict VBox status code.
7870 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7871 * @param u32Value The value to push.
7872 * @param pTmpRsp Pointer to the temporary stack pointer.
7873 */
7874VBOXSTRICTRC iemMemStackPushU32Ex(PVMCPUCC pVCpu, uint32_t u32Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
7875{
7876 /* Decrement the stack pointer. */
7877 RTUINT64U NewRsp = *pTmpRsp;
7878 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 4);
7879
7880 /* Write the dword the lazy way. */
7881 uint32_t *pu32Dst;
7882 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7883 if (rc == VINF_SUCCESS)
7884 {
7885 *pu32Dst = u32Value;
7886 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
7887 }
7888
7889 /* Commit the new RSP value unless an access handler made trouble. */
7890 if (rc == VINF_SUCCESS)
7891 *pTmpRsp = NewRsp;
7892
7893 return rc;
7894}
7895
7896
7897/**
7898 * Pushes a qword onto the stack, using a temporary stack pointer.
7899 *
7900 * @returns Strict VBox status code.
7901 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7902 * @param u64Value The value to push.
7903 * @param pTmpRsp Pointer to the temporary stack pointer.
7904 */
7905VBOXSTRICTRC iemMemStackPushU64Ex(PVMCPUCC pVCpu, uint64_t u64Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
7906{
7907 /* Decrement the stack pointer. */
7908 RTUINT64U NewRsp = *pTmpRsp;
7909 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 8);
7910
7911 /* Write the qword the lazy way. */
7912 uint64_t *pu64Dst;
7913 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7914 if (rc == VINF_SUCCESS)
7915 {
7916 *pu64Dst = u64Value;
7917 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
7918 }
7919
7920 /* Commit the new RSP value unless an access handler made trouble. */
7921 if (rc == VINF_SUCCESS)
7922 *pTmpRsp = NewRsp;
7923
7924 return rc;
7925}
7926
7927
7928/**
7929 * Pops a word from the stack, using a temporary stack pointer.
7930 *
7931 * @returns Strict VBox status code.
7932 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7933 * @param pu16Value Where to store the popped value.
7934 * @param pTmpRsp Pointer to the temporary stack pointer.
7935 */
7936VBOXSTRICTRC iemMemStackPopU16Ex(PVMCPUCC pVCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
7937{
7938 /* Increment the stack pointer. */
7939 RTUINT64U NewRsp = *pTmpRsp;
7940 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 2);
7941
7942 /* Read the word the lazy way. */
7943 uint16_t const *pu16Src;
7944 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7945 if (rc == VINF_SUCCESS)
7946 {
7947 *pu16Value = *pu16Src;
7948 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
7949
7950 /* Commit the new RSP value. */
7951 if (rc == VINF_SUCCESS)
7952 *pTmpRsp = NewRsp;
7953 }
7954
7955 return rc;
7956}
7957
7958
7959/**
7960 * Pops a dword from the stack, using a temporary stack pointer.
7961 *
7962 * @returns Strict VBox status code.
7963 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7964 * @param pu32Value Where to store the popped value.
7965 * @param pTmpRsp Pointer to the temporary stack pointer.
7966 */
7967VBOXSTRICTRC iemMemStackPopU32Ex(PVMCPUCC pVCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
7968{
7969 /* Increment the stack pointer. */
7970 RTUINT64U NewRsp = *pTmpRsp;
7971 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 4);
7972
7973 /* Read the dword the lazy way. */
7974 uint32_t const *pu32Src;
7975 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7976 if (rc == VINF_SUCCESS)
7977 {
7978 *pu32Value = *pu32Src;
7979 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
7980
7981 /* Commit the new RSP value. */
7982 if (rc == VINF_SUCCESS)
7983 *pTmpRsp = NewRsp;
7984 }
7985
7986 return rc;
7987}
7988
7989
7990/**
7991 * Pops a qword from the stack, using a temporary stack pointer.
7992 *
7993 * @returns Strict VBox status code.
7994 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7995 * @param pu64Value Where to store the popped value.
7996 * @param pTmpRsp Pointer to the temporary stack pointer.
7997 */
7998VBOXSTRICTRC iemMemStackPopU64Ex(PVMCPUCC pVCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
7999{
8000 /* Increment the stack pointer. */
8001 RTUINT64U NewRsp = *pTmpRsp;
8002 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 8);
8003
8004 /* Read the qword the lazy way. */
8005 uint64_t const *pu64Src;
8006 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
8007 if (rcStrict == VINF_SUCCESS)
8008 {
8009 *pu64Value = *pu64Src;
8010 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
8011
8012 /* Commit the new RSP value. */
8013 if (rcStrict == VINF_SUCCESS)
8014 *pTmpRsp = NewRsp;
8015 }
8016
8017 return rcStrict;
8018}
8019
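/*
 * Usage sketch for the *Ex push/pop helpers above (hypothetical caller code,
 * variable names assumed): a multi-push sequence threads a temporary RSP and
 * commits it only once every access has succeeded:
 *
 *     RTUINT64U TmpRsp;
 *     TmpRsp.u = pVCpu->cpum.GstCtx.rsp;
 *     VBOXSTRICTRC rcStrict = iemMemStackPushU16Ex(pVCpu, u16FramePtr, &TmpRsp);
 *     if (rcStrict == VINF_SUCCESS)
 *         rcStrict = iemMemStackPushU16Ex(pVCpu, u16NestedFrame, &TmpRsp);
 *     if (rcStrict == VINF_SUCCESS)
 *         pVCpu->cpum.GstCtx.rsp = TmpRsp.u; // commit only after both pushes worked
 */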
8020
8021/**
8022 * Begin a special stack push (used by interrupts, exceptions and such).
8023 *
8024 * This will raise \#SS or \#PF if appropriate.
8025 *
8026 * @returns Strict VBox status code.
8027 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8028 * @param cbMem The number of bytes to push onto the stack.
8029 * @param ppvMem Where to return the pointer to the stack memory.
8030 * As with the other memory functions, this can be
8031 * direct access or bounce buffered access, so
8032 * don't commit the register until the commit call
8033 * succeeds.
8034 * @param puNewRsp Where to return the new RSP value. This must be
8035 * passed unchanged to
8036 * iemMemStackPushCommitSpecial().
8037 */
8038VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp) RT_NOEXCEPT
8039{
8040 Assert(cbMem < UINT8_MAX);
8041 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, (uint8_t)cbMem, puNewRsp);
8042 return iemMemMap(pVCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
8043}
8044
8045
8046/**
8047 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
8048 *
8049 * This will update the rSP.
8050 *
8051 * @returns Strict VBox status code.
8052 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8053 * @param pvMem The pointer returned by
8054 * iemMemStackPushBeginSpecial().
8055 * @param uNewRsp The new RSP value returned by
8056 * iemMemStackPushBeginSpecial().
8057 */
8058VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPUCC pVCpu, void *pvMem, uint64_t uNewRsp) RT_NOEXCEPT
8059{
8060 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem, IEM_ACCESS_STACK_W);
8061 if (rcStrict == VINF_SUCCESS)
8062 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8063 return rcStrict;
8064}
8065
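/*
 * Typical usage sketch for the special push pair above (hypothetical caller,
 * names assumed): exception delivery maps the whole frame first and commits
 * RSP only once the write has gone through:
 *
 *     uint64_t  uNewRsp;
 *     uint32_t *pau32Frame;
 *     VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pVCpu, 8, (void **)&pau32Frame, &uNewRsp);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 *     pau32Frame[0] = uErrorCode;    // frame contents are up to the caller
 *     pau32Frame[1] = uReturnEip;
 *     rcStrict = iemMemStackPushCommitSpecial(pVCpu, pau32Frame, uNewRsp);
 */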
8066
8067/**
8068 * Begin a special stack pop (used by iret, retf and such).
8069 *
8070 * This will raise \#SS or \#PF if appropriate.
8071 *
8072 * @returns Strict VBox status code.
8073 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8074 * @param cbMem The number of bytes to pop from the stack.
8075 * @param ppvMem Where to return the pointer to the stack memory.
8076 * @param puNewRsp Where to return the new RSP value. This must be
8077 * assigned to CPUMCTX::rsp manually some time
8078 * after iemMemStackPopDoneSpecial() has been
8079 * called.
8080 */
8081VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp) RT_NOEXCEPT
8082{
8083 Assert(cbMem < UINT8_MAX);
8084 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, (uint8_t)cbMem, puNewRsp);
8085 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
8086}
8087
8088
8089/**
8090 * Continue a special stack pop (used by iret and retf).
8091 *
8092 * This will raise \#SS or \#PF if appropriate.
8093 *
8094 * @returns Strict VBox status code.
8095 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8096 * @param cbMem The number of bytes to pop from the stack.
8097 * @param ppvMem Where to return the pointer to the stack memory.
8098 * @param puNewRsp Where to return the new RSP value. This must be
8099 * assigned to CPUMCTX::rsp manually some time
8100 * after iemMemStackPopDoneSpecial() has been
8101 * called.
8102 */
8103VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPUCC pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp) RT_NOEXCEPT
8104{
8105 Assert(cbMem < UINT8_MAX);
8106 RTUINT64U NewRsp;
8107 NewRsp.u = *puNewRsp;
8108 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 8);
8109 *puNewRsp = NewRsp.u;
8110 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
8111}
8112
8113
8114/**
8115 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
8116 * iemMemStackPopContinueSpecial).
8117 *
8118 * The caller will manually commit the rSP.
8119 *
8120 * @returns Strict VBox status code.
8121 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8122 * @param pvMem The pointer returned by
8123 * iemMemStackPopBeginSpecial() or
8124 * iemMemStackPopContinueSpecial().
8125 */
8126VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPUCC pVCpu, void const *pvMem) RT_NOEXCEPT
8127{
8128 return iemMemCommitAndUnmap(pVCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
8129}
8130
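/*
 * Usage sketch for the special pop helpers above (hypothetical caller, names
 * assumed): a 32-bit IRET style sequence reads EIP/CS/EFLAGS in one mapping
 * and, as documented, assigns CPUMCTX::rsp manually afterwards:
 *
 *     uint64_t        uNewRsp;
 *     uint32_t const *pau32Frame;
 *     VBOXSTRICTRC rcStrict = iemMemStackPopBeginSpecial(pVCpu, 12, (void const **)&pau32Frame, &uNewRsp);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 *     uint32_t const uNewEip   = pau32Frame[0];
 *     uint16_t const uNewCs    = (uint16_t)pau32Frame[1];
 *     uint32_t const uNewFlags = pau32Frame[2];
 *     rcStrict = iemMemStackPopDoneSpecial(pVCpu, pau32Frame);
 *     if (rcStrict == VINF_SUCCESS)
 *         pVCpu->cpum.GstCtx.rsp = uNewRsp;
 */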
8131
8132/**
8133 * Fetches a system table byte.
8134 *
8135 * @returns Strict VBox status code.
8136 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8137 * @param pbDst Where to return the byte.
8138 * @param iSegReg The index of the segment register to use for
8139 * this access. The base and limits are checked.
8140 * @param GCPtrMem The address of the guest memory.
8141 */
8142VBOXSTRICTRC iemMemFetchSysU8(PVMCPUCC pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8143{
8144 /* The lazy approach for now... */
8145 uint8_t const *pbSrc;
8146 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
8147 if (rc == VINF_SUCCESS)
8148 {
8149 *pbDst = *pbSrc;
8150 rc = iemMemCommitAndUnmap(pVCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
8151 }
8152 return rc;
8153}
8154
8155
8156/**
8157 * Fetches a system table word.
8158 *
8159 * @returns Strict VBox status code.
8160 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8161 * @param pu16Dst Where to return the word.
8162 * @param iSegReg The index of the segment register to use for
8163 * this access. The base and limits are checked.
8164 * @param GCPtrMem The address of the guest memory.
8165 */
8166VBOXSTRICTRC iemMemFetchSysU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8167{
8168 /* The lazy approach for now... */
8169 uint16_t const *pu16Src;
8170 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
8171 if (rc == VINF_SUCCESS)
8172 {
8173 *pu16Dst = *pu16Src;
8174 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
8175 }
8176 return rc;
8177}
8178
8179
8180/**
8181 * Fetches a system table dword.
8182 *
8183 * @returns Strict VBox status code.
8184 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8185 * @param pu32Dst Where to return the dword.
8186 * @param iSegReg The index of the segment register to use for
8187 * this access. The base and limits are checked.
8188 * @param GCPtrMem The address of the guest memory.
8189 */
8190VBOXSTRICTRC iemMemFetchSysU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8191{
8192 /* The lazy approach for now... */
8193 uint32_t const *pu32Src;
8194 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
8195 if (rc == VINF_SUCCESS)
8196 {
8197 *pu32Dst = *pu32Src;
8198 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
8199 }
8200 return rc;
8201}
8202
8203
8204/**
8205 * Fetches a system table qword.
8206 *
8207 * @returns Strict VBox status code.
8208 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8209 * @param pu64Dst Where to return the qword.
8210 * @param iSegReg The index of the segment register to use for
8211 * this access. The base and limits are checked.
8212 * @param GCPtrMem The address of the guest memory.
8213 */
8214VBOXSTRICTRC iemMemFetchSysU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8215{
8216 /* The lazy approach for now... */
8217 uint64_t const *pu64Src;
8218 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
8219 if (rc == VINF_SUCCESS)
8220 {
8221 *pu64Dst = *pu64Src;
8222 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
8223 }
8224 return rc;
8225}
8226
8227
8228/**
8229 * Fetches a descriptor table entry with caller specified error code.
8230 *
8231 * @returns Strict VBox status code.
8232 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8233 * @param pDesc Where to return the descriptor table entry.
8234 * @param uSel The selector which table entry to fetch.
8235 * @param uXcpt The exception to raise on table lookup error.
8236 * @param uErrorCode The error code associated with the exception.
8237 */
8238static VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,
8239 uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT
8240{
8241 AssertPtr(pDesc);
8242 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
8243
8244 /** @todo did the 286 require all 8 bytes to be accessible? */
8245 /*
8246 * Get the selector table base and check bounds.
8247 */
8248 RTGCPTR GCPtrBase;
8249 if (uSel & X86_SEL_LDT)
8250 {
8251 if ( !pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present
8252 || (uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.ldtr.u32Limit )
8253 {
8254 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
8255 uSel, pVCpu->cpum.GstCtx.ldtr.u32Limit, pVCpu->cpum.GstCtx.ldtr.Sel));
8256 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8257 uErrorCode, 0);
8258 }
8259
8260 Assert(pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present);
8261 GCPtrBase = pVCpu->cpum.GstCtx.ldtr.u64Base;
8262 }
8263 else
8264 {
8265 if ((uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.gdtr.cbGdt)
8266 {
8267 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pVCpu->cpum.GstCtx.gdtr.cbGdt));
8268 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8269 uErrorCode, 0);
8270 }
8271 GCPtrBase = pVCpu->cpum.GstCtx.gdtr.pGdt;
8272 }
8273
8274 /*
8275 * Read the legacy descriptor and maybe the long mode extensions if
8276 * required.
8277 */
8278 VBOXSTRICTRC rcStrict;
8279 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)
8280 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
8281 else
8282 {
8283 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);
8284 if (rcStrict == VINF_SUCCESS)
8285 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);
8286 if (rcStrict == VINF_SUCCESS)
8287 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);
8288 if (rcStrict == VINF_SUCCESS)
8289 pDesc->Legacy.au16[3] = 0;
8290 else
8291 return rcStrict;
8292 }
8293
8294 if (rcStrict == VINF_SUCCESS)
8295 {
8296 if ( !IEM_IS_LONG_MODE(pVCpu)
8297 || pDesc->Legacy.Gen.u1DescType)
8298 pDesc->Long.au64[1] = 0;
8299 else if ((uint32_t)(uSel | X86_SEL_RPL_LDT) + 8 <= (uSel & X86_SEL_LDT ? pVCpu->cpum.GstCtx.ldtr.u32Limit : pVCpu->cpum.GstCtx.gdtr.cbGdt))
8300 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
8301 else
8302 {
8303 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
8304 /** @todo is this the right exception? */
8305 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
8306 }
8307 }
8308 return rcStrict;
8309}
8310
8311
8312/**
8313 * Fetches a descriptor table entry.
8314 *
8315 * @returns Strict VBox status code.
8316 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8317 * @param pDesc Where to return the descriptor table entry.
8318 * @param uSel The selector which table entry to fetch.
8319 * @param uXcpt The exception to raise on table lookup error.
8320 */
8321VBOXSTRICTRC iemMemFetchSelDesc(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt) RT_NOEXCEPT
8322{
8323 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
8324}
8325
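/*
 * Selector to descriptor address, by example (illustrative): uSel=0x002B has
 * RPL=3, TI=1 and index 5, so the entry is read from
 * ldtr.u64Base + (0x002B & X86_SEL_MASK) = ldtr.u64Base + 0x28, whereas
 * uSel=0x0010 (TI=0, index 2) is read from gdtr.pGdt + 0x10.
 */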
8326
8327/**
8328 * Marks the selector descriptor as accessed (only non-system descriptors).
8329 *
8330 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
8331 * therefore skips the limit checks.
8332 *
8333 * @returns Strict VBox status code.
8334 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8335 * @param uSel The selector.
8336 */
8337VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
8338{
8339 /*
8340 * Get the selector table base and calculate the entry address.
8341 */
8342 RTGCPTR GCPtr = uSel & X86_SEL_LDT
8343 ? pVCpu->cpum.GstCtx.ldtr.u64Base
8344 : pVCpu->cpum.GstCtx.gdtr.pGdt;
8345 GCPtr += uSel & X86_SEL_MASK;
8346
8347 /*
8348 * ASMAtomicBitSet will assert if the address is misaligned, so do some
8349 * ugly stuff to avoid this. This will make sure it's an atomic access
8350 * as well as more or less remove any question about 8-bit or 32-bit accesses.
8351 */
8352 VBOXSTRICTRC rcStrict;
8353 uint32_t volatile *pu32;
8354 if ((GCPtr & 3) == 0)
8355 {
8356 /* The normal case: map the 32-bit dword around the accessed bit (bit 40). */
8357 GCPtr += 2 + 2;
8358 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
8359 if (rcStrict != VINF_SUCCESS)
8360 return rcStrict;
8361 ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
8362 }
8363 else
8364 {
8365 /* The misaligned GDT/LDT case, map the whole thing. */
8366 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
8367 if (rcStrict != VINF_SUCCESS)
8368 return rcStrict;
8369 switch ((uintptr_t)pu32 & 3)
8370 {
8371 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
8372 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
8373 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
8374 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
8375 }
8376 }
8377
8378 return iemMemCommitAndUnmap(pVCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
8379}
8380
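/*
 * Bit arithmetic behind the aligned path above (informational): a descriptor
 * is 8 bytes and the accessed flag is descriptor bit 40, i.e. bit 0 of the
 * type byte at offset 5.  Mapping the dword at offset 4 and setting bit 8 of
 * it therefore flips exactly that flag:
 *     descriptor bit 40  ==  byte 5 bit 0  ==  (dword at offset 4) bit 8
 */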
8381/** @} */
8382
8383/** @name Opcode Helpers.
8384 * @{
8385 */
8386
8387/**
8388 * Calculates the effective address of a ModR/M memory operand.
8389 *
8390 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8391 *
8392 * @return Strict VBox status code.
8393 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8394 * @param bRm The ModRM byte.
8395 * @param cbImm The size of any immediate following the
8396 * effective address opcode bytes. Important for
8397 * RIP relative addressing.
8398 * @param pGCPtrEff Where to return the effective address.
8399 */
8400VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPUCC pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff) RT_NOEXCEPT
8401{
8402 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
8403# define SET_SS_DEF() \
8404 do \
8405 { \
8406 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
8407 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
8408 } while (0)
8409
8410 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
8411 {
8412/** @todo Check the effective address size crap! */
8413 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
8414 {
8415 uint16_t u16EffAddr;
8416
8417 /* Handle the disp16 form with no registers first. */
8418 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
8419 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
8420 else
8421 {
8422 /* Get the displacement. */
8423 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8424 {
8425 case 0: u16EffAddr = 0; break;
8426 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
8427 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
8428 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
8429 }
8430
8431 /* Add the base and index registers to the disp. */
8432 switch (bRm & X86_MODRM_RM_MASK)
8433 {
8434 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
8435 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
8436 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
8437 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
8438 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
8439 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
8440 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
8441 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
8442 }
8443 }
8444
8445 *pGCPtrEff = u16EffAddr;
8446 }
8447 else
8448 {
8449 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8450 uint32_t u32EffAddr;
8451
8452 /* Handle the disp32 form with no registers first. */
8453 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8454 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
8455 else
8456 {
8457 /* Get the register (or SIB) value. */
8458 switch ((bRm & X86_MODRM_RM_MASK))
8459 {
8460 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8461 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8462 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8463 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8464 case 4: /* SIB */
8465 {
8466 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8467
8468 /* Get the index and scale it. */
8469 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
8470 {
8471 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8472 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8473 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8474 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8475 case 4: u32EffAddr = 0; /*none */ break;
8476 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
8477 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8478 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8479 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8480 }
8481 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8482
8483 /* add base */
8484 switch (bSib & X86_SIB_BASE_MASK)
8485 {
8486 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
8487 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
8488 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
8489 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
8490 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp; SET_SS_DEF(); break;
8491 case 5:
8492 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8493 {
8494 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
8495 SET_SS_DEF();
8496 }
8497 else
8498 {
8499 uint32_t u32Disp;
8500 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8501 u32EffAddr += u32Disp;
8502 }
8503 break;
8504 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
8505 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
8506 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8507 }
8508 break;
8509 }
8510 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
8511 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8512 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8513 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8514 }
8515
8516 /* Get and add the displacement. */
8517 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8518 {
8519 case 0:
8520 break;
8521 case 1:
8522 {
8523 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8524 u32EffAddr += i8Disp;
8525 break;
8526 }
8527 case 2:
8528 {
8529 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8530 u32EffAddr += u32Disp;
8531 break;
8532 }
8533 default:
8534 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
8535 }
8536
8537 }
8538 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
8539 *pGCPtrEff = u32EffAddr;
8540 else
8541 {
8542 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
8543 *pGCPtrEff = u32EffAddr & UINT16_MAX;
8544 }
8545 }
8546 }
8547 else
8548 {
8549 uint64_t u64EffAddr;
8550
8551 /* Handle the rip+disp32 form with no registers first. */
8552 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8553 {
8554 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
8555 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
8556 }
8557 else
8558 {
8559 /* Get the register (or SIB) value. */
8560 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
8561 {
8562 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8563 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8564 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8565 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8566 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
8567 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8568 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8569 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8570 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8571 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8572 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8573 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8574 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8575 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8576 /* SIB */
8577 case 4:
8578 case 12:
8579 {
8580 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8581
8582 /* Get the index and scale it. */
8583 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
8584 {
8585 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8586 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8587 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8588 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8589 case 4: u64EffAddr = 0; /*none */ break;
8590 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
8591 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8592 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8593 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8594 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8595 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8596 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8597 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
8598 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8599 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8600 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8601 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8602 }
8603 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8604
8605 /* add base */
8606 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
8607 {
8608 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
8609 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
8610 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
8611 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
8612 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp; SET_SS_DEF(); break;
8613 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
8614 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
8615 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
8616 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
8617 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
8618 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
8619 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
8620 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
8621 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
8622 /* complicated encodings */
8623 case 5:
8624 case 13:
8625 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8626 {
8627 if (!pVCpu->iem.s.uRexB)
8628 {
8629 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
8630 SET_SS_DEF();
8631 }
8632 else
8633 u64EffAddr += pVCpu->cpum.GstCtx.r13;
8634 }
8635 else
8636 {
8637 uint32_t u32Disp;
8638 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8639 u64EffAddr += (int32_t)u32Disp;
8640 }
8641 break;
8642 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8643 }
8644 break;
8645 }
8646 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8647 }
8648
8649 /* Get and add the displacement. */
8650 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8651 {
8652 case 0:
8653 break;
8654 case 1:
8655 {
8656 int8_t i8Disp;
8657 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8658 u64EffAddr += i8Disp;
8659 break;
8660 }
8661 case 2:
8662 {
8663 uint32_t u32Disp;
8664 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8665 u64EffAddr += (int32_t)u32Disp;
8666 break;
8667 }
8668 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
8669 }
8670
8671 }
8672
8673 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
8674 *pGCPtrEff = u64EffAddr;
8675 else
8676 {
8677 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8678 *pGCPtrEff = u64EffAddr & UINT32_MAX;
8679 }
8680 }
8681
8682 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
8683 return VINF_SUCCESS;
8684}
8685
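/*
 * Worked decode example for the helper above (illustrative): in 32-bit code
 * the byte sequence 8B 44 88 04 is "mov eax, [eax+ecx*4+4]".  For that
 * instruction the helper sees bRm=0x44 (mod=01, rm=100 -> SIB follows),
 * fetches bSib=0x88 (scale=4, index=ecx, base=eax) and the disp8 0x04, and
 * returns
 *     *pGCPtrEff = eax + (ecx << 2) + 4
 * with the default segment left at DS since neither EBP nor ESP is involved.
 */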
8686
8687/**
8688 * Calculates the effective address of a ModR/M memory operand.
8689 *
8690 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8691 *
8692 * @return Strict VBox status code.
8693 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8694 * @param bRm The ModRM byte.
8695 * @param cbImm The size of any immediate following the
8696 * effective address opcode bytes. Important for
8697 * RIP relative addressing.
8698 * @param pGCPtrEff Where to return the effective address.
8699 * @param offRsp RSP displacement.
8700 */
8701VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPUCC pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff, int8_t offRsp) RT_NOEXCEPT
8702{
8703 Log5(("iemOpHlpCalcRmEffAddrEx: bRm=%#x\n", bRm));
8704# define SET_SS_DEF() \
8705 do \
8706 { \
8707 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
8708 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
8709 } while (0)
8710
8711 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
8712 {
8713/** @todo Check the effective address size crap! */
8714 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
8715 {
8716 uint16_t u16EffAddr;
8717
8718 /* Handle the disp16 form with no registers first. */
8719 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
8720 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
8721 else
8722 {
8723 /* Get the displacement. */
8724 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8725 {
8726 case 0: u16EffAddr = 0; break;
8727 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
8728 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
8729 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
8730 }
8731
8732 /* Add the base and index registers to the disp. */
8733 switch (bRm & X86_MODRM_RM_MASK)
8734 {
8735 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
8736 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
8737 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
8738 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
8739 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
8740 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
8741 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
8742 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
8743 }
8744 }
8745
8746 *pGCPtrEff = u16EffAddr;
8747 }
8748 else
8749 {
8750 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8751 uint32_t u32EffAddr;
8752
8753 /* Handle the disp32 form with no registers first. */
8754 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8755 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
8756 else
8757 {
8758 /* Get the register (or SIB) value. */
8759 switch ((bRm & X86_MODRM_RM_MASK))
8760 {
8761 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8762 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8763 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8764 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8765 case 4: /* SIB */
8766 {
8767 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8768
8769 /* Get the index and scale it. */
8770 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
8771 {
8772 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8773 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8774 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8775 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8776 case 4: u32EffAddr = 0; /*none */ break;
8777 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
8778 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8779 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8780 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8781 }
8782 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8783
8784 /* add base */
8785 switch (bSib & X86_SIB_BASE_MASK)
8786 {
8787 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
8788 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
8789 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
8790 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
8791 case 4:
8792 u32EffAddr += pVCpu->cpum.GstCtx.esp + offRsp;
8793 SET_SS_DEF();
8794 break;
8795 case 5:
8796 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8797 {
8798 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
8799 SET_SS_DEF();
8800 }
8801 else
8802 {
8803 uint32_t u32Disp;
8804 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8805 u32EffAddr += u32Disp;
8806 }
8807 break;
8808 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
8809 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
8810 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8811 }
8812 break;
8813 }
8814 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
8815 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8816 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8817 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8818 }
8819
8820 /* Get and add the displacement. */
8821 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8822 {
8823 case 0:
8824 break;
8825 case 1:
8826 {
8827 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8828 u32EffAddr += i8Disp;
8829 break;
8830 }
8831 case 2:
8832 {
8833 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8834 u32EffAddr += u32Disp;
8835 break;
8836 }
8837 default:
8838 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
8839 }
8840
8841 }
8842 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
8843 *pGCPtrEff = u32EffAddr;
8844 else
8845 {
8846 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
8847 *pGCPtrEff = u32EffAddr & UINT16_MAX;
8848 }
8849 }
8850 }
8851 else
8852 {
8853 uint64_t u64EffAddr;
8854
8855 /* Handle the rip+disp32 form with no registers first. */
8856 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8857 {
8858 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
8859 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
8860 }
8861 else
8862 {
8863 /* Get the register (or SIB) value. */
8864 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
8865 {
8866 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8867 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8868 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8869 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8870 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
8871 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8872 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8873 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8874 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8875 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8876 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8877 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8878 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8879 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8880 /* SIB */
8881 case 4:
8882 case 12:
8883 {
8884 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8885
8886 /* Get the index and scale it. */
8887 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
8888 {
8889 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8890 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8891 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8892 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8893 case 4: u64EffAddr = 0; /*none */ break;
8894 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
8895 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8896 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8897 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8898 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8899 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8900 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8901 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
8902 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8903 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8904 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8905 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8906 }
8907 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8908
8909 /* add base */
8910 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
8911 {
8912 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
8913 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
8914 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
8915 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
8916 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + offRsp; SET_SS_DEF(); break;
8917 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
8918 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
8919 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
8920 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
8921 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
8922 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
8923 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
8924 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
8925 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
8926 /* complicated encodings */
8927 case 5:
8928 case 13:
8929 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8930 {
8931 if (!pVCpu->iem.s.uRexB)
8932 {
8933 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
8934 SET_SS_DEF();
8935 }
8936 else
8937 u64EffAddr += pVCpu->cpum.GstCtx.r13;
8938 }
8939 else
8940 {
8941 uint32_t u32Disp;
8942 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8943 u64EffAddr += (int32_t)u32Disp;
8944 }
8945 break;
8946 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8947 }
8948 break;
8949 }
8950 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8951 }
8952
8953 /* Get and add the displacement. */
8954 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8955 {
8956 case 0:
8957 break;
8958 case 1:
8959 {
8960 int8_t i8Disp;
8961 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8962 u64EffAddr += i8Disp;
8963 break;
8964 }
8965 case 2:
8966 {
8967 uint32_t u32Disp;
8968 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8969 u64EffAddr += (int32_t)u32Disp;
8970 break;
8971 }
8972 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
8973 }
8974
8975 }
8976
8977 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
8978 *pGCPtrEff = u64EffAddr;
8979 else
8980 {
8981 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8982 *pGCPtrEff = u64EffAddr & UINT32_MAX;
8983 }
8984 }
8985
8986 Log5(("iemOpHlpCalcRmEffAddrEx: EffAddr=%#010RGv\n", *pGCPtrEff));
8987 return VINF_SUCCESS;
8988}
8989
8990
8991#ifdef IEM_WITH_SETJMP
8992/**
8993 * Calculates the effective address of a ModR/M memory operand.
8994 *
8995 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8996 *
8997 * May longjmp on internal error.
8998 *
8999 * @return The effective address.
9000 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9001 * @param bRm The ModRM byte.
9002 * @param cbImm The size of any immediate following the
9003 * effective address opcode bytes. Important for
9004 * RIP relative addressing.
9005 */
9006RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPUCC pVCpu, uint8_t bRm, uint8_t cbImm) RT_NOEXCEPT
9007{
9008 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
9009# define SET_SS_DEF() \
9010 do \
9011 { \
9012 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
9013 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
9014 } while (0)
9015
9016 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
9017 {
9018/** @todo Check the effective address size crap! */
9019 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
9020 {
9021 uint16_t u16EffAddr;
9022
9023 /* Handle the disp16 form with no registers first. */
9024 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
9025 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
9026 else
9027 {
9028 /* Get the displacement. */
9029 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9030 {
9031 case 0: u16EffAddr = 0; break;
9032 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
9033 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
9034 default: AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_1)); /* (caller checked for these) */
9035 }
9036
9037 /* Add the base and index registers to the disp. */
9038 switch (bRm & X86_MODRM_RM_MASK)
9039 {
9040 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
9041 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
9042 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
9043 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
9044 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
9045 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
9046 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
9047 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
9048 }
9049 }
9050
9051 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
9052 return u16EffAddr;
9053 }
9054
9055 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9056 uint32_t u32EffAddr;
9057
9058 /* Handle the disp32 form with no registers first. */
9059 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9060 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
9061 else
9062 {
9063 /* Get the register (or SIB) value. */
9064 switch ((bRm & X86_MODRM_RM_MASK))
9065 {
9066 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
9067 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
9068 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
9069 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
9070 case 4: /* SIB */
9071 {
9072 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9073
9074 /* Get the index and scale it. */
9075 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
9076 {
9077 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
9078 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
9079 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
9080 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
9081 case 4: u32EffAddr = 0; /*none */ break;
9082 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
9083 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
9084 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
9085 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9086 }
9087 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9088
9089 /* add base */
9090 switch (bSib & X86_SIB_BASE_MASK)
9091 {
9092 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
9093 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
9094 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
9095 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
9096 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp; SET_SS_DEF(); break;
9097 case 5:
9098 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9099 {
9100 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
9101 SET_SS_DEF();
9102 }
9103 else
9104 {
9105 uint32_t u32Disp;
9106 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9107 u32EffAddr += u32Disp;
9108 }
9109 break;
9110 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
9111 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
9112 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9113 }
9114 break;
9115 }
9116 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
9117 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
9118 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
9119 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9120 }
9121
9122 /* Get and add the displacement. */
9123 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9124 {
9125 case 0:
9126 break;
9127 case 1:
9128 {
9129 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9130 u32EffAddr += i8Disp;
9131 break;
9132 }
9133 case 2:
9134 {
9135 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9136 u32EffAddr += u32Disp;
9137 break;
9138 }
9139 default:
9140 AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_2)); /* (caller checked for these) */
9141 }
9142 }
9143
9144 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
9145 {
9146 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
9147 return u32EffAddr;
9148 }
9149 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
9150 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX32\n", u32EffAddr & UINT16_MAX));
9151 return u32EffAddr & UINT16_MAX;
9152 }
9153
9154 uint64_t u64EffAddr;
9155
9156 /* Handle the rip+disp32 form with no registers first. */
9157 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9158 {
9159 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
9160 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
9161 }
9162 else
9163 {
9164 /* Get the register (or SIB) value. */
9165 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
9166 {
9167 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9168 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9169 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9170 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9171 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
9172 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9173 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9174 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9175 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9176 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9177 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9178 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9179 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9180 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9181 /* SIB */
9182 case 4:
9183 case 12:
9184 {
9185 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9186
9187 /* Get the index and scale it. */
9188 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
9189 {
9190 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9191 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9192 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9193 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9194 case 4: u64EffAddr = 0; /*none */ break;
9195 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
9196 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9197 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9198 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9199 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9200 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9201 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9202 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
9203 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9204 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9205 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9206 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9207 }
9208 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9209
9210 /* add base */
9211 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
9212 {
9213 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
9214 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
9215 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
9216 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
9217 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp; SET_SS_DEF(); break;
9218 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
9219 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
9220 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
9221 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
9222 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
9223 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
9224 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
9225 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
9226 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
9227 /* complicated encodings */
9228 case 5:
9229 case 13:
9230 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9231 {
9232 if (!pVCpu->iem.s.uRexB)
9233 {
9234 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
9235 SET_SS_DEF();
9236 }
9237 else
9238 u64EffAddr += pVCpu->cpum.GstCtx.r13;
9239 }
9240 else
9241 {
9242 uint32_t u32Disp;
9243 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9244 u64EffAddr += (int32_t)u32Disp;
9245 }
9246 break;
9247 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9248 }
9249 break;
9250 }
9251 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9252 }
9253
9254 /* Get and add the displacement. */
9255 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9256 {
9257 case 0:
9258 break;
9259 case 1:
9260 {
9261 int8_t i8Disp;
9262 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9263 u64EffAddr += i8Disp;
9264 break;
9265 }
9266 case 2:
9267 {
9268 uint32_t u32Disp;
9269 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9270 u64EffAddr += (int32_t)u32Disp;
9271 break;
9272 }
9273 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
9274 }
9275
9276 }
9277
9278 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
9279 {
9280 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
9281 return u64EffAddr;
9282 }
9283 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9284 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
9285 return u64EffAddr & UINT32_MAX;
9286}
9287#endif /* IEM_WITH_SETJMP */
9288
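/* Worked example (illustrative values only, not taken from the code above): for a
 * 32-bit effective address with ModRM=0x84 (mod=10b, rm=100b, so a SIB byte and a
 * disp32 follow) and SIB=0x4B (scale=01b, index=001b/ECX, base=011b/EBX), the
 * helpers above compute
 *
 *      EffAddr = EBX + (ECX << 1) + disp32
 *
 * e.g. with EBX=0x2000, ECX=0x30 and disp32=0x1000 this is 0x2000 + 0x60 + 0x1000
 * = 0x3060.  In 64-bit mode the same walk additionally folds REX.B/REX.X into the
 * base/index selection and turns mod=00b rm=101b (no SIB) into RIP-relative
 * addressing (RIP of the next instruction, hence the IEM_GET_INSTR_LEN + cbImm
 * term above). */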
9289/** @} */
9290
9291
9292#ifdef LOG_ENABLED
9293/**
9294 * Logs the current instruction.
9295 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9296 * @param fSameCtx Set if we have the same context information as the VMM,
9297 * clear if we may have already executed an instruction in
9298 * our debug context. When clear, we assume IEMCPU holds
9299 * valid CPU mode info.
9300 *
9301 * The @a fSameCtx parameter is now misleading and obsolete.
9302 * @param pszFunction The IEM function doing the execution.
9303 */
9304static void iemLogCurInstr(PVMCPUCC pVCpu, bool fSameCtx, const char *pszFunction) RT_NOEXCEPT
9305{
9306# ifdef IN_RING3
9307 if (LogIs2Enabled())
9308 {
9309 char szInstr[256];
9310 uint32_t cbInstr = 0;
9311 if (fSameCtx)
9312 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
9313 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
9314 szInstr, sizeof(szInstr), &cbInstr);
9315 else
9316 {
9317 uint32_t fFlags = 0;
9318 switch (pVCpu->iem.s.enmCpuMode)
9319 {
9320 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
9321 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
9322 case IEMMODE_16BIT:
9323 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) || pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
9324 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
9325 else
9326 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
9327 break;
9328 }
9329 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fFlags,
9330 szInstr, sizeof(szInstr), &cbInstr);
9331 }
9332
9333 PCX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
9334 Log2(("**** %s\n"
9335 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
9336 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
9337 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
9338 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
9339 " %s\n"
9340 , pszFunction,
9341 pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ebx, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.esi, pVCpu->cpum.GstCtx.edi,
9342 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL, pVCpu->cpum.GstCtx.tr.Sel,
9343 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ds.Sel, pVCpu->cpum.GstCtx.es.Sel,
9344 pVCpu->cpum.GstCtx.fs.Sel, pVCpu->cpum.GstCtx.gs.Sel, pVCpu->cpum.GstCtx.eflags.u,
9345 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
9346 szInstr));
9347
9348 if (LogIs3Enabled())
9349 DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
9350 }
9351 else
9352# endif
9353 LogFlow(("%s: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n", pszFunction, pVCpu->cpum.GstCtx.cs.Sel,
9354 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u));
9355 RT_NOREF_PV(pVCpu); RT_NOREF_PV(fSameCtx);
9356}
9357#endif /* LOG_ENABLED */
9358
9359
9360#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9361/**
9362 * Deals with VMCPU_FF_VMX_APIC_WRITE, VMCPU_FF_VMX_MTF, VMCPU_FF_VMX_NMI_WINDOW,
9363 * VMCPU_FF_VMX_PREEMPT_TIMER and VMCPU_FF_VMX_INT_WINDOW.
9364 *
9365 * @returns Modified rcStrict.
9366 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9367 * @param rcStrict The instruction execution status.
9368 */
9369static VBOXSTRICTRC iemHandleNestedInstructionBoundraryFFs(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict) RT_NOEXCEPT
9370{
9371 Assert(CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)));
9372 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF))
9373 {
9374 /* VMX preemption timer takes priority over NMI-window exits. */
9375 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
9376 {
9377 rcStrict = iemVmxVmexitPreemptTimer(pVCpu);
9378 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER));
9379 }
9380 /*
9381 * Check remaining intercepts.
9382 *
9383 * NMI-window and Interrupt-window VM-exits.
9384 * Interrupt shadow (block-by-STI and Mov SS) inhibits interrupts and may also block NMIs.
9385 * Event injection during VM-entry takes priority over NMI-window and interrupt-window VM-exits.
9386 *
9387 * See Intel spec. 26.7.6 "NMI-Window Exiting".
9388 * See Intel spec. 26.7.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
9389 */
9390 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW | VMCPU_FF_VMX_INT_WINDOW)
9391 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
9392 && !TRPMHasTrap(pVCpu))
9393 {
9394 Assert(CPUMIsGuestVmxInterceptEvents(&pVCpu->cpum.GstCtx));
9395 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW)
9396 && CPUMIsGuestVmxVirtNmiBlocking(&pVCpu->cpum.GstCtx))
9397 {
9398 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_NMI_WINDOW, 0 /* u64ExitQual */);
9399 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW));
9400 }
9401 else if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW)
9402 && CPUMIsGuestVmxVirtIntrEnabled(&pVCpu->cpum.GstCtx))
9403 {
9404 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_INT_WINDOW, 0 /* u64ExitQual */);
9405 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW));
9406 }
9407 }
9408 }
9409 /* TPR-below threshold/APIC write has the highest priority. */
9410 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
9411 {
9412 rcStrict = iemVmxApicWriteEmulation(pVCpu);
9413 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
9414 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE));
9415 }
9416 /* MTF takes priority over VMX-preemption timer. */
9417 else
9418 {
9419 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* u64ExitQual */);
9420 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
9421 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF));
9422 }
9423 return rcStrict;
9424}
9425#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
9426
9427
9428/**
9429 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
9430 * IEMExecOneWithPrefetchedByPC.
9431 *
9432 * Similar code is found in IEMExecLots.
9433 *
9434 * @return Strict VBox status code.
9435 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9436 * @param fExecuteInhibit If set, execute the instruction following CLI,
9437 * POP SS and MOV SS,GR.
9438 * @param pszFunction The calling function name.
9439 */
9440DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPUCC pVCpu, bool fExecuteInhibit, const char *pszFunction)
9441{
9442 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9443 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9444 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9445 RT_NOREF_PV(pszFunction);
9446
9447#ifdef IEM_WITH_SETJMP
9448 VBOXSTRICTRC rcStrict;
9449 jmp_buf JmpBuf;
9450 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
9451 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
9452 if ((rcStrict = setjmp(JmpBuf)) == 0)
9453 {
9454 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
9455 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
9456 }
9457 else
9458 pVCpu->iem.s.cLongJumps++;
9459 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
9460#else
9461 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
9462 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
9463#endif
9464 if (rcStrict == VINF_SUCCESS)
9465 pVCpu->iem.s.cInstructions++;
9466 if (pVCpu->iem.s.cActiveMappings > 0)
9467 {
9468 Assert(rcStrict != VINF_SUCCESS);
9469 iemMemRollback(pVCpu);
9470 }
9471 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9472 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9473 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9474
9475//#ifdef DEBUG
9476// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
9477//#endif
9478
9479#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9480 /*
9481 * Perform any VMX nested-guest instruction boundary actions.
9482 *
9483 * If any of these causes a VM-exit, we must skip executing the next
9484 * instruction (would run into stale page tables). A VM-exit makes sure
9485 * there is no interrupt-inhibition, so that should ensure we don't go
9486 * on to try executing the next instruction. Clearing fExecuteInhibit is
9487 * problematic because of the setjmp/longjmp clobbering above.
9488 */
9489 if ( rcStrict == VINF_SUCCESS
9490 && VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9491 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW))
9492 rcStrict = iemHandleNestedInstructionBoundraryFFs(pVCpu, rcStrict);
9493#endif
9494
9495 /* Execute the next instruction as well if a cli, pop ss or
9496 mov ss, Gr has just completed successfully. */
9497 if ( fExecuteInhibit
9498 && rcStrict == VINF_SUCCESS
9499 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
9500 && EMIsInhibitInterruptsActive(pVCpu))
9501 {
9502 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fBypassHandlers, pVCpu->iem.s.fDisregardLock);
9503 if (rcStrict == VINF_SUCCESS)
9504 {
9505#ifdef LOG_ENABLED
9506 iemLogCurInstr(pVCpu, false, pszFunction);
9507#endif
9508#ifdef IEM_WITH_SETJMP
9509 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
9510 if ((rcStrict = setjmp(JmpBuf)) == 0)
9511 {
9512 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
9513 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
9514 }
9515 else
9516 pVCpu->iem.s.cLongJumps++;
9517 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
9518#else
9519 IEM_OPCODE_GET_NEXT_U8(&b);
9520 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
9521#endif
9522 if (rcStrict == VINF_SUCCESS)
9523 pVCpu->iem.s.cInstructions++;
9524 if (pVCpu->iem.s.cActiveMappings > 0)
9525 {
9526 Assert(rcStrict != VINF_SUCCESS);
9527 iemMemRollback(pVCpu);
9528 }
9529 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9530 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9531 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9532 }
9533 else if (pVCpu->iem.s.cActiveMappings > 0)
9534 iemMemRollback(pVCpu);
9535 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS); /* hope this is correct for all exceptional cases... */
9536 }
9537
9538 /*
9539 * Return value fiddling, statistics and sanity assertions.
9540 */
9541 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9542
9543 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
9544 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
9545 return rcStrict;
9546}
9547
9548
9549/**
9550 * Execute one instruction.
9551 *
9552 * @return Strict VBox status code.
9553 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9554 */
9555VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPUCC pVCpu)
9556{
9557 AssertCompile(sizeof(pVCpu->iem.s) <= sizeof(pVCpu->iem.padding)); /* (tstVMStruct can't do its job w/o instruction stats) */
9558#ifdef LOG_ENABLED
9559 iemLogCurInstr(pVCpu, true, "IEMExecOne");
9560#endif
9561
9562 /*
9563 * Do the decoding and emulation.
9564 */
9565 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
9566 if (rcStrict == VINF_SUCCESS)
9567 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOne");
9568 else if (pVCpu->iem.s.cActiveMappings > 0)
9569 iemMemRollback(pVCpu);
9570
9571 if (rcStrict != VINF_SUCCESS)
9572 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9573 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9574 return rcStrict;
9575}
9576
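#if 0 /* Hypothetical caller sketch (not part of the build): one way a simple
         execution loop could drive IEMExecOne.  The function name and the
         bail-out policy are illustrative assumptions, not the actual EM logic. */
static VBOXSTRICTRC emSketchInterpretSome(PVMCPUCC pVCpu, uint32_t cInstructions)
{
    VBOXSTRICTRC rcStrict = VINF_SUCCESS;
    while (cInstructions-- > 0)
    {
        rcStrict = IEMExecOne(pVCpu);       /* decodes and executes exactly one instruction */
        if (rcStrict != VINF_SUCCESS)       /* stop on the first informational or error status */
            break;
    }
    return rcStrict;
}
#endif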
9577
9578VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
9579{
9580 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
9581
9582 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
9583 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
9584 if (rcStrict == VINF_SUCCESS)
9585 {
9586 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneEx");
9587 if (pcbWritten)
9588 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
9589 }
9590 else if (pVCpu->iem.s.cActiveMappings > 0)
9591 iemMemRollback(pVCpu);
9592
9593 return rcStrict;
9594}
9595
9596
9597VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
9598 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
9599{
9600 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
9601
9602 VBOXSTRICTRC rcStrict;
9603 if ( cbOpcodeBytes
9604 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
9605 {
9606 iemInitDecoder(pVCpu, false, false);
9607#ifdef IEM_WITH_CODE_TLB
9608 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
9609 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
9610 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
9611 pVCpu->iem.s.offCurInstrStart = 0;
9612 pVCpu->iem.s.offInstrNextByte = 0;
9613#else
9614 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
9615 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
9616#endif
9617 rcStrict = VINF_SUCCESS;
9618 }
9619 else
9620 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
9621 if (rcStrict == VINF_SUCCESS)
9622 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneWithPrefetchedByPC");
9623 else if (pVCpu->iem.s.cActiveMappings > 0)
9624 iemMemRollback(pVCpu);
9625
9626 return rcStrict;
9627}
9628
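#if 0 /* Hypothetical usage sketch (not part of the build): feeding opcode bytes the
         caller already has (e.g. copied out of an exit-info buffer) to
         IEMExecOneWithPrefetchedByPC.  Function and parameter names here are
         illustrative assumptions. */
static VBOXSTRICTRC iemSketchExecPrefetched(PVMCPUCC pVCpu, uint64_t GCPtrPC,
                                            uint8_t const *pabBytes, size_t cbBytes)
{
    /* If GCPtrPC does not match the current guest RIP, or cbBytes is zero, the
       call below falls back to a normal opcode prefetch. */
    return IEMExecOneWithPrefetchedByPC(pVCpu, CPUMCTX2CORE(IEM_GET_CTX(pVCpu)),
                                        GCPtrPC, pabBytes, cbBytes);
}
#endif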
9629
9630VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
9631{
9632 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
9633
9634 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
9635 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true, false);
9636 if (rcStrict == VINF_SUCCESS)
9637 {
9638 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassEx");
9639 if (pcbWritten)
9640 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
9641 }
9642 else if (pVCpu->iem.s.cActiveMappings > 0)
9643 iemMemRollback(pVCpu);
9644
9645 return rcStrict;
9646}
9647
9648
9649VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
9650 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
9651{
9652 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
9653
9654 VBOXSTRICTRC rcStrict;
9655 if ( cbOpcodeBytes
9656 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
9657 {
9658 iemInitDecoder(pVCpu, true, false);
9659#ifdef IEM_WITH_CODE_TLB
9660 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
9661 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
9662 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
9663 pVCpu->iem.s.offCurInstrStart = 0;
9664 pVCpu->iem.s.offInstrNextByte = 0;
9665#else
9666 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
9667 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
9668#endif
9669 rcStrict = VINF_SUCCESS;
9670 }
9671 else
9672 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true, false);
9673 if (rcStrict == VINF_SUCCESS)
9674 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPC");
9675 else if (pVCpu->iem.s.cActiveMappings > 0)
9676 iemMemRollback(pVCpu);
9677
9678 return rcStrict;
9679}
9680
9681
9682/**
9683 * For debugging DISGetParamSize, may come in handy.
9684 *
9685 * @returns Strict VBox status code.
9686 * @param pVCpu The cross context virtual CPU structure of the
9687 * calling EMT.
9688 * @param pCtxCore The context core structure.
9689 * @param OpcodeBytesPC The PC of the opcode bytes.
9690 * @param pvOpcodeBytes Prefetched opcode bytes.
9691 * @param cbOpcodeBytes Number of prefetched bytes.
9692 * @param pcbWritten Where to return the number of bytes written.
9693 * Optional.
9694 */
9695VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPCWritten(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
9696 const void *pvOpcodeBytes, size_t cbOpcodeBytes,
9697 uint32_t *pcbWritten)
9698{
9699 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
9700
9701 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
9702 VBOXSTRICTRC rcStrict;
9703 if ( cbOpcodeBytes
9704 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
9705 {
9706 iemInitDecoder(pVCpu, true, false);
9707#ifdef IEM_WITH_CODE_TLB
9708 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
9709 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
9710 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
9711 pVCpu->iem.s.offCurInstrStart = 0;
9712 pVCpu->iem.s.offInstrNextByte = 0;
9713#else
9714 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
9715 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
9716#endif
9717 rcStrict = VINF_SUCCESS;
9718 }
9719 else
9720 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true, false);
9721 if (rcStrict == VINF_SUCCESS)
9722 {
9723 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPCWritten");
9724 if (pcbWritten)
9725 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
9726 }
9727 else if (pVCpu->iem.s.cActiveMappings > 0)
9728 iemMemRollback(pVCpu);
9729
9730 return rcStrict;
9731}
9732
9733
9734/**
9735 * For handling split cacheline lock operations when the host has split-lock
9736 * detection enabled.
9737 *
9738 * This will cause the interpreter to disregard the lock prefix and implicit
9739 * locking (xchg).
9740 *
9741 * @returns Strict VBox status code.
9742 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9743 */
9744VMMDECL(VBOXSTRICTRC) IEMExecOneIgnoreLock(PVMCPUCC pVCpu)
9745{
9746 /*
9747 * Do the decoding and emulation.
9748 */
9749 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, true /*fDisregardLock*/);
9750 if (rcStrict == VINF_SUCCESS)
9751 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneIgnoreLock");
9752 else if (pVCpu->iem.s.cActiveMappings > 0)
9753 iemMemRollback(pVCpu);
9754
9755 if (rcStrict != VINF_SUCCESS)
9756 LogFlow(("IEMExecOneIgnoreLock: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9757 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9758 return rcStrict;
9759}
9760
9761
9762VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPUCC pVCpu, uint32_t cMaxInstructions, uint32_t cPollRate, uint32_t *pcInstructions)
9763{
9764 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
9765 AssertMsg(RT_IS_POWER_OF_TWO(cPollRate + 1), ("%#x\n", cPollRate));
9766
9767 /*
9768 * See if there is an interrupt pending in TRPM, inject it if we can.
9769 */
9770 /** @todo Can we centralize this under CPUMCanInjectInterrupt()? */
9771#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9772 bool fIntrEnabled = CPUMGetGuestGif(&pVCpu->cpum.GstCtx);
9773 if (fIntrEnabled)
9774 {
9775 if (!CPUMIsGuestInNestedHwvirtMode(IEM_GET_CTX(pVCpu)))
9776 fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
9777 else if (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
9778 fIntrEnabled = CPUMIsGuestVmxPhysIntrEnabled(IEM_GET_CTX(pVCpu));
9779 else
9780 {
9781 Assert(CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)));
9782 fIntrEnabled = CPUMIsGuestSvmPhysIntrEnabled(pVCpu, IEM_GET_CTX(pVCpu));
9783 }
9784 }
9785#else
9786 bool fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
9787#endif
9788
9789 /** @todo What if we are injecting an exception and not an interrupt? Is that
9790 * possible here? For now we assert it is indeed only an interrupt. */
9791 if ( fIntrEnabled
9792 && TRPMHasTrap(pVCpu)
9793 && EMGetInhibitInterruptsPC(pVCpu) != pVCpu->cpum.GstCtx.rip)
9794 {
9795 uint8_t u8TrapNo;
9796 TRPMEVENT enmType;
9797 uint32_t uErrCode;
9798 RTGCPTR uCr2;
9799 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */, NULL /* fIcebp */);
9800 AssertRC(rc2);
9801 Assert(enmType == TRPM_HARDWARE_INT);
9802 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
9803 TRPMResetTrap(pVCpu);
9804#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9805 /* Injecting an event may cause a VM-exit. */
9806 if ( rcStrict != VINF_SUCCESS
9807 && rcStrict != VINF_IEM_RAISED_XCPT)
9808 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
9809#else
9810 NOREF(rcStrict);
9811#endif
9812 }
9813
9814 /*
9815 * Initial decoder init w/ prefetch, then setup setjmp.
9816 */
9817 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
9818 if (rcStrict == VINF_SUCCESS)
9819 {
9820#ifdef IEM_WITH_SETJMP
9821 jmp_buf JmpBuf;
9822 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
9823 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
9824 pVCpu->iem.s.cActiveMappings = 0;
9825 if ((rcStrict = setjmp(JmpBuf)) == 0)
9826#endif
9827 {
9828 /*
9829 * The run loop. The caller's cMaxInstructions value bounds how many instructions we execute.
9830 */
9831 uint32_t cMaxInstructionsGccStupidity = cMaxInstructions;
9832 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
9833 for (;;)
9834 {
9835 /*
9836 * Log the state.
9837 */
9838#ifdef LOG_ENABLED
9839 iemLogCurInstr(pVCpu, true, "IEMExecLots");
9840#endif
9841
9842 /*
9843 * Do the decoding and emulation.
9844 */
9845 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
9846 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
9847 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9848 {
9849 Assert(pVCpu->iem.s.cActiveMappings == 0);
9850 pVCpu->iem.s.cInstructions++;
9851 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
9852 {
9853 uint64_t fCpu = pVCpu->fLocalForcedActions
9854 & ( VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
9855 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
9856 | VMCPU_FF_TLB_FLUSH
9857 | VMCPU_FF_INHIBIT_INTERRUPTS
9858 | VMCPU_FF_BLOCK_NMIS
9859 | VMCPU_FF_UNHALT ));
9860
9861 if (RT_LIKELY( ( !fCpu
9862 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
9863 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF) )
9864 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) ))
9865 {
9866 if (cMaxInstructionsGccStupidity-- > 0)
9867 {
9868 /* Poll timers every now and then according to the caller's specs. */
9869 if ( (cMaxInstructionsGccStupidity & cPollRate) != 0
9870 || !TMTimerPollBool(pVM, pVCpu))
9871 {
9872 Assert(pVCpu->iem.s.cActiveMappings == 0);
9873 iemReInitDecoder(pVCpu);
9874 continue;
9875 }
9876 }
9877 }
9878 }
9879 Assert(pVCpu->iem.s.cActiveMappings == 0);
9880 }
9881 else if (pVCpu->iem.s.cActiveMappings > 0)
9882 iemMemRollback(pVCpu);
9883 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9884 break;
9885 }
9886 }
9887#ifdef IEM_WITH_SETJMP
9888 else
9889 {
9890 if (pVCpu->iem.s.cActiveMappings > 0)
9891 iemMemRollback(pVCpu);
9892# if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9893 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9894# endif
9895 pVCpu->iem.s.cLongJumps++;
9896 }
9897 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
9898#endif
9899
9900 /*
9901 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
9902 */
9903 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
9904 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
9905 }
9906 else
9907 {
9908 if (pVCpu->iem.s.cActiveMappings > 0)
9909 iemMemRollback(pVCpu);
9910
9911#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9912 /*
9913 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
9914 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
9915 */
9916 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9917#endif
9918 }
9919
9920 /*
9921 * Maybe re-enter raw-mode and log.
9922 */
9923 if (rcStrict != VINF_SUCCESS)
9924 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9925 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9926 if (pcInstructions)
9927 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
9928 return rcStrict;
9929}
9930
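#if 0 /* Hypothetical usage sketch (not part of the build): executing a batch of
         instructions and reporting how many were retired.  The numbers are
         examples; cPollRate only has to be a power of two minus one, as the
         assertion above requires. */
static VBOXSTRICTRC iemSketchRunBatch(PVMCPUCC pVCpu, uint32_t *pcExecuted)
{
    return IEMExecLots(pVCpu, 4096 /*cMaxInstructions*/, 4095 /*cPollRate*/, pcExecuted);
}
#endif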
9931
9932/**
9933 * Interface used by EMExecuteExec; gathers exit statistics and enforces execution limits.
9934 *
9935 * @returns Strict VBox status code.
9936 * @param pVCpu The cross context virtual CPU structure.
9937 * @param fWillExit To be defined.
9938 * @param cMinInstructions Minimum number of instructions to execute before checking for FFs.
9939 * @param cMaxInstructions Maximum number of instructions to execute.
9940 * @param cMaxInstructionsWithoutExits
9941 * The max number of instructions without exits.
9942 * @param pStats Where to return statistics.
9943 */
9944VMMDECL(VBOXSTRICTRC) IEMExecForExits(PVMCPUCC pVCpu, uint32_t fWillExit, uint32_t cMinInstructions, uint32_t cMaxInstructions,
9945 uint32_t cMaxInstructionsWithoutExits, PIEMEXECFOREXITSTATS pStats)
9946{
9947 NOREF(fWillExit); /** @todo define flexible exit crits */
9948
9949 /*
9950 * Initialize return stats.
9951 */
9952 pStats->cInstructions = 0;
9953 pStats->cExits = 0;
9954 pStats->cMaxExitDistance = 0;
9955 pStats->cReserved = 0;
9956
9957 /*
9958 * Initial decoder init w/ prefetch, then setup setjmp.
9959 */
9960 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
9961 if (rcStrict == VINF_SUCCESS)
9962 {
9963#ifdef IEM_WITH_SETJMP
9964 jmp_buf JmpBuf;
9965 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
9966 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
9967 pVCpu->iem.s.cActiveMappings = 0;
9968 if ((rcStrict = setjmp(JmpBuf)) == 0)
9969#endif
9970 {
9971#ifdef IN_RING0
9972 bool const fCheckPreemptionPending = !RTThreadPreemptIsPossible() || !RTThreadPreemptIsEnabled(NIL_RTTHREAD);
9973#endif
9974 uint32_t cInstructionSinceLastExit = 0;
9975
9976 /*
9977 * The run loop. The caller's cMaxInstructions and cMaxInstructionsWithoutExits values bound execution.
9978 */
9979 PVM pVM = pVCpu->CTX_SUFF(pVM);
9980 for (;;)
9981 {
9982 /*
9983 * Log the state.
9984 */
9985#ifdef LOG_ENABLED
9986 iemLogCurInstr(pVCpu, true, "IEMExecForExits");
9987#endif
9988
9989 /*
9990 * Do the decoding and emulation.
9991 */
9992 uint32_t const cPotentialExits = pVCpu->iem.s.cPotentialExits;
9993
9994 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
9995 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
9996
9997 if ( cPotentialExits != pVCpu->iem.s.cPotentialExits
9998 && cInstructionSinceLastExit > 0 /* don't count the first */ )
9999 {
10000 pStats->cExits += 1;
10001 if (cInstructionSinceLastExit > pStats->cMaxExitDistance)
10002 pStats->cMaxExitDistance = cInstructionSinceLastExit;
10003 cInstructionSinceLastExit = 0;
10004 }
10005
10006 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10007 {
10008 Assert(pVCpu->iem.s.cActiveMappings == 0);
10009 pVCpu->iem.s.cInstructions++;
10010 pStats->cInstructions++;
10011 cInstructionSinceLastExit++;
10012 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
10013 {
10014 uint64_t fCpu = pVCpu->fLocalForcedActions
10015 & ( VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
10016 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
10017 | VMCPU_FF_TLB_FLUSH
10018 | VMCPU_FF_INHIBIT_INTERRUPTS
10019 | VMCPU_FF_BLOCK_NMIS
10020 | VMCPU_FF_UNHALT ));
10021
10022 if (RT_LIKELY( ( ( !fCpu
10023 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
10024 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF))
10025 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) )
10026 || pStats->cInstructions < cMinInstructions))
10027 {
10028 if (pStats->cInstructions < cMaxInstructions)
10029 {
10030 if (cInstructionSinceLastExit <= cMaxInstructionsWithoutExits)
10031 {
10032#ifdef IN_RING0
10033 if ( !fCheckPreemptionPending
10034 || !RTThreadPreemptIsPending(NIL_RTTHREAD))
10035#endif
10036 {
10037 Assert(pVCpu->iem.s.cActiveMappings == 0);
10038 iemReInitDecoder(pVCpu);
10039 continue;
10040 }
10041#ifdef IN_RING0
10042 rcStrict = VINF_EM_RAW_INTERRUPT;
10043 break;
10044#endif
10045 }
10046 }
10047 }
10048 Assert(!(fCpu & VMCPU_FF_IEM));
10049 }
10050 Assert(pVCpu->iem.s.cActiveMappings == 0);
10051 }
10052 else if (pVCpu->iem.s.cActiveMappings > 0)
10053 iemMemRollback(pVCpu);
10054 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10055 break;
10056 }
10057 }
10058#ifdef IEM_WITH_SETJMP
10059 else
10060 {
10061 if (pVCpu->iem.s.cActiveMappings > 0)
10062 iemMemRollback(pVCpu);
10063 pVCpu->iem.s.cLongJumps++;
10064 }
10065 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
10066#endif
10067
10068 /*
10069 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
10070 */
10071 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
10072 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
10073 }
10074 else
10075 {
10076 if (pVCpu->iem.s.cActiveMappings > 0)
10077 iemMemRollback(pVCpu);
10078
10079#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
10080 /*
10081 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
10082 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
10083 */
10084 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10085#endif
10086 }
10087
10088 /*
10089 * Maybe re-enter raw-mode and log.
10090 */
10091 if (rcStrict != VINF_SUCCESS)
10092 LogFlow(("IEMExecForExits: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc; ins=%u exits=%u maxdist=%u\n",
10093 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp,
10094 pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict), pStats->cInstructions, pStats->cExits, pStats->cMaxExitDistance));
10095 return rcStrict;
10096}
10097
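#if 0 /* Hypothetical usage sketch (not part of the build): letting IEM run until it
         has executed 2048 instructions or gone 512 instructions without a potential
         exit, then looking at the returned statistics.  All numbers are illustrative. */
static VBOXSTRICTRC iemSketchExecForExits(PVMCPUCC pVCpu)
{
    IEMEXECFOREXITSTATS Stats;
    VBOXSTRICTRC rcStrict = IEMExecForExits(pVCpu, 0 /*fWillExit*/, 32 /*cMinInstructions*/,
                                            2048 /*cMaxInstructions*/,
                                            512 /*cMaxInstructionsWithoutExits*/, &Stats);
    LogFlow(("iemSketchExecForExits: ins=%u exits=%u maxdist=%u\n",
             Stats.cInstructions, Stats.cExits, Stats.cMaxExitDistance));
    return rcStrict;
}
#endif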
10098
10099/**
10100 * Injects a trap, fault, abort, software interrupt or external interrupt.
10101 *
10102 * The parameter list matches TRPMQueryTrapAll pretty closely.
10103 *
10104 * @returns Strict VBox status code.
10105 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10106 * @param u8TrapNo The trap number.
10107 * @param enmType What type is it (trap/fault/abort), software
10108 * interrupt or hardware interrupt.
10109 * @param uErrCode The error code if applicable.
10110 * @param uCr2 The CR2 value if applicable.
10111 * @param cbInstr The instruction length (only relevant for
10112 * software interrupts).
10113 */
10114VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPUCC pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
10115 uint8_t cbInstr)
10116{
10117 iemInitDecoder(pVCpu, false, false);
10118#ifdef DBGFTRACE_ENABLED
10119 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
10120 u8TrapNo, enmType, uErrCode, uCr2);
10121#endif
10122
10123 uint32_t fFlags;
10124 switch (enmType)
10125 {
10126 case TRPM_HARDWARE_INT:
10127 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
10128 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
10129 uErrCode = uCr2 = 0;
10130 break;
10131
10132 case TRPM_SOFTWARE_INT:
10133 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
10134 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
10135 uErrCode = uCr2 = 0;
10136 break;
10137
10138 case TRPM_TRAP:
10139 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
10140 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
10141 if (u8TrapNo == X86_XCPT_PF)
10142 fFlags |= IEM_XCPT_FLAGS_CR2;
10143 switch (u8TrapNo)
10144 {
10145 case X86_XCPT_DF:
10146 case X86_XCPT_TS:
10147 case X86_XCPT_NP:
10148 case X86_XCPT_SS:
10149 case X86_XCPT_PF:
10150 case X86_XCPT_AC:
10151 case X86_XCPT_GP:
10152 fFlags |= IEM_XCPT_FLAGS_ERR;
10153 break;
10154 }
10155 break;
10156
10157 IEM_NOT_REACHED_DEFAULT_CASE_RET();
10158 }
10159
10160 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
10161
10162 if (pVCpu->iem.s.cActiveMappings > 0)
10163 iemMemRollback(pVCpu);
10164
10165 return rcStrict;
10166}
10167
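#if 0 /* Hypothetical usage sketch (not part of the build): injecting a page fault
         into the guest.  The error code bits and fault address are made up; for
         X86_XCPT_PF the function adds the ERR and CR2 exception flags itself. */
static VBOXSTRICTRC iemSketchInjectPageFault(PVMCPUCC pVCpu)
{
    return IEMInjectTrap(pVCpu, X86_XCPT_PF, TRPM_TRAP, X86_TRAP_PF_P | X86_TRAP_PF_RW,
                         UINT64_C(0x00007ff812345000) /*uCr2*/, 0 /*cbInstr*/);
}
#endif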
10168
10169/**
10170 * Injects the active TRPM event.
10171 *
10172 * @returns Strict VBox status code.
10173 * @param pVCpu The cross context virtual CPU structure.
10174 */
10175VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPUCC pVCpu)
10176{
10177#ifndef IEM_IMPLEMENTS_TASKSWITCH
10178 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
10179#else
10180 uint8_t u8TrapNo;
10181 TRPMEVENT enmType;
10182 uint32_t uErrCode;
10183 RTGCUINTPTR uCr2;
10184 uint8_t cbInstr;
10185 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr, NULL /* fIcebp */);
10186 if (RT_FAILURE(rc))
10187 return rc;
10188
10189 /** @todo r=ramshankar: Pass ICEBP info. to IEMInjectTrap() below and handle
10190 * ICEBP \#DB injection as a special case. */
10191 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
10192#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
10193 if (rcStrict == VINF_SVM_VMEXIT)
10194 rcStrict = VINF_SUCCESS;
10195#endif
10196#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10197 if (rcStrict == VINF_VMX_VMEXIT)
10198 rcStrict = VINF_SUCCESS;
10199#endif
10200 /** @todo Are there any other codes that imply the event was successfully
10201 * delivered to the guest? See @bugref{6607}. */
10202 if ( rcStrict == VINF_SUCCESS
10203 || rcStrict == VINF_IEM_RAISED_XCPT)
10204 TRPMResetTrap(pVCpu);
10205
10206 return rcStrict;
10207#endif
10208}
10209
10210
10211VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
10212{
10213 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
10214 return VERR_NOT_IMPLEMENTED;
10215}
10216
10217
10218VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
10219{
10220 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
10221 return VERR_NOT_IMPLEMENTED;
10222}
10223
10224
10225#if 0 /* The IRET-to-v8086 mode in PATM is very optimistic, so I don't dare do this yet. */
10226/**
10227 * Executes an IRET instruction with the default operand size.
10228 *
10229 * This is for PATM.
10230 *
10231 * @returns VBox status code.
10232 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10233 * @param pCtxCore The register frame.
10234 */
10235VMM_INT_DECL(int) IEMExecInstr_iret(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore)
10236{
10237 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10238
10239 iemCtxCoreToCtx(pCtx, pCtxCore);
10240 iemInitDecoder(pVCpu);
10241 VBOXSTRICTRC rcStrict = iemCImpl_iret(pVCpu, 1, pVCpu->iem.s.enmDefOpSize);
10242 if (rcStrict == VINF_SUCCESS)
10243 iemCtxToCtxCore(pCtxCore, pCtx);
10244 else
10245 LogFlow(("IEMExecInstr_iret: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
10246 pVCpu->cpum.GstCtx.cs, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
10247 return rcStrict;
10248}
10249#endif
10250
10251
10252/**
10253 * Interface for HM and EM for executing string I/O OUT (write) instructions.
10254 *
10255 * This API ASSUMES that the caller has already verified that the guest code is
10256 * allowed to access the I/O port. (The I/O port is in the DX register in the
10257 * guest state.)
10258 *
10259 * @returns Strict VBox status code.
10260 * @param pVCpu The cross context virtual CPU structure.
10261 * @param cbValue The size of the I/O port access (1, 2, or 4).
10262 * @param enmAddrMode The addressing mode.
10263 * @param fRepPrefix Indicates whether a repeat prefix is used
10264 * (doesn't matter which for this instruction).
10265 * @param cbInstr The instruction length in bytes.
10266 * @param iEffSeg The effective segment register (X86_SREG_XXX index).
10267 * @param fIoChecked Whether the access to the I/O port has been
10268 * checked or not. It's typically checked in the
10269 * HM scenario.
10270 */
10271VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
10272 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
10273{
10274 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
10275 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10276
10277 /*
10278 * State init.
10279 */
10280 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10281
10282 /*
10283 * Switch orgy for getting to the right handler.
10284 */
10285 VBOXSTRICTRC rcStrict;
10286 if (fRepPrefix)
10287 {
10288 switch (enmAddrMode)
10289 {
10290 case IEMMODE_16BIT:
10291 switch (cbValue)
10292 {
10293 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10294 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10295 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10296 default:
10297 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10298 }
10299 break;
10300
10301 case IEMMODE_32BIT:
10302 switch (cbValue)
10303 {
10304 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10305 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10306 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10307 default:
10308 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10309 }
10310 break;
10311
10312 case IEMMODE_64BIT:
10313 switch (cbValue)
10314 {
10315 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10316 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10317 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10318 default:
10319 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10320 }
10321 break;
10322
10323 default:
10324 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10325 }
10326 }
10327 else
10328 {
10329 switch (enmAddrMode)
10330 {
10331 case IEMMODE_16BIT:
10332 switch (cbValue)
10333 {
10334 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10335 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10336 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10337 default:
10338 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10339 }
10340 break;
10341
10342 case IEMMODE_32BIT:
10343 switch (cbValue)
10344 {
10345 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10346 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10347 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10348 default:
10349 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10350 }
10351 break;
10352
10353 case IEMMODE_64BIT:
10354 switch (cbValue)
10355 {
10356 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10357 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10358 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10359 default:
10360 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10361 }
10362 break;
10363
10364 default:
10365 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10366 }
10367 }
10368
10369 if (pVCpu->iem.s.cActiveMappings)
10370 iemMemRollback(pVCpu);
10371
10372 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10373}
10374
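#if 0 /* Hypothetical usage sketch (not part of the build): how a hardware-assisted
         execution exit handler might forward a "rep outsb" exit here.  Decoding the
         exit qualification into these parameters is assumed to have been done by
         the caller; the concrete values are examples only. */
static VBOXSTRICTRC hmSketchHandleOutsExit(PVMCPUCC pVCpu, uint8_t cbInstr)
{
    return IEMExecStringIoWrite(pVCpu, 1 /*cbValue*/, IEMMODE_32BIT /*enmAddrMode*/,
                                true /*fRepPrefix*/, cbInstr, X86_SREG_DS /*iEffSeg*/,
                                true /*fIoChecked*/);
}
#endif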
10375
10376/**
10377 * Interface for HM and EM for executing string I/O IN (read) instructions.
10378 *
10379 * This API ASSUMES that the caller has already verified that the guest code is
10380 * allowed to access the I/O port. (The I/O port is in the DX register in the
10381 * guest state.)
10382 *
10383 * @returns Strict VBox status code.
10384 * @param pVCpu The cross context virtual CPU structure.
10385 * @param cbValue The size of the I/O port access (1, 2, or 4).
10386 * @param enmAddrMode The addressing mode.
10387 * @param fRepPrefix Indicates whether a repeat prefix is used
10388 * (doesn't matter which for this instruction).
10389 * @param cbInstr The instruction length in bytes.
10390 * @param fIoChecked Whether the access to the I/O port has been
10391 * checked or not. It's typically checked in the
10392 * HM scenario.
10393 */
10394VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
10395 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
10396{
10397 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10398
10399 /*
10400 * State init.
10401 */
10402 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10403
10404 /*
10405 * Switch orgy for getting to the right handler.
10406 */
10407 VBOXSTRICTRC rcStrict;
10408 if (fRepPrefix)
10409 {
10410 switch (enmAddrMode)
10411 {
10412 case IEMMODE_16BIT:
10413 switch (cbValue)
10414 {
10415 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
10416 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
10417 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
10418 default:
10419 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10420 }
10421 break;
10422
10423 case IEMMODE_32BIT:
10424 switch (cbValue)
10425 {
10426 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
10427 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
10428 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
10429 default:
10430 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10431 }
10432 break;
10433
10434 case IEMMODE_64BIT:
10435 switch (cbValue)
10436 {
10437 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
10438 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
10439 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
10440 default:
10441 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10442 }
10443 break;
10444
10445 default:
10446 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10447 }
10448 }
10449 else
10450 {
10451 switch (enmAddrMode)
10452 {
10453 case IEMMODE_16BIT:
10454 switch (cbValue)
10455 {
10456 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
10457 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
10458 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
10459 default:
10460 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10461 }
10462 break;
10463
10464 case IEMMODE_32BIT:
10465 switch (cbValue)
10466 {
10467 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
10468 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
10469 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
10470 default:
10471 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10472 }
10473 break;
10474
10475 case IEMMODE_64BIT:
10476 switch (cbValue)
10477 {
10478 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
10479 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
10480 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
10481 default:
10482 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10483 }
10484 break;
10485
10486 default:
10487 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10488 }
10489 }
10490
10491 if ( pVCpu->iem.s.cActiveMappings == 0
10492 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
10493 { /* likely */ }
10494 else
10495 {
10496 AssertMsg(!IOM_SUCCESS(rcStrict), ("%#x\n", VBOXSTRICTRC_VAL(rcStrict)));
10497 iemMemRollback(pVCpu);
10498 }
10499 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10500}
10501
10502
10503/**
10504 * Interface for raw-mode to execute an OUT (write) instruction.
10505 *
10506 * @returns Strict VBox status code.
10507 * @param pVCpu The cross context virtual CPU structure.
10508 * @param cbInstr The instruction length in bytes.
10509 * @param u16Port The port to write to.
10510 * @param fImm Whether the port is specified using an immediate operand or
10511 * using the implicit DX register.
10512 * @param cbReg The register size.
10513 *
10514 * @remarks In ring-0 not all of the state needs to be synced in.
10515 */
10516VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
10517{
10518 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10519 Assert(cbReg <= 4 && cbReg != 3);
10520
10521 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10522 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_out, u16Port, fImm, cbReg);
10523 Assert(!pVCpu->iem.s.cActiveMappings);
10524 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10525}
10526
10527
10528/**
10529 * Interface for raw-mode to execute an IN (read) instruction.
10530 *
10531 * @returns Strict VBox status code.
10532 * @param pVCpu The cross context virtual CPU structure.
10533 * @param cbInstr The instruction length in bytes.
10534 * @param u16Port The port to read.
10535 * @param fImm Whether the port is specified using an immediate operand or
10536 * using the implicit DX register.
10537 * @param cbReg The register size.
10538 */
10539VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
10540{
10541 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10542 Assert(cbReg <= 4 && cbReg != 3);
10543
10544 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10545 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_in, u16Port, fImm, cbReg);
10546 Assert(!pVCpu->iem.s.cActiveMappings);
10547 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10548}
10549
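/*
 * Illustrative sketch (not part of the original file): how a caller such as HM
 * might forward an already decoded OUT or IN instruction to the interfaces
 * above.  The cbInstr, u16Port, fImm and cbReg values are assumed to have been
 * taken from the VM-exit information by the caller; the helper name is a
 * hypothetical placeholder.  The same call-and-forward pattern applies to the
 * other IEMExecDecoded* wrappers that follow.
 */
#if 0 /* example only, not compiled */
static VBOXSTRICTRC exampleEmulatePortIo(PVMCPUCC pVCpu, bool fWrite, uint8_t cbInstr,
                                         uint16_t u16Port, bool fImm, uint8_t cbReg)
{
    /* Hand the decoded instruction to IEM; it updates the guest state and RIP. */
    VBOXSTRICTRC rcStrict = fWrite
                          ? IEMExecDecodedOut(pVCpu, cbInstr, u16Port, fImm, cbReg)
                          : IEMExecDecodedIn( pVCpu, cbInstr, u16Port, fImm, cbReg);

    /* Any guest exception has already been raised by IEM; informational
       statuses are passed back so EM can do its scheduling. */
    return rcStrict;
}
#endif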
10550
10551/**
10552 * Interface for HM and EM to write to a CRx register.
10553 *
10554 * @returns Strict VBox status code.
10555 * @param pVCpu The cross context virtual CPU structure.
10556 * @param cbInstr The instruction length in bytes.
10557 * @param iCrReg The control register number (destination).
10558 * @param iGReg The general purpose register number (source).
10559 *
10560 * @remarks In ring-0 not all of the state needs to be synced in.
10561 */
10562VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
10563{
10564 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10565 Assert(iCrReg < 16);
10566 Assert(iGReg < 16);
10567
10568 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10569 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
10570 Assert(!pVCpu->iem.s.cActiveMappings);
10571 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10572}
10573
10574
10575/**
10576 * Interface for HM and EM to read from a CRx register.
10577 *
10578 * @returns Strict VBox status code.
10579 * @param pVCpu The cross context virtual CPU structure.
10580 * @param cbInstr The instruction length in bytes.
10581 * @param iGReg The general purpose register number (destination).
10582 * @param iCrReg The control register number (source).
10583 *
10584 * @remarks In ring-0 not all of the state needs to be synced in.
10585 */
10586VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
10587{
10588 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10589 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4
10590 | CPUMCTX_EXTRN_APIC_TPR);
10591 Assert(iCrReg < 16);
10592 Assert(iGReg < 16);
10593
10594 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10595 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
10596 Assert(!pVCpu->iem.s.cActiveMappings);
10597 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10598}
10599
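/*
 * Illustrative sketch (not part of the original file): forwarding a decoded
 * MOV CRx,GReg / MOV GReg,CRx.  iCrReg and iGReg are assumed to come from the
 * caller's own instruction decoding; the helper name is a hypothetical
 * placeholder.
 */
#if 0 /* example only, not compiled */
static VBOXSTRICTRC exampleEmulateMovCRx(PVMCPUCC pVCpu, uint8_t cbInstr, bool fWrite,
                                         uint8_t iCrReg, uint8_t iGReg)
{
    /* IEM does the privilege checks and raises any exception itself; the
       strict status is returned unchanged so the caller can act on
       informational codes (e.g. pending paging related work). */
    return fWrite
         ? IEMExecDecodedMovCRxWrite(pVCpu, cbInstr, iCrReg, iGReg)
         : IEMExecDecodedMovCRxRead( pVCpu, cbInstr, iGReg, iCrReg);
}
#endif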
10600
10601/**
10602 * Interface for HM and EM to clear the CR0[TS] bit.
10603 *
10604 * @returns Strict VBox status code.
10605 * @param pVCpu The cross context virtual CPU structure.
10606 * @param cbInstr The instruction length in bytes.
10607 *
10608 * @remarks In ring-0 not all of the state needs to be synced in.
10609 */
10610VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPUCC pVCpu, uint8_t cbInstr)
10611{
10612 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10613
10614 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10615 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
10616 Assert(!pVCpu->iem.s.cActiveMappings);
10617 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10618}
10619
10620
10621/**
10622 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
10623 *
10624 * @returns Strict VBox status code.
10625 * @param pVCpu The cross context virtual CPU structure.
10626 * @param cbInstr The instruction length in bytes.
10627 * @param uValue The value to load into CR0.
10628 * @param GCPtrEffDst The guest-linear address if the LMSW instruction has a
10629 * memory operand. Otherwise pass NIL_RTGCPTR.
10630 *
10631 * @remarks In ring-0 not all of the state needs to be synced in.
10632 */
10633VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t uValue, RTGCPTR GCPtrEffDst)
10634{
10635 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10636
10637 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10638 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_lmsw, uValue, GCPtrEffDst);
10639 Assert(!pVCpu->iem.s.cActiveMappings);
10640 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10641}
10642
10643
10644/**
10645 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
10646 *
10647 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
10648 *
10649 * @returns Strict VBox status code.
10650 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10651 * @param cbInstr The instruction length in bytes.
10652 * @remarks In ring-0 not all of the state needs to be synced in.
10653 * @thread EMT(pVCpu)
10654 */
10655VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPUCC pVCpu, uint8_t cbInstr)
10656{
10657 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10658
10659 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10660 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
10661 Assert(!pVCpu->iem.s.cActiveMappings);
10662 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10663}
10664
10665
10666/**
10667 * Interface for HM and EM to emulate the WBINVD instruction.
10668 *
10669 * @returns Strict VBox status code.
10670 * @param pVCpu The cross context virtual CPU structure.
10671 * @param cbInstr The instruction length in bytes.
10672 *
10673 * @remarks In ring-0 not all of the state needs to be synced in.
10674 */
10675VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWbinvd(PVMCPUCC pVCpu, uint8_t cbInstr)
10676{
10677 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10678
10679 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10680 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wbinvd);
10681 Assert(!pVCpu->iem.s.cActiveMappings);
10682 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10683}
10684
10685
10686/**
10687 * Interface for HM and EM to emulate the INVD instruction.
10688 *
10689 * @returns Strict VBox status code.
10690 * @param pVCpu The cross context virtual CPU structure.
10691 * @param cbInstr The instruction length in bytes.
10692 *
10693 * @remarks In ring-0 not all of the state needs to be synced in.
10694 */
10695VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvd(PVMCPUCC pVCpu, uint8_t cbInstr)
10696{
10697 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10698
10699 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10700 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invd);
10701 Assert(!pVCpu->iem.s.cActiveMappings);
10702 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10703}
10704
10705
10706/**
10707 * Interface for HM and EM to emulate the INVLPG instruction.
10708 *
10709 * @returns Strict VBox status code.
10710 * @retval VINF_PGM_SYNC_CR3
10711 *
10712 * @param pVCpu The cross context virtual CPU structure.
10713 * @param cbInstr The instruction length in bytes.
10714 * @param GCPtrPage The effective address of the page to invalidate.
10715 *
10716 * @remarks In ring-0 not all of the state needs to be synced in.
10717 */
10718VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpg(PVMCPUCC pVCpu, uint8_t cbInstr, RTGCPTR GCPtrPage)
10719{
10720 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10721
10722 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10723 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_invlpg, GCPtrPage);
10724 Assert(!pVCpu->iem.s.cActiveMappings);
10725 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10726}
10727
10728
10729/**
10730 * Interface for HM and EM to emulate the INVPCID instruction.
10731 *
10732 * @returns Strict VBox status code.
10733 * @retval VINF_PGM_SYNC_CR3
10734 *
10735 * @param pVCpu The cross context virtual CPU structure.
10736 * @param cbInstr The instruction length in bytes.
10737 * @param iEffSeg The effective segment register.
10738 * @param GCPtrDesc The effective address of the INVPCID descriptor.
10739 * @param uType The invalidation type.
10740 *
10741 * @remarks In ring-0 not all of the state needs to be synced in.
10742 */
10743VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvpcid(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPTR GCPtrDesc,
10744 uint64_t uType)
10745{
10746 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 4);
10747
10748 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10749 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_invpcid, iEffSeg, GCPtrDesc, uType);
10750 Assert(!pVCpu->iem.s.cActiveMappings);
10751 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10752}
10753
10754
10755/**
10756 * Interface for HM and EM to emulate the CPUID instruction.
10757 *
10758 * @returns Strict VBox status code.
10759 *
10760 * @param pVCpu The cross context virtual CPU structure.
10761 * @param cbInstr The instruction length in bytes.
10762 *
10763 * @remarks Not all of the state needs to be synced in, the usual plus RAX and RCX.
10764 */
10765VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedCpuid(PVMCPUCC pVCpu, uint8_t cbInstr)
10766{
10767 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10768 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX);
10769
10770 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10771 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_cpuid);
10772 Assert(!pVCpu->iem.s.cActiveMappings);
10773 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10774}
10775
10776
10777/**
10778 * Interface for HM and EM to emulate the RDPMC instruction.
10779 *
10780 * @returns Strict VBox status code.
10781 *
10782 * @param pVCpu The cross context virtual CPU structure.
10783 * @param cbInstr The instruction length in bytes.
10784 *
10785 * @remarks Not all of the state needs to be synced in.
10786 */
10787VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdpmc(PVMCPUCC pVCpu, uint8_t cbInstr)
10788{
10789 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10790 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
10791
10792 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10793 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdpmc);
10794 Assert(!pVCpu->iem.s.cActiveMappings);
10795 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10796}
10797
10798
10799/**
10800 * Interface for HM and EM to emulate the RDTSC instruction.
10801 *
10802 * @returns Strict VBox status code.
10803 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
10804 *
10805 * @param pVCpu The cross context virtual CPU structure.
10806 * @param cbInstr The instruction length in bytes.
10807 *
10808 * @remarks Not all of the state needs to be synced in.
10809 */
10810VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtsc(PVMCPUCC pVCpu, uint8_t cbInstr)
10811{
10812 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10813 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
10814
10815 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10816 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtsc);
10817 Assert(!pVCpu->iem.s.cActiveMappings);
10818 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10819}
10820
10821
10822/**
10823 * Interface for HM and EM to emulate the RDTSCP instruction.
10824 *
10825 * @returns Strict VBox status code.
10826 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
10827 *
10828 * @param pVCpu The cross context virtual CPU structure.
10829 * @param cbInstr The instruction length in bytes.
10830 *
10831 * @remarks Not all of the state needs to be synced in. Recommended
10832 * to include CPUMCTX_EXTRN_TSC_AUX, to avoid an extra fetch call.
10833 */
10834VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtscp(PVMCPUCC pVCpu, uint8_t cbInstr)
10835{
10836 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10837 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_TSC_AUX);
10838
10839 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10840 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtscp);
10841 Assert(!pVCpu->iem.s.cActiveMappings);
10842 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10843}
10844
10845
10846/**
10847 * Interface for HM and EM to emulate the RDMSR instruction.
10848 *
10849 * @returns Strict VBox status code.
10850 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
10851 *
10852 * @param pVCpu The cross context virtual CPU structure.
10853 * @param cbInstr The instruction length in bytes.
10854 *
10855 * @remarks Not all of the state needs to be synced in. Requires RCX and
10856 * (currently) all MSRs.
10857 */
10858VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
10859{
10860 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10861 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_ALL_MSRS);
10862
10863 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10864 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdmsr);
10865 Assert(!pVCpu->iem.s.cActiveMappings);
10866 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10867}
10868
10869
10870/**
10871 * Interface for HM and EM to emulate the WRMSR instruction.
10872 *
10873 * @returns Strict VBox status code.
10874 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
10875 *
10876 * @param pVCpu The cross context virtual CPU structure.
10877 * @param cbInstr The instruction length in bytes.
10878 *
10879 * @remarks Not all of the state needs to be synced in. Requires RCX, RAX, RDX,
10880 * and (currently) all MSRs.
10881 */
10882VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWrmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
10883{
10884 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10885 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK
10886 | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_ALL_MSRS);
10887
10888 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10889 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wrmsr);
10890 Assert(!pVCpu->iem.s.cActiveMappings);
10891 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10892}
10893
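/*
 * Illustrative sketch (not part of the original file): before calling
 * IEMExecDecodedWrmsr the caller must make sure the state listed in the
 * IEM_CTX_ASSERT above is present in the CPUM context.  The import helper
 * used here is a hypothetical placeholder for whatever state-import routine
 * the caller has.
 */
#if 0 /* example only, not compiled */
static VBOXSTRICTRC exampleEmulateWrmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
{
    /* Bring in RCX (MSR index), RAX/RDX (value) and the MSR state before IEM
       reads them (placeholder helper, not a real API). */
    int rc = exampleImportGuestState(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK
                                          | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX
                                          | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_ALL_MSRS);
    AssertRCReturn(rc, rc);

    return IEMExecDecodedWrmsr(pVCpu, cbInstr);
}
#endif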
10894
10895/**
10896 * Interface for HM and EM to emulate the MONITOR instruction.
10897 *
10898 * @returns Strict VBox status code.
10899 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
10900 *
10901 * @param pVCpu The cross context virtual CPU structure.
10902 * @param cbInstr The instruction length in bytes.
10903 *
10904 * @remarks Not all of the state needs to be synced in.
10905 * @remarks ASSUMES the default segment of DS and no segment override prefixes
10906 * are used.
10907 */
10908VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMonitor(PVMCPUCC pVCpu, uint8_t cbInstr)
10909{
10910 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10911 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);
10912
10913 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10914 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_monitor, X86_SREG_DS);
10915 Assert(!pVCpu->iem.s.cActiveMappings);
10916 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10917}
10918
10919
10920/**
10921 * Interface for HM and EM to emulate the MWAIT instruction.
10922 *
10923 * @returns Strict VBox status code.
10924 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
10925 *
10926 * @param pVCpu The cross context virtual CPU structure.
10927 * @param cbInstr The instruction length in bytes.
10928 *
10929 * @remarks Not all of the state needs to be synced in.
10930 */
10931VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMwait(PVMCPUCC pVCpu, uint8_t cbInstr)
10932{
10933 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10934 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX);
10935
10936 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10937 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_mwait);
10938 Assert(!pVCpu->iem.s.cActiveMappings);
10939 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10940}
10941
10942
10943/**
10944 * Interface for HM and EM to emulate the HLT instruction.
10945 *
10946 * @returns Strict VBox status code.
10947 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
10948 *
10949 * @param pVCpu The cross context virtual CPU structure.
10950 * @param cbInstr The instruction length in bytes.
10951 *
10952 * @remarks Not all of the state needs to be synced in.
10953 */
10954VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedHlt(PVMCPUCC pVCpu, uint8_t cbInstr)
10955{
10956 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10957
10958 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10959 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_hlt);
10960 Assert(!pVCpu->iem.s.cActiveMappings);
10961 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10962}
10963
10964
10965/**
10966 * Checks if IEM is in the process of delivering an event (interrupt or
10967 * exception).
10968 *
10969 * @returns true if we're in the process of raising an interrupt or exception,
10970 * false otherwise.
10971 * @param pVCpu The cross context virtual CPU structure.
10972 * @param puVector Where to store the vector associated with the
10973 * currently delivered event, optional.
10974 * @param pfFlags Where to store the event delivery flags (see
10975 * IEM_XCPT_FLAGS_XXX), optional.
10976 * @param puErr Where to store the error code associated with the
10977 * event, optional.
10978 * @param puCr2 Where to store the CR2 associated with the event,
10979 * optional.
10980 * @remarks The caller should check the flags to determine if the error code and
10981 * CR2 are valid for the event.
10982 */
10983VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPUCC pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr, uint64_t *puCr2)
10984{
10985 bool const fRaisingXcpt = pVCpu->iem.s.cXcptRecursions > 0;
10986 if (fRaisingXcpt)
10987 {
10988 if (puVector)
10989 *puVector = pVCpu->iem.s.uCurXcpt;
10990 if (pfFlags)
10991 *pfFlags = pVCpu->iem.s.fCurXcpt;
10992 if (puErr)
10993 *puErr = pVCpu->iem.s.uCurXcptErr;
10994 if (puCr2)
10995 *puCr2 = pVCpu->iem.s.uCurXcptCr2;
10996 }
10997 return fRaisingXcpt;
10998}
10999
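/*
 * Illustrative sketch (not part of the original file): querying whether IEM is
 * in the middle of delivering an event, e.g. when deciding how to report a
 * nested fault.  The flag bit name is assumed from the IEM_XCPT_FLAGS_XXX
 * family referenced in the doxygen above.
 */
#if 0 /* example only, not compiled */
static void exampleLogCurrentXcpt(PVMCPUCC pVCpu)
{
    uint8_t  uVector = 0;
    uint32_t fFlags  = 0;
    uint32_t uErr    = 0;
    uint64_t uCr2    = 0;
    if (IEMGetCurrentXcpt(pVCpu, &uVector, &fFlags, &uErr, &uCr2))
    {
        /* Only trust the error code when the flags say it is valid. */
        if (fFlags & IEM_XCPT_FLAGS_ERR)
            Log(("Delivering vector %#x with error code %#x\n", uVector, uErr));
        else
            Log(("Delivering vector %#x\n", uVector));
    }
}
#endif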
11000#ifdef IN_RING3
11001
11002/**
11003 * Handles the unlikely and probably fatal merge cases.
11004 *
11005 * @returns Merged status code.
11006 * @param rcStrict Current EM status code.
11007 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
11008 * with @a rcStrict.
11009 * @param iMemMap The memory mapping index. For error reporting only.
11010 * @param pVCpu The cross context virtual CPU structure of the calling
11011 * thread, for error reporting only.
11012 */
11013DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
11014 unsigned iMemMap, PVMCPUCC pVCpu)
11015{
11016 if (RT_FAILURE_NP(rcStrict))
11017 return rcStrict;
11018
11019 if (RT_FAILURE_NP(rcStrictCommit))
11020 return rcStrictCommit;
11021
11022 if (rcStrict == rcStrictCommit)
11023 return rcStrictCommit;
11024
11025 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
11026 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
11027 pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
11028 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
11029 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
11030 return VERR_IOM_FF_STATUS_IPE;
11031}
11032
11033
11034/**
11035 * Helper for IOMR3ProcessForceFlag.
11036 *
11037 * @returns Merged status code.
11038 * @param rcStrict Current EM status code.
11039 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
11040 * with @a rcStrict.
11041 * @param iMemMap The memory mapping index. For error reporting only.
11042 * @param pVCpu The cross context virtual CPU structure of the calling
11043 * thread, for error reporting only.
11044 */
11045DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPUCC pVCpu)
11046{
11047 /* Simple. */
11048 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
11049 return rcStrictCommit;
11050
11051 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
11052 return rcStrict;
11053
11054 /* EM scheduling status codes. */
11055 if (RT_LIKELY( rcStrict >= VINF_EM_FIRST
11056 && rcStrict <= VINF_EM_LAST))
11057 {
11058 if (RT_LIKELY( rcStrictCommit >= VINF_EM_FIRST
11059 && rcStrictCommit <= VINF_EM_LAST))
11060 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
11061 }
11062
11063 /* Unlikely */
11064 return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
11065}
11066
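/*
 * Worked example (added for illustration, not in the original file) of the
 * merge rules above, assuming the usual VBox convention that numerically lower
 * VINF_EM_* values have higher scheduling priority:
 *   - rcStrict = VINF_EM_RAW_TO_R3, rcStrictCommit = VINF_SUCCESS
 *         -> the commit status wins, i.e. VINF_SUCCESS.
 *   - rcStrict = VINF_SUCCESS,      rcStrictCommit = VINF_EM_DBG_BREAKPOINT
 *         -> the commit status wins, i.e. VINF_EM_DBG_BREAKPOINT.
 *   - both statuses in the VINF_EM_FIRST..VINF_EM_LAST range
 *         -> the numerically smaller (higher priority) status is returned.
 *   - anything else falls back to iemR3MergeStatusSlow above.
 */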
11067
11068/**
11069 * Called by force-flag handling code when VMCPU_FF_IEM is set.
11070 *
11071 * @returns Merge between @a rcStrict and what the commit operation returned.
11072 * @param pVM The cross context VM structure.
11073 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11074 * @param rcStrict The status code returned by ring-0 or raw-mode.
11075 */
11076VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
11077{
11078 /*
11079 * Reset the pending commit.
11080 */
11081 AssertMsg( (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
11082 & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
11083 ("%#x %#x %#x\n",
11084 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
11085 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
11086
11087 /*
11088 * Commit the pending bounce buffers (usually just one).
11089 */
11090 unsigned cBufs = 0;
11091 unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
11092 while (iMemMap-- > 0)
11093 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
11094 {
11095 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
11096 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
11097 Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
11098
11099 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
11100 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
11101 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
11102
11103 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
11104 {
11105 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
11106 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
11107 pbBuf,
11108 cbFirst,
11109 PGMACCESSORIGIN_IEM);
11110 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
11111 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
11112 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
11113 VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
11114 }
11115
11116 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
11117 {
11118 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
11119 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
11120 pbBuf + cbFirst,
11121 cbSecond,
11122 PGMACCESSORIGIN_IEM);
11123 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
11124 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
11125 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
11126 VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
11127 }
11128 cBufs++;
11129 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
11130 }
11131
11132 AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
11133 ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
11134 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
11135 pVCpu->iem.s.cActiveMappings = 0;
11136 return rcStrict;
11137}
11138
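/*
 * Illustrative sketch (not part of the original file): how a ring-3 execution
 * loop might react to VMCPU_FF_IEM being set after returning from ring-0 or
 * raw-mode, committing the pending bounce-buffer writes via the function
 * above.  The helper name and surrounding structure are simplified
 * placeholders.
 */
#if 0 /* example only, not compiled */
static VBOXSTRICTRC exampleHandlePendingIemCommits(PVM pVM, PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
{
    /* Only call when the force-flag is actually pending; the function asserts this. */
    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
        rcStrict = IEMR3ProcessForceFlag(pVM, pVCpu, rcStrict);
    return rcStrict;
}
#endif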
11139#endif /* IN_RING3 */
11140