VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp @ 94800

Last change on this file was r94800, checked in by vboxsync on 2022-05-03:

VMM/IEM,PGM: TLB work, esp. on the data one. bugref:9898

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 444.8 KB
1/* $Id: IEMAll.cpp 94800 2022-05-03 21:49:43Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2022 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_iem IEM - Interpreted Execution Manager
20 *
21 * The interpreted execution manager (IEM) is for executing short guest code
22 * sequences that are causing too many exits / virtualization traps. It will
23 * also be used to interpret single instructions, thus replacing the selective
24 * interpreters in EM and IOM.
25 *
26 * Design goals:
27 * - Relatively small footprint, although we favour speed and correctness
28 * over size.
29 * - Reasonably fast.
30 * - Correctly handle lock prefixed instructions.
31 * - Complete instruction set - eventually.
32 * - Refactorable into a recompiler, maybe.
33 * - Replace EMInterpret*.
34 *
35 * Using the existing disassembler has been considered; however, this is thought
36 * to conflict with speed as the disassembler chews things a bit too much while
37 * leaving us with a somewhat complicated state to interpret afterwards.
38 *
39 *
40 * The current code is very much work in progress. You've been warned!
41 *
42 *
43 * @section sec_iem_fpu_instr FPU Instructions
44 *
45 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
46 * same or equivalent instructions on the host FPU. To make life easy, we also
47 * let the FPU prioritize the unmasked exceptions for us. This, however, only
48 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
49 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
50 * can trigger spurious FPU exceptions.
51 *
52 * The guest FPU state is not loaded into the host CPU and kept there till we
53 * leave IEM because the calling conventions have declared an all year open
54 * season on much of the FPU state. For instance an innocent looking call to
55 * memcpy might end up using a whole bunch of XMM or MM registers if the
56 * particular implementation finds it worthwhile.
57 *
58 *
59 * @section sec_iem_logging Logging
60 *
61 * The IEM code uses the "IEM" log group for the main logging. The different
62 * logging levels/flags are generally used for the following purposes:
63 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
64 * - Flow (LogFlow) : Basic enter/exit IEM state info.
65 * - Level 2 (Log2) : ?
66 * - Level 3 (Log3) : More detailed enter/exit IEM state info.
67 * - Level 4 (Log4) : Decoding mnemonics w/ EIP.
68 * - Level 5 (Log5) : Decoding details.
69 * - Level 6 (Log6) : Enables/disables the lockstep comparison with REM.
70 * - Level 7 (Log7) : iret++ execution logging.
71 * - Level 8 (Log8) : Memory writes.
72 * - Level 9 (Log9) : Memory reads.
73 * - Level 10 (Log10): TLBs.
74 */
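/* Note: with the usual VBox log group syntax, the levels above would typically
 * be enabled through the VBOX_LOG environment variable, e.g. something like
 * VBOX_LOG="+iem.e.l.l3.l8.l9" for errors, basic state info and memory
 * read/write logging from this group.  The exact flag string is an assumption
 * based on the generic log group scheme, not something specified in this file. */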
75
76/* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
77#ifdef _MSC_VER
78# pragma warning(disable:4505)
79#endif
80
81
82/*********************************************************************************************************************************
83* Header Files *
84*********************************************************************************************************************************/
85#define LOG_GROUP LOG_GROUP_IEM
86#define VMCPU_INCL_CPUM_GST_CTX
87#include <VBox/vmm/iem.h>
88#include <VBox/vmm/cpum.h>
89#include <VBox/vmm/apic.h>
90#include <VBox/vmm/pdm.h>
91#include <VBox/vmm/pgm.h>
92#include <VBox/vmm/iom.h>
93#include <VBox/vmm/em.h>
94#include <VBox/vmm/hm.h>
95#include <VBox/vmm/nem.h>
96#include <VBox/vmm/gim.h>
97#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
98# include <VBox/vmm/em.h>
99# include <VBox/vmm/hm_svm.h>
100#endif
101#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
102# include <VBox/vmm/hmvmxinline.h>
103#endif
104#include <VBox/vmm/tm.h>
105#include <VBox/vmm/dbgf.h>
106#include <VBox/vmm/dbgftrace.h>
107#include "IEMInternal.h"
108#include <VBox/vmm/vmcc.h>
109#include <VBox/log.h>
110#include <VBox/err.h>
111#include <VBox/param.h>
112#include <VBox/dis.h>
113#include <VBox/disopcode.h>
114#include <iprt/asm-math.h>
115#include <iprt/assert.h>
116#include <iprt/string.h>
117#include <iprt/x86.h>
118
119#include "IEMInline.h"
120
121
122/*********************************************************************************************************************************
123* Structures and Typedefs *
124*********************************************************************************************************************************/
125/**
126 * CPU exception classes.
127 */
128typedef enum IEMXCPTCLASS
129{
130 IEMXCPTCLASS_BENIGN,
131 IEMXCPTCLASS_CONTRIBUTORY,
132 IEMXCPTCLASS_PAGE_FAULT,
133 IEMXCPTCLASS_DOUBLE_FAULT
134} IEMXCPTCLASS;
135
136
137/*********************************************************************************************************************************
138* Global Variables *
139*********************************************************************************************************************************/
140#if defined(IEM_LOG_MEMORY_WRITES)
141/** What IEM just wrote. */
142uint8_t g_abIemWrote[256];
143/** How much IEM just wrote. */
144size_t g_cbIemWrote;
145#endif
146
147
148/*********************************************************************************************************************************
149* Internal Functions *
150*********************************************************************************************************************************/
151static VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,
152 uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT;
153
154
155/**
156 * Initializes the decoder state.
157 *
158 * iemReInitDecoder is mostly a copy of this function.
159 *
160 * @param pVCpu The cross context virtual CPU structure of the
161 * calling thread.
162 * @param fBypassHandlers Whether to bypass access handlers.
163 * @param fDisregardLock Whether to disregard the LOCK prefix.
164 */
165DECLINLINE(void) iemInitDecoder(PVMCPUCC pVCpu, bool fBypassHandlers, bool fDisregardLock)
166{
167 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
168 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
169 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
170 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
171 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
172 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
173 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
174 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
175 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
176 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
177
178 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
179 IEMMODE enmMode = iemCalcCpuMode(pVCpu);
180 pVCpu->iem.s.enmCpuMode = enmMode;
181 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
182 pVCpu->iem.s.enmEffAddrMode = enmMode;
183 if (enmMode != IEMMODE_64BIT)
184 {
185 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
186 pVCpu->iem.s.enmEffOpSize = enmMode;
187 }
188 else
189 {
190 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
191 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
192 }
193 pVCpu->iem.s.fPrefixes = 0;
194 pVCpu->iem.s.uRexReg = 0;
195 pVCpu->iem.s.uRexB = 0;
196 pVCpu->iem.s.uRexIndex = 0;
197 pVCpu->iem.s.idxPrefix = 0;
198 pVCpu->iem.s.uVex3rdReg = 0;
199 pVCpu->iem.s.uVexLength = 0;
200 pVCpu->iem.s.fEvexStuff = 0;
201 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
202#ifdef IEM_WITH_CODE_TLB
203 pVCpu->iem.s.pbInstrBuf = NULL;
204 pVCpu->iem.s.offInstrNextByte = 0;
205 pVCpu->iem.s.offCurInstrStart = 0;
206# ifdef VBOX_STRICT
207 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
208 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
209 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
210# endif
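 /* Note: the UINT16_MAX and 0xc0ffc0ffcff0c0ff values above look like poison
 * values meant to make use of a stale instruction buffer obvious in strict
 * builds; that is an inference from the VBOX_STRICT guard, not a documented
 * contract. */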
211#else
212 pVCpu->iem.s.offOpcode = 0;
213 pVCpu->iem.s.cbOpcode = 0;
214#endif
215 pVCpu->iem.s.offModRm = 0;
216 pVCpu->iem.s.cActiveMappings = 0;
217 pVCpu->iem.s.iNextMapping = 0;
218 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
219 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
220 pVCpu->iem.s.fDisregardLock = fDisregardLock;
221
222#ifdef DBGFTRACE_ENABLED
223 switch (enmMode)
224 {
225 case IEMMODE_64BIT:
226 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.rip);
227 break;
228 case IEMMODE_32BIT:
229 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
230 break;
231 case IEMMODE_16BIT:
232 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
233 break;
234 }
235#endif
236}
237
238
239/**
240 * Reinitializes the decoder state 2nd+ loop of IEMExecLots.
241 *
242 * This is mostly a copy of iemInitDecoder.
243 *
244 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
245 */
246DECLINLINE(void) iemReInitDecoder(PVMCPUCC pVCpu)
247{
248 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
249 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
250 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
251 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
252 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
253 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
254 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
255 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
256 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
257
258 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu); /** @todo this should be updated during execution! */
259 IEMMODE enmMode = iemCalcCpuMode(pVCpu);
260 pVCpu->iem.s.enmCpuMode = enmMode; /** @todo this should be updated during execution! */
261 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
262 pVCpu->iem.s.enmEffAddrMode = enmMode;
263 if (enmMode != IEMMODE_64BIT)
264 {
265 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
266 pVCpu->iem.s.enmEffOpSize = enmMode;
267 }
268 else
269 {
270 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
271 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
272 }
273 pVCpu->iem.s.fPrefixes = 0;
274 pVCpu->iem.s.uRexReg = 0;
275 pVCpu->iem.s.uRexB = 0;
276 pVCpu->iem.s.uRexIndex = 0;
277 pVCpu->iem.s.idxPrefix = 0;
278 pVCpu->iem.s.uVex3rdReg = 0;
279 pVCpu->iem.s.uVexLength = 0;
280 pVCpu->iem.s.fEvexStuff = 0;
281 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
282#ifdef IEM_WITH_CODE_TLB
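 /* Note: if the instruction buffer mapped for the previous instruction still
 * covers the new RIP, only the offsets are adjusted below; the 15-byte window
 * matches the maximum x86 instruction length.  Otherwise the buffer is dropped
 * and will be re-established by the opcode fetcher (summary of the code that
 * follows). */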
283 if (pVCpu->iem.s.pbInstrBuf)
284 {
285 uint64_t off = (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pVCpu->cpum.GstCtx.rip : pVCpu->cpum.GstCtx.eip + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base)
286 - pVCpu->iem.s.uInstrBufPc;
287 if (off < pVCpu->iem.s.cbInstrBufTotal)
288 {
289 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
290 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
291 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
292 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
293 else
294 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
295 }
296 else
297 {
298 pVCpu->iem.s.pbInstrBuf = NULL;
299 pVCpu->iem.s.offInstrNextByte = 0;
300 pVCpu->iem.s.offCurInstrStart = 0;
301 pVCpu->iem.s.cbInstrBuf = 0;
302 pVCpu->iem.s.cbInstrBufTotal = 0;
303 }
304 }
305 else
306 {
307 pVCpu->iem.s.offInstrNextByte = 0;
308 pVCpu->iem.s.offCurInstrStart = 0;
309 pVCpu->iem.s.cbInstrBuf = 0;
310 pVCpu->iem.s.cbInstrBufTotal = 0;
311 }
312#else
313 pVCpu->iem.s.cbOpcode = 0;
314 pVCpu->iem.s.offOpcode = 0;
315#endif
316 pVCpu->iem.s.offModRm = 0;
317 Assert(pVCpu->iem.s.cActiveMappings == 0);
318 pVCpu->iem.s.iNextMapping = 0;
319 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
320 Assert(pVCpu->iem.s.fBypassHandlers == false);
321
322#ifdef DBGFTRACE_ENABLED
323 switch (enmMode)
324 {
325 case IEMMODE_64BIT:
326 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.rip);
327 break;
328 case IEMMODE_32BIT:
329 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
330 break;
331 case IEMMODE_16BIT:
332 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
333 break;
334 }
335#endif
336}
337
338
339
340/**
341 * Prefetches opcodes the first time, when starting execution.
342 *
343 * @returns Strict VBox status code.
344 * @param pVCpu The cross context virtual CPU structure of the
345 * calling thread.
346 * @param fBypassHandlers Whether to bypass access handlers.
347 * @param fDisregardLock Whether to disregard LOCK prefixes.
348 *
349 * @todo Combine fDisregardLock and fBypassHandlers into a flag parameter and
350 * store them as such.
351 */
352static VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPUCC pVCpu, bool fBypassHandlers, bool fDisregardLock) RT_NOEXCEPT
353{
354 iemInitDecoder(pVCpu, fBypassHandlers, fDisregardLock);
355
356#ifdef IEM_WITH_CODE_TLB
357 /** @todo Do ITLB lookup here. */
358
359#else /* !IEM_WITH_CODE_TLB */
360
361 /*
362 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
363 *
364 * First translate CS:rIP to a physical address.
365 */
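 /* Note: in 64-bit mode only canonicality of RIP is checked below and at most
 * one guest page is read; in 16/32-bit modes EIP is first checked against the
 * CS limit and the linear address is formed as CS.base + EIP (summary of the
 * code that follows). */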
366 uint32_t cbToTryRead;
367 RTGCPTR GCPtrPC;
368 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
369 {
370 cbToTryRead = GUEST_PAGE_SIZE;
371 GCPtrPC = pVCpu->cpum.GstCtx.rip;
372 if (IEM_IS_CANONICAL(GCPtrPC))
373 cbToTryRead = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
374 else
375 return iemRaiseGeneralProtectionFault0(pVCpu);
376 }
377 else
378 {
379 uint32_t GCPtrPC32 = pVCpu->cpum.GstCtx.eip;
380 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
381 if (GCPtrPC32 <= pVCpu->cpum.GstCtx.cs.u32Limit)
382 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrPC32 + 1;
383 else
384 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
385 if (cbToTryRead) { /* likely */ }
386 else /* overflowed */
387 {
388 Assert(GCPtrPC32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
389 cbToTryRead = UINT32_MAX;
390 }
391 GCPtrPC = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrPC32;
392 Assert(GCPtrPC <= UINT32_MAX);
393 }
394
395 PGMPTWALK Walk;
396 int rc = PGMGstGetPage(pVCpu, GCPtrPC, &Walk);
397 if (RT_SUCCESS(rc))
398 Assert(Walk.fSucceeded); /* probable. */
399 else
400 {
401 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
402#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
403 if (Walk.fFailed & PGM_WALKFAIL_EPT)
404 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
405#endif
406 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
407 }
408 if ((Walk.fEffective & X86_PTE_US) || pVCpu->iem.s.uCpl != 3) { /* likely */ }
409 else
410 {
411 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
412#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
413 if (Walk.fFailed & PGM_WALKFAIL_EPT)
414 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
415#endif
416 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
417 }
418 if (!(Walk.fEffective & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
419 else
420 {
421 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
422#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
423 if (Walk.fFailed & PGM_WALKFAIL_EPT)
424 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
425#endif
426 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
427 }
428 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
429 /** @todo Check reserved bits and such stuff. PGM is better at doing
430 * that, so do it when implementing the guest virtual address
431 * TLB... */
432
433 /*
434 * Read the bytes at this address.
435 */
436 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
437 if (cbToTryRead > cbLeftOnPage)
438 cbToTryRead = cbLeftOnPage;
439 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
440 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
441
442 if (!pVCpu->iem.s.fBypassHandlers)
443 {
444 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
445 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
446 { /* likely */ }
447 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
448 {
449 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
450 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
451 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
452 }
453 else
454 {
455 Log((RT_SUCCESS(rcStrict)
456 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
457 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
458 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
459 return rcStrict;
460 }
461 }
462 else
463 {
464 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
465 if (RT_SUCCESS(rc))
466 { /* likely */ }
467 else
468 {
469 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
470 GCPtrPC, GCPhys, cbToTryRead, rc));
471 return rc;
472 }
473 }
474 pVCpu->iem.s.cbOpcode = cbToTryRead;
475#endif /* !IEM_WITH_CODE_TLB */
476 return VINF_SUCCESS;
477}
478
479
480/**
481 * Invalidates the IEM TLBs.
482 *
483 * This is called internally as well as by PGM when moving GC mappings.
484 *
485 *
486 * @param pVCpu The cross context virtual CPU structure of the calling
487 * thread.
488 */
489VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPUCC pVCpu)
490{
491#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
492 Log10(("IEMTlbInvalidateAll\n"));
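 /* Note: bumping uTlbRevision implicitly invalidates every entry, since valid
 * tags always contain the current revision (see the tag construction in
 * IEMTlbInvalidatePage below); only on the rare wrap to zero are the entry
 * arrays swept explicitly. */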
493# ifdef IEM_WITH_CODE_TLB
494 pVCpu->iem.s.cbInstrBufTotal = 0;
495 pVCpu->iem.s.CodeTlb.uTlbRevision += IEMTLB_REVISION_INCR;
496 if (pVCpu->iem.s.CodeTlb.uTlbRevision != 0)
497 { /* very likely */ }
498 else
499 {
500 pVCpu->iem.s.CodeTlb.uTlbRevision = IEMTLB_REVISION_INCR;
501 unsigned i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
502 while (i-- > 0)
503 pVCpu->iem.s.CodeTlb.aEntries[i].uTag = 0;
504 }
505# endif
506
507# ifdef IEM_WITH_DATA_TLB
508 pVCpu->iem.s.DataTlb.uTlbRevision += IEMTLB_REVISION_INCR;
509 if (pVCpu->iem.s.DataTlb.uTlbRevision != 0)
510 { /* very likely */ }
511 else
512 {
513 pVCpu->iem.s.DataTlb.uTlbRevision = IEMTLB_REVISION_INCR;
514 unsigned i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
515 while (i-- > 0)
516 pVCpu->iem.s.DataTlb.aEntries[i].uTag = 0;
517 }
518# endif
519#else
520 RT_NOREF(pVCpu);
521#endif
522}
523
524
525/**
526 * Invalidates a page in the TLBs.
527 *
528 * @param pVCpu The cross context virtual CPU structure of the calling
529 * thread.
530 * @param GCPtr The address of the page to invalidate
531 */
532VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPUCC pVCpu, RTGCPTR GCPtr)
533{
534#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
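 /* Note: the shift pair below drops the page offset and the top 16
 * sign/canonical bits, leaving the page-number part of the tag; the low 8 bits
 * of that value index the 256-entry, direct-mapped TLB arrays (cf. the
 * AssertCompile statements). */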
535 GCPtr = (GCPtr << 16) >> (X86_PAGE_SHIFT + 16);
536 Assert(!(GCPtr >> (48 - X86_PAGE_SHIFT)));
537 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
538 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries) == 256);
539 uintptr_t idx = (uint8_t)GCPtr;
540
541# ifdef IEM_WITH_CODE_TLB
542 if (pVCpu->iem.s.CodeTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.CodeTlb.uTlbRevision))
543 {
544 pVCpu->iem.s.CodeTlb.aEntries[idx].uTag = 0;
545 if (GCPtr == ((pVCpu->iem.s.uInstrBufPc << 16) >> (X86_PAGE_SHIFT + 16)))
546 pVCpu->iem.s.cbInstrBufTotal = 0;
547 }
548# endif
549
550# ifdef IEM_WITH_DATA_TLB
551 if (pVCpu->iem.s.DataTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.DataTlb.uTlbRevision))
552 pVCpu->iem.s.DataTlb.aEntries[idx].uTag = 0;
553# endif
554#else
555 NOREF(pVCpu); NOREF(GCPtr);
556#endif
557}
558
559
560#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
561/**
562 * Invalidates both TLBs the slow way following a revision rollover.
563 *
564 * Worker for IEMTlbInvalidateAllPhysical,
565 * IEMTlbInvalidateAllPhysicalAllCpus, iemOpcodeFetchBytesJmp, iemMemMap,
566 * iemMemMapJmp and others.
567 *
568 * @thread EMT(pVCpu)
569 */
570static void IEMTlbInvalidateAllPhysicalSlow(PVMCPUCC pVCpu)
571{
572 Log10(("IEMTlbInvalidateAllPhysicalSlow\n"));
573 ASMAtomicWriteU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
574 ASMAtomicWriteU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
575
576 unsigned i;
577# ifdef IEM_WITH_CODE_TLB
578 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
579 while (i-- > 0)
580 {
581 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
582 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~( IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
583 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
584 }
585# endif
586# ifdef IEM_WITH_DATA_TLB
587 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
588 while (i-- > 0)
589 {
590 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
591 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~( IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
592 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
593 }
594# endif
595
596}
597#endif
598
599
600/**
601 * Invalidates the host physical aspects of the IEM TLBs.
602 *
603 * This is called internally as well as by PGM when moving GC mappings.
604 *
605 * @param pVCpu The cross context virtual CPU structure of the calling
606 * thread.
607 * @note Currently not used.
608 */
609VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPUCC pVCpu)
610{
611#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
612 /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
613 Log10(("IEMTlbInvalidateAllPhysical\n"));
614
615# ifdef IEM_WITH_CODE_TLB
616 pVCpu->iem.s.cbInstrBufTotal = 0;
617# endif
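 /* Note: advancing uTlbPhysRev only invalidates the physical/host-mapping part
 * of each entry; entries whose IEMTLBE_F_PHYS_REV bits no longer match must
 * redo the PGM physical lookup while their virtual translation stays valid.
 * The slow path below handles the revision wrap-around. */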
618 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
619 if (RT_LIKELY(uTlbPhysRev > IEMTLB_PHYS_REV_INCR * 2))
620 {
621 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
622 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
623 }
624 else
625 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
626#else
627 NOREF(pVCpu);
628#endif
629}
630
631
632/**
633 * Invalidates the host physical aspects of the IEM TLBs.
634 *
635 * This is called internally as well as by PGM when moving GC mappings.
636 *
637 * @param pVM The cross context VM structure.
638 * @param idCpuCaller The ID of the calling EMT if available to the caller,
639 * otherwise NIL_VMCPUID.
640 *
641 * @remarks Caller holds the PGM lock.
642 */
643VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVMCC pVM, VMCPUID idCpuCaller)
644{
645#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
646 PVMCPUCC const pVCpuCaller = idCpuCaller >= pVM->cCpus ? VMMGetCpu(pVM) : VMMGetCpuById(pVM, idCpuCaller);
647 if (pVCpuCaller)
648 VMCPU_ASSERT_EMT(pVCpuCaller);
649 Log10(("IEMTlbInvalidateAllPhysicalAllCpus\n"));
650
651 VMCC_FOR_EACH_VMCPU(pVM)
652 {
653# ifdef IEM_WITH_CODE_TLB
654 if (pVCpuCaller == pVCpu)
655 pVCpu->iem.s.cbInstrBufTotal = 0;
656# endif
657
658 uint64_t const uTlbPhysRevPrev = ASMAtomicUoReadU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev);
659 uint64_t uTlbPhysRevNew = uTlbPhysRevPrev + IEMTLB_PHYS_REV_INCR;
660 if (RT_LIKELY(uTlbPhysRevNew > IEMTLB_PHYS_REV_INCR * 2))
661 { /* likely */}
662 else if (pVCpuCaller == pVCpu)
663 uTlbPhysRevNew = IEMTLB_PHYS_REV_INCR;
664 else
665 {
666 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
667 continue;
668 }
669 ASMAtomicCmpXchgU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev);
670 ASMAtomicCmpXchgU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev);
671 }
672 VMCC_FOR_EACH_VMCPU_END(pVM);
673
674#else
675 RT_NOREF(pVM, idCpuCaller);
676#endif
677}
678
679#ifdef IEM_WITH_CODE_TLB
680
681/**
682 * Tries to fetch @a cbDst opcode bytes, raising the appropriate exception
683 * on failure (via longjmp).
684 *
685 * We end up here for a number of reasons:
686 * - pbInstrBuf isn't yet initialized.
687 * - Advancing beyond the buffer boundary (e.g. cross page).
688 * - Advancing beyond the CS segment limit.
689 * - Fetching from a non-mappable page (e.g. MMIO).
690 *
691 * @param pVCpu The cross context virtual CPU structure of the
692 * calling thread.
693 * @param pvDst Where to return the bytes.
694 * @param cbDst Number of bytes to read.
695 *
696 * @todo Make cbDst = 0 a way of initializing pbInstrBuf?
697 */
698void iemOpcodeFetchBytesJmp(PVMCPUCC pVCpu, size_t cbDst, void *pvDst) RT_NOEXCEPT
699{
700#ifdef IN_RING3
701 for (;;)
702 {
703 Assert(cbDst <= 8);
704 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
705
706 /*
707 * We might have a partial buffer match, deal with that first to make the
708 * rest simpler. This is the first part of the cross page/buffer case.
709 */
710 if (pVCpu->iem.s.pbInstrBuf != NULL)
711 {
712 if (offBuf < pVCpu->iem.s.cbInstrBuf)
713 {
714 Assert(offBuf + cbDst > pVCpu->iem.s.cbInstrBuf);
715 uint32_t const cbCopy = pVCpu->iem.s.cbInstrBuf - pVCpu->iem.s.offInstrNextByte;
716 memcpy(pvDst, &pVCpu->iem.s.pbInstrBuf[offBuf], cbCopy);
717
718 cbDst -= cbCopy;
719 pvDst = (uint8_t *)pvDst + cbCopy;
720 offBuf += cbCopy;
721 pVCpu->iem.s.offInstrNextByte += offBuf;
722 }
723 }
724
725 /*
726 * Check segment limit, figuring how much we're allowed to access at this point.
727 *
728 * We will fault immediately if RIP is past the segment limit / in non-canonical
729 * territory. If we do continue, there are one or more bytes to read before we
730 * end up in trouble and we need to do that first before faulting.
731 */
732 RTGCPTR GCPtrFirst;
733 uint32_t cbMaxRead;
734 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
735 {
736 GCPtrFirst = pVCpu->cpum.GstCtx.rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
737 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
738 { /* likely */ }
739 else
740 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
741 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
742 }
743 else
744 {
745 GCPtrFirst = pVCpu->cpum.GstCtx.eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
746 Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
747 if (RT_LIKELY((uint32_t)GCPtrFirst <= pVCpu->cpum.GstCtx.cs.u32Limit))
748 { /* likely */ }
749 else
750 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
751 cbMaxRead = pVCpu->cpum.GstCtx.cs.u32Limit - (uint32_t)GCPtrFirst + 1;
752 if (cbMaxRead != 0)
753 { /* likely */ }
754 else
755 {
756 /* Overflowed because address is 0 and limit is max. */
757 Assert(GCPtrFirst == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
758 cbMaxRead = X86_PAGE_SIZE;
759 }
760 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base;
761 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
762 if (cbMaxRead2 < cbMaxRead)
763 cbMaxRead = cbMaxRead2;
764 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
765 }
766
767 /*
768 * Get the TLB entry for this piece of code.
769 */
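 /* Note: the tag is the virtual page number OR'ed with the current TLB revision
 * and the low 8 bits select one of the 256 direct-mapped entries, so a tag
 * mismatch covers both "different page" and "stale revision". */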
770 uint64_t uTag = (GCPtrFirst >> X86_PAGE_SHIFT) | pVCpu->iem.s.CodeTlb.uTlbRevision;
771 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
772 PIEMTLBENTRY pTlbe = &pVCpu->iem.s.CodeTlb.aEntries[(uint8_t)uTag];
773 if (pTlbe->uTag == uTag)
774 {
775 /* likely when executing lots of code, otherwise unlikely */
776# ifdef VBOX_WITH_STATISTICS
777 pVCpu->iem.s.CodeTlb.cTlbHits++;
778# endif
779 }
780 else
781 {
782 pVCpu->iem.s.CodeTlb.cTlbMisses++;
783 PGMPTWALK Walk;
784 int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &Walk);
785 if (RT_FAILURE(rc))
786 {
787#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
788 /** @todo Nested VMX: Need to handle EPT violation/misconfig here? */
789 Assert(!(Walk.fFailed & PGM_WALKFAIL_EPT));
790#endif
791 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
792 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, rc);
793 }
794
795 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
796 Assert(Walk.fSucceeded);
797 pTlbe->uTag = uTag;
798 pTlbe->fFlagsAndPhysRev = (~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A))
799 | (Walk.fEffective >> X86_PTE_PAE_BIT_NX);
800 pTlbe->GCPhys = Walk.GCPhys;
801 pTlbe->pbMappingR3 = NULL;
802 }
803
804 /*
805 * Check TLB page table level access flags.
806 */
807 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
808 {
809 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && pVCpu->iem.s.uCpl == 3)
810 {
811 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
812 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
813 }
814 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
815 {
816 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrFirst));
817 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
818 }
819 }
820
821 /*
822 * Look up the physical page info if necessary.
823 */
824 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
825 { /* not necessary */ }
826 else
827 {
828 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
829 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
830 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
831 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
832 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
833 { /* likely */ }
834 else
835 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
836 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
837 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_UNASSIGNED);
838 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
839 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
840 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
841 }
842
843# if defined(IN_RING3) || defined(IN_RING0) /** @todo fixme */
844 /*
845 * Try to do a direct read using the pbMappingR3 pointer.
846 */
847 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
848 == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
849 {
850 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
851 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
852 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
853 {
854 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
855 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
856 }
857 else
858 {
859 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
860 Assert(cbInstr < cbMaxRead);
861 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
862 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
863 }
864 if (cbDst <= cbMaxRead)
865 {
866 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
867 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
868 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
869 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
870 return;
871 }
872 pVCpu->iem.s.pbInstrBuf = NULL;
873
874 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
875 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
876 }
877 else
878# endif
879#if 0
880 /*
881 * If there is no special read handling, we can read a bit more and
882 * put it in the prefetch buffer.
883 */
884 if ( cbDst < cbMaxRead
885 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
886 {
887 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
888 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
889 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
890 { /* likely */ }
891 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
892 {
893 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
894 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
895 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
896 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
897 }
898 else
899 {
900 Log((RT_SUCCESS(rcStrict)
901 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
902 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
903 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
904 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
905 }
906 }
907 /*
908 * Special read handling, so only read exactly what's needed.
909 * This is a highly unlikely scenario.
910 */
911 else
912#endif
913 {
914 pVCpu->iem.s.CodeTlb.cTlbSlowReadPath++;
915 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
916 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
917 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
918 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
919 { /* likely */ }
920 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
921 {
922 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
923 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
924 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
925 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
926 }
927 else
928 {
929 Log((RT_SUCCESS(rcStrict)
930 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
931 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
932 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
933 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
934 }
935 pVCpu->iem.s.offInstrNextByte = offBuf + cbToRead;
936 if (cbToRead == cbDst)
937 return;
938 }
939
940 /*
941 * More to read, loop.
942 */
943 cbDst -= cbMaxRead;
944 pvDst = (uint8_t *)pvDst + cbMaxRead;
945 }
946#else
947 RT_NOREF(pvDst, cbDst);
948 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_INTERNAL_ERROR);
949#endif
950}
951
952#else
953
954/**
955 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
956 * exception if it fails.
957 *
958 * @returns Strict VBox status code.
959 * @param pVCpu The cross context virtual CPU structure of the
960 * calling thread.
961 * @param cbMin The minimum number of bytes relative to offOpcode
962 * that must be read.
963 */
964VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPUCC pVCpu, size_t cbMin) RT_NOEXCEPT
965{
966 /*
967 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
968 *
969 * First translate CS:rIP to a physical address.
970 */
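 /* Note: without a code TLB this path simply appends more bytes to abOpcode:
 * it translates CS:rIP + cbOpcode, reads up to the end of that page or the
 * remaining buffer capacity, and advances cbOpcode (summary of the function
 * body below). */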
971 uint8_t cbLeft = pVCpu->iem.s.cbOpcode - pVCpu->iem.s.offOpcode; Assert(cbLeft < cbMin);
972 uint32_t cbToTryRead;
973 RTGCPTR GCPtrNext;
974 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
975 {
976 cbToTryRead = GUEST_PAGE_SIZE;
977 GCPtrNext = pVCpu->cpum.GstCtx.rip + pVCpu->iem.s.cbOpcode;
978 if (!IEM_IS_CANONICAL(GCPtrNext))
979 return iemRaiseGeneralProtectionFault0(pVCpu);
980 }
981 else
982 {
983 uint32_t GCPtrNext32 = pVCpu->cpum.GstCtx.eip;
984 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
985 GCPtrNext32 += pVCpu->iem.s.cbOpcode;
986 if (GCPtrNext32 > pVCpu->cpum.GstCtx.cs.u32Limit)
987 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
988 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrNext32 + 1;
989 if (!cbToTryRead) /* overflowed */
990 {
991 Assert(GCPtrNext32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
992 cbToTryRead = UINT32_MAX;
993 /** @todo check out wrapping around the code segment. */
994 }
995 if (cbToTryRead < cbMin - cbLeft)
996 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
997 GCPtrNext = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrNext32;
998 }
999
1000 /* Only read up to the end of the page, and make sure we don't read more
1001 than the opcode buffer can hold. */
1002 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1003 if (cbToTryRead > cbLeftOnPage)
1004 cbToTryRead = cbLeftOnPage;
1005 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode)
1006 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode;
1007/** @todo r=bird: Convert assertion into undefined opcode exception? */
1008 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
1009
1010 PGMPTWALK Walk;
1011 int rc = PGMGstGetPage(pVCpu, GCPtrNext, &Walk);
1012 if (RT_FAILURE(rc))
1013 {
1014 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
1015#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1016 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1017 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
1018#endif
1019 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
1020 }
1021 if (!(Walk.fEffective & X86_PTE_US) && pVCpu->iem.s.uCpl == 3)
1022 {
1023 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
1024#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1025 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1026 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
1027#endif
1028 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1029 }
1030 if ((Walk.fEffective & X86_PTE_PAE_NX) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
1031 {
1032 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
1033#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1034 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1035 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
1036#endif
1037 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1038 }
1039 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1040 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pVCpu->iem.s.cbOpcode));
1041 /** @todo Check reserved bits and such stuff. PGM is better at doing
1042 * that, so do it when implementing the guest virtual address
1043 * TLB... */
1044
1045 /*
1046 * Read the bytes at this address.
1047 *
1048 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
1049 * and since PATM should only patch the start of an instruction there
1050 * should be no need to check again here.
1051 */
1052 if (!pVCpu->iem.s.fBypassHandlers)
1053 {
1054 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode],
1055 cbToTryRead, PGMACCESSORIGIN_IEM);
1056 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1057 { /* likely */ }
1058 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1059 {
1060 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1061 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1062 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1063 }
1064 else
1065 {
1066 Log((RT_SUCCESS(rcStrict)
1067 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1068 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1069 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1070 return rcStrict;
1071 }
1072 }
1073 else
1074 {
1075 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode], GCPhys, cbToTryRead);
1076 if (RT_SUCCESS(rc))
1077 { /* likely */ }
1078 else
1079 {
1080 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
1081 return rc;
1082 }
1083 }
1084 pVCpu->iem.s.cbOpcode += cbToTryRead;
1085 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
1086
1087 return VINF_SUCCESS;
1088}
1089
1090#endif /* !IEM_WITH_CODE_TLB */
1091#ifndef IEM_WITH_SETJMP
1092
1093/**
1094 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
1095 *
1096 * @returns Strict VBox status code.
1097 * @param pVCpu The cross context virtual CPU structure of the
1098 * calling thread.
1099 * @param pb Where to return the opcode byte.
1100 */
1101VBOXSTRICTRC iemOpcodeGetNextU8Slow(PVMCPUCC pVCpu, uint8_t *pb) RT_NOEXCEPT
1102{
1103 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1104 if (rcStrict == VINF_SUCCESS)
1105 {
1106 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1107 *pb = pVCpu->iem.s.abOpcode[offOpcode];
1108 pVCpu->iem.s.offOpcode = offOpcode + 1;
1109 }
1110 else
1111 *pb = 0;
1112 return rcStrict;
1113}
1114
1115#else /* IEM_WITH_SETJMP */
1116
1117/**
1118 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
1119 *
1120 * @returns The opcode byte.
1121 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1122 */
1123uint8_t iemOpcodeGetNextU8SlowJmp(PVMCPUCC pVCpu) RT_NOEXCEPT
1124{
1125# ifdef IEM_WITH_CODE_TLB
1126 uint8_t u8;
1127 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
1128 return u8;
1129# else
1130 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1131 if (rcStrict == VINF_SUCCESS)
1132 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
1133 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1134# endif
1135}
1136
1137#endif /* IEM_WITH_SETJMP */
1138
1139#ifndef IEM_WITH_SETJMP
1140
1141/**
1142 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
1143 *
1144 * @returns Strict VBox status code.
1145 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1146 * @param pu16 Where to return the opcode byte sign-extended to a word.
1147 */
1148VBOXSTRICTRC iemOpcodeGetNextS8SxU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
1149{
1150 uint8_t u8;
1151 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1152 if (rcStrict == VINF_SUCCESS)
1153 *pu16 = (int8_t)u8;
1154 return rcStrict;
1155}
1156
1157
1158/**
1159 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
1160 *
1161 * @returns Strict VBox status code.
1162 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1163 * @param pu32 Where to return the opcode dword.
1164 */
1165VBOXSTRICTRC iemOpcodeGetNextS8SxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1166{
1167 uint8_t u8;
1168 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1169 if (rcStrict == VINF_SUCCESS)
1170 *pu32 = (int8_t)u8;
1171 return rcStrict;
1172}
1173
1174
1175/**
1176 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
1177 *
1178 * @returns Strict VBox status code.
1179 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1180 * @param pu64 Where to return the opcode qword.
1181 */
1182VBOXSTRICTRC iemOpcodeGetNextS8SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1183{
1184 uint8_t u8;
1185 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1186 if (rcStrict == VINF_SUCCESS)
1187 *pu64 = (int8_t)u8;
1188 return rcStrict;
1189}
1190
1191#endif /* !IEM_WITH_SETJMP */
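/* Note: the three S8Sx* helpers above all fetch a single byte and sign-extend
 * it via the (int8_t) cast; e.g. an opcode byte 0xFE yields 0xFFFE, 0xFFFFFFFE
 * and 0xFFFFFFFFFFFFFFFE respectively, only the destination width differs. */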
1192
1193
1194#ifndef IEM_WITH_SETJMP
1195
1196/**
1197 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
1198 *
1199 * @returns Strict VBox status code.
1200 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1201 * @param pu16 Where to return the opcode word.
1202 */
1203VBOXSTRICTRC iemOpcodeGetNextU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
1204{
1205 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1206 if (rcStrict == VINF_SUCCESS)
1207 {
1208 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1209# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1210 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1211# else
1212 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1213# endif
1214 pVCpu->iem.s.offOpcode = offOpcode + 2;
1215 }
1216 else
1217 *pu16 = 0;
1218 return rcStrict;
1219}
1220
1221#else /* IEM_WITH_SETJMP */
1222
1223/**
1224 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error
1225 *
1226 * @returns The opcode word.
1227 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1228 */
1229uint16_t iemOpcodeGetNextU16SlowJmp(PVMCPUCC pVCpu) RT_NOEXCEPT
1230{
1231# ifdef IEM_WITH_CODE_TLB
1232 uint16_t u16;
1233 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
1234 return u16;
1235# else
1236 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1237 if (rcStrict == VINF_SUCCESS)
1238 {
1239 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1240 pVCpu->iem.s.offOpcode += 2;
1241# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1242 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1243# else
1244 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1245# endif
1246 }
1247 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1248# endif
1249}
1250
1251#endif /* IEM_WITH_SETJMP */
1252
1253#ifndef IEM_WITH_SETJMP
1254
1255/**
1256 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
1257 *
1258 * @returns Strict VBox status code.
1259 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1260 * @param pu32 Where to return the opcode double word.
1261 */
1262VBOXSTRICTRC iemOpcodeGetNextU16ZxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1263{
1264 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1265 if (rcStrict == VINF_SUCCESS)
1266 {
1267 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1268 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1269 pVCpu->iem.s.offOpcode = offOpcode + 2;
1270 }
1271 else
1272 *pu32 = 0;
1273 return rcStrict;
1274}
1275
1276
1277/**
1278 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
1279 *
1280 * @returns Strict VBox status code.
1281 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1282 * @param pu64 Where to return the opcode quad word.
1283 */
1284VBOXSTRICTRC iemOpcodeGetNextU16ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1285{
1286 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1287 if (rcStrict == VINF_SUCCESS)
1288 {
1289 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1290 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1291 pVCpu->iem.s.offOpcode = offOpcode + 2;
1292 }
1293 else
1294 *pu64 = 0;
1295 return rcStrict;
1296}
1297
1298#endif /* !IEM_WITH_SETJMP */
1299
1300#ifndef IEM_WITH_SETJMP
1301
1302/**
1303 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
1304 *
1305 * @returns Strict VBox status code.
1306 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1307 * @param pu32 Where to return the opcode dword.
1308 */
1309VBOXSTRICTRC iemOpcodeGetNextU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1310{
1311 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1312 if (rcStrict == VINF_SUCCESS)
1313 {
1314 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1315# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1316 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1317# else
1318 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1319 pVCpu->iem.s.abOpcode[offOpcode + 1],
1320 pVCpu->iem.s.abOpcode[offOpcode + 2],
1321 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1322# endif
1323 pVCpu->iem.s.offOpcode = offOpcode + 4;
1324 }
1325 else
1326 *pu32 = 0;
1327 return rcStrict;
1328}
1329
1330#else /* IEM_WITH_SETJMP */
1331
1332/**
1333 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
1334 *
1335 * @returns The opcode dword.
1336 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1337 */
1338uint32_t iemOpcodeGetNextU32SlowJmp(PVMCPUCC pVCpu) RT_NOEXCEPT
1339{
1340# ifdef IEM_WITH_CODE_TLB
1341 uint32_t u32;
1342 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
1343 return u32;
1344# else
1345 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1346 if (rcStrict == VINF_SUCCESS)
1347 {
1348 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1349 pVCpu->iem.s.offOpcode = offOpcode + 4;
1350# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1351 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1352# else
1353 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1354 pVCpu->iem.s.abOpcode[offOpcode + 1],
1355 pVCpu->iem.s.abOpcode[offOpcode + 2],
1356 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1357# endif
1358 }
1359 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1360# endif
1361}
1362
1363#endif /* IEM_WITH_SETJMP */
1364
1365#ifndef IEM_WITH_SETJMP
1366
1367/**
1368 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
1369 *
1370 * @returns Strict VBox status code.
1371 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1372 * @param pu64 Where to return the opcode dword zero-extended to a qword.
1373 */
1374VBOXSTRICTRC iemOpcodeGetNextU32ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1375{
1376 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1377 if (rcStrict == VINF_SUCCESS)
1378 {
1379 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1380 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1381 pVCpu->iem.s.abOpcode[offOpcode + 1],
1382 pVCpu->iem.s.abOpcode[offOpcode + 2],
1383 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1384 pVCpu->iem.s.offOpcode = offOpcode + 4;
1385 }
1386 else
1387 *pu64 = 0;
1388 return rcStrict;
1389}
1390
1391
1392/**
1393 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
1394 *
1395 * @returns Strict VBox status code.
1396 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1397 * @param pu64 Where to return the opcode qword.
1398 */
1399VBOXSTRICTRC iemOpcodeGetNextS32SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1400{
1401 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1402 if (rcStrict == VINF_SUCCESS)
1403 {
1404 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1405 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1406 pVCpu->iem.s.abOpcode[offOpcode + 1],
1407 pVCpu->iem.s.abOpcode[offOpcode + 2],
1408 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1409 pVCpu->iem.s.offOpcode = offOpcode + 4;
1410 }
1411 else
1412 *pu64 = 0;
1413 return rcStrict;
1414}
1415
1416#endif /* !IEM_WITH_SETJMP */
1417
1418#ifndef IEM_WITH_SETJMP
1419
1420/**
1421 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
1422 *
1423 * @returns Strict VBox status code.
1424 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1425 * @param pu64 Where to return the opcode qword.
1426 */
1427VBOXSTRICTRC iemOpcodeGetNextU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1428{
1429 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
1430 if (rcStrict == VINF_SUCCESS)
1431 {
1432 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1433# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1434 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1435# else
1436 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1437 pVCpu->iem.s.abOpcode[offOpcode + 1],
1438 pVCpu->iem.s.abOpcode[offOpcode + 2],
1439 pVCpu->iem.s.abOpcode[offOpcode + 3],
1440 pVCpu->iem.s.abOpcode[offOpcode + 4],
1441 pVCpu->iem.s.abOpcode[offOpcode + 5],
1442 pVCpu->iem.s.abOpcode[offOpcode + 6],
1443 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1444# endif
1445 pVCpu->iem.s.offOpcode = offOpcode + 8;
1446 }
1447 else
1448 *pu64 = 0;
1449 return rcStrict;
1450}
1451
1452#else /* IEM_WITH_SETJMP */
1453
1454/**
1455 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
1456 *
1457 * @returns The opcode qword.
1458 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1459 */
1460uint64_t iemOpcodeGetNextU64SlowJmp(PVMCPUCC pVCpu) RT_NOEXCEPT
1461{
1462# ifdef IEM_WITH_CODE_TLB
1463 uint64_t u64;
1464 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
1465 return u64;
1466# else
1467 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
1468 if (rcStrict == VINF_SUCCESS)
1469 {
1470 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1471 pVCpu->iem.s.offOpcode = offOpcode + 8;
1472# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1473 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1474# else
1475 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1476 pVCpu->iem.s.abOpcode[offOpcode + 1],
1477 pVCpu->iem.s.abOpcode[offOpcode + 2],
1478 pVCpu->iem.s.abOpcode[offOpcode + 3],
1479 pVCpu->iem.s.abOpcode[offOpcode + 4],
1480 pVCpu->iem.s.abOpcode[offOpcode + 5],
1481 pVCpu->iem.s.abOpcode[offOpcode + 6],
1482 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1483# endif
1484 }
1485 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1486# endif
1487}
1488
1489#endif /* IEM_WITH_SETJMP */
1490
1491
1492
1493/** @name Misc Worker Functions.
1494 * @{
1495 */
1496
1497/**
1498 * Gets the exception class for the specified exception vector.
1499 *
1500 * @returns The class of the specified exception.
1501 * @param uVector The exception vector.
1502 */
1503static IEMXCPTCLASS iemGetXcptClass(uint8_t uVector) RT_NOEXCEPT
1504{
1505 Assert(uVector <= X86_XCPT_LAST);
1506 switch (uVector)
1507 {
1508 case X86_XCPT_DE:
1509 case X86_XCPT_TS:
1510 case X86_XCPT_NP:
1511 case X86_XCPT_SS:
1512 case X86_XCPT_GP:
1513 case X86_XCPT_SX: /* AMD only */
1514 return IEMXCPTCLASS_CONTRIBUTORY;
1515
1516 case X86_XCPT_PF:
1517 case X86_XCPT_VE: /* Intel only */
1518 return IEMXCPTCLASS_PAGE_FAULT;
1519
1520 case X86_XCPT_DF:
1521 return IEMXCPTCLASS_DOUBLE_FAULT;
1522 }
1523 return IEMXCPTCLASS_BENIGN;
1524}
1525
1526
1527/**
1528 * Evaluates how to handle an exception caused during delivery of another event
1529 * (exception / interrupt).
1530 *
1531 * @returns How to handle the recursive exception.
1532 * @param pVCpu The cross context virtual CPU structure of the
1533 * calling thread.
1534 * @param fPrevFlags The flags of the previous event.
1535 * @param uPrevVector The vector of the previous event.
1536 * @param fCurFlags The flags of the current exception.
1537 * @param uCurVector The vector of the current exception.
1538 * @param pfXcptRaiseInfo Where to store additional information about the
1539 * exception condition. Optional.
1540 */
1541VMM_INT_DECL(IEMXCPTRAISE) IEMEvaluateRecursiveXcpt(PVMCPUCC pVCpu, uint32_t fPrevFlags, uint8_t uPrevVector, uint32_t fCurFlags,
1542 uint8_t uCurVector, PIEMXCPTRAISEINFO pfXcptRaiseInfo)
1543{
1544 /*
1545 * Only CPU exceptions can be raised while delivering other events, software interrupt
1546 * (INTn/INT3/INTO/ICEBP) generated exceptions cannot occur as the current (second) exception.
1547 */
1548 AssertReturn(fCurFlags & IEM_XCPT_FLAGS_T_CPU_XCPT, IEMXCPTRAISE_INVALID);
1549 Assert(pVCpu); RT_NOREF(pVCpu);
1550 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x\n", uPrevVector, uCurVector));
1551
1552 IEMXCPTRAISE enmRaise = IEMXCPTRAISE_CURRENT_XCPT;
1553 IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
1554 if (fPrevFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
1555 {
1556 IEMXCPTCLASS enmPrevXcptClass = iemGetXcptClass(uPrevVector);
1557 if (enmPrevXcptClass != IEMXCPTCLASS_BENIGN)
1558 {
1559 IEMXCPTCLASS enmCurXcptClass = iemGetXcptClass(uCurVector);
1560 if ( enmPrevXcptClass == IEMXCPTCLASS_PAGE_FAULT
1561 && ( enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT
1562 || enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY))
1563 {
1564 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
1565 fRaiseInfo = enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT ? IEMXCPTRAISEINFO_PF_PF
1566 : IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT;
1567 Log2(("IEMEvaluateRecursiveXcpt: Vectoring page fault. uPrevVector=%#x uCurVector=%#x uCr2=%#RX64\n", uPrevVector,
1568 uCurVector, pVCpu->cpum.GstCtx.cr2));
1569 }
1570 else if ( enmPrevXcptClass == IEMXCPTCLASS_CONTRIBUTORY
1571 && enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY)
1572 {
1573 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
1574 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x -> #DF\n", uPrevVector, uCurVector));
1575 }
1576 else if ( enmPrevXcptClass == IEMXCPTCLASS_DOUBLE_FAULT
1577 && ( enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY
1578 || enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT))
1579 {
1580 enmRaise = IEMXCPTRAISE_TRIPLE_FAULT;
1581 Log2(("IEMEvaluateRecursiveXcpt: #DF handler raised a %#x exception -> triple fault\n", uCurVector));
1582 }
1583 }
1584 else
1585 {
1586 if (uPrevVector == X86_XCPT_NMI)
1587 {
1588 fRaiseInfo = IEMXCPTRAISEINFO_NMI_XCPT;
1589 if (uCurVector == X86_XCPT_PF)
1590 {
1591 fRaiseInfo |= IEMXCPTRAISEINFO_NMI_PF;
1592 Log2(("IEMEvaluateRecursiveXcpt: NMI delivery caused a page fault\n"));
1593 }
1594 }
1595 else if ( uPrevVector == X86_XCPT_AC
1596 && uCurVector == X86_XCPT_AC)
1597 {
1598 enmRaise = IEMXCPTRAISE_CPU_HANG;
1599 fRaiseInfo = IEMXCPTRAISEINFO_AC_AC;
1600 Log2(("IEMEvaluateRecursiveXcpt: Recursive #AC - Bad guest\n"));
1601 }
1602 }
1603 }
1604 else if (fPrevFlags & IEM_XCPT_FLAGS_T_EXT_INT)
1605 {
1606 fRaiseInfo = IEMXCPTRAISEINFO_EXT_INT_XCPT;
1607 if (uCurVector == X86_XCPT_PF)
1608 fRaiseInfo |= IEMXCPTRAISEINFO_EXT_INT_PF;
1609 }
1610 else
1611 {
1612 Assert(fPrevFlags & IEM_XCPT_FLAGS_T_SOFT_INT);
1613 fRaiseInfo = IEMXCPTRAISEINFO_SOFT_INT_XCPT;
1614 }
1615
1616 if (pfXcptRaiseInfo)
1617 *pfXcptRaiseInfo = fRaiseInfo;
1618 return enmRaise;
1619}
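/* Illustrative sketch (not built; the calling context is hypothetical): classifying
 * a #GP raised while a #PF was being delivered.  Per the logic above, a page fault
 * followed by a contributory exception escalates to a double fault:
 *
 *      IEMXCPTRAISEINFO fRaiseInfo;
 *      IEMXCPTRAISE     enmRaise = IEMEvaluateRecursiveXcpt(pVCpu,
 *                                                           IEM_XCPT_FLAGS_T_CPU_XCPT, X86_XCPT_PF,
 *                                                           IEM_XCPT_FLAGS_T_CPU_XCPT, X86_XCPT_GP,
 *                                                           &fRaiseInfo);
 *      Assert(enmRaise == IEMXCPTRAISE_DOUBLE_FAULT);
 *      Assert(fRaiseInfo == IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT);
 */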
1620
1621
1622/**
1623 * Enters the CPU shutdown state initiated by a triple fault or other
1624 * unrecoverable conditions.
1625 *
1626 * @returns Strict VBox status code.
1627 * @param pVCpu The cross context virtual CPU structure of the
1628 * calling thread.
1629 */
1630static VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPUCC pVCpu) RT_NOEXCEPT
1631{
1632 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
1633 IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(pVCpu, VMX_EXIT_TRIPLE_FAULT, 0 /* u64ExitQual */);
1634
1635 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
1636 {
1637 Log2(("shutdown: Guest intercept -> #VMEXIT\n"));
1638 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
1639 }
1640
1641 RT_NOREF(pVCpu);
1642 return VINF_EM_TRIPLE_FAULT;
1643}
1644
1645
1646/**
1647 * Validates a new SS segment.
1648 *
1649 * @returns VBox strict status code.
1650 * @param pVCpu The cross context virtual CPU structure of the
1651 * calling thread.
1652 * @param NewSS The new SS selector.
1653 * @param uCpl The CPL to load the stack for.
1654 * @param pDesc Where to return the descriptor.
1655 */
1656static VBOXSTRICTRC iemMiscValidateNewSS(PVMCPUCC pVCpu, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc) RT_NOEXCEPT
1657{
1658 /* Null selectors are not allowed (we're not called for dispatching
1659 interrupts with SS=0 in long mode). */
1660 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
1661 {
1662 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
1663 return iemRaiseTaskSwitchFault0(pVCpu);
1664 }
1665
1666 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
1667 if ((NewSS & X86_SEL_RPL) != uCpl)
1668 {
1669 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl));
1670 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1671 }
1672
1673 /*
1674 * Read the descriptor.
1675 */
1676 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
1677 if (rcStrict != VINF_SUCCESS)
1678 return rcStrict;
1679
1680 /*
1681 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
1682 */
1683 if (!pDesc->Legacy.Gen.u1DescType)
1684 {
1685 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
1686 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1687 }
1688
1689 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
1690 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
1691 {
1692 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
1693 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1694 }
1695 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
1696 {
1697 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
1698 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1699 }
1700
1701 /* Is it there? */
1702 /** @todo testcase: Is this checked before the canonical / limit check below? */
1703 if (!pDesc->Legacy.Gen.u1Present)
1704 {
1705 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
1706 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
1707 }
1708
1709 return VINF_SUCCESS;
1710}
1711
1712/** @} */
1713
1714
1715/** @name Raising Exceptions.
1716 *
1717 * @{
1718 */
1719
1720
1721/**
1722 * Loads the specified stack far pointer from the TSS.
1723 *
1724 * @returns VBox strict status code.
1725 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1726 * @param uCpl The CPL to load the stack for.
1727 * @param pSelSS Where to return the new stack segment.
1728 * @param puEsp Where to return the new stack pointer.
1729 */
1730static VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPUCC pVCpu, uint8_t uCpl, PRTSEL pSelSS, uint32_t *puEsp) RT_NOEXCEPT
1731{
1732 VBOXSTRICTRC rcStrict;
1733 Assert(uCpl < 4);
1734
1735 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
1736 switch (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type)
1737 {
1738 /*
1739 * 16-bit TSS (X86TSS16).
1740 */
1741 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
1742 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
1743 {
1744 uint32_t off = uCpl * 4 + 2;
1745 if (off + 4 <= pVCpu->cpum.GstCtx.tr.u32Limit)
1746 {
1747 /** @todo check actual access pattern here. */
1748 uint32_t u32Tmp = 0; /* gcc maybe... */
1749 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
1750 if (rcStrict == VINF_SUCCESS)
1751 {
1752 *puEsp = RT_LOWORD(u32Tmp);
1753 *pSelSS = RT_HIWORD(u32Tmp);
1754 return VINF_SUCCESS;
1755 }
1756 }
1757 else
1758 {
1759 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
1760 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
1761 }
1762 break;
1763 }
1764
1765 /*
1766 * 32-bit TSS (X86TSS32).
1767 */
1768 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
1769 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
1770 {
1771 uint32_t off = uCpl * 8 + 4;
1772 if (off + 7 <= pVCpu->cpum.GstCtx.tr.u32Limit)
1773 {
1774/** @todo check actual access pattern here. */
1775 uint64_t u64Tmp;
1776 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
1777 if (rcStrict == VINF_SUCCESS)
1778 {
1779 *puEsp = u64Tmp & UINT32_MAX;
1780 *pSelSS = (RTSEL)(u64Tmp >> 32);
1781 return VINF_SUCCESS;
1782 }
1783 }
1784 else
1785 {
1786 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
1787 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
1788 }
1789 break;
1790 }
1791
1792 default:
1793 AssertFailed();
1794 rcStrict = VERR_IEM_IPE_4;
1795 break;
1796 }
1797
1798 *puEsp = 0; /* make gcc happy */
1799 *pSelSS = 0; /* make gcc happy */
1800 return rcStrict;
1801}
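/* TSS stack-field offsets behind the computations above (Intel SDM vol. 3,
 * "Task-State Segment"); the worked example is illustrative only:
 *
 *      16-bit TSS:  off = uCpl * 4 + 2   ->  SP0=0x02 SS0=0x04, SP1=0x06 SS1=0x08, SP2=0x0a SS2=0x0c
 *      32-bit TSS:  off = uCpl * 8 + 4   ->  ESP0=0x04 SS0=0x08, ESP1=0x0c SS1=0x10, ESP2=0x14 SS2=0x18
 *
 * So a ring-0 stack load from a 32-bit TSS reads the qword at TR.base + 4, with
 * ESP0 in the low dword and SS0 in the low word of the high dword, matching the
 * iemMemFetchSysU64() split above.
 */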
1802
1803
1804/**
1805 * Loads the specified stack pointer from the 64-bit TSS.
1806 *
1807 * @returns VBox strict status code.
1808 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1809 * @param uCpl The CPL to load the stack for.
1810 * @param uIst The interrupt stack table index, 0 if to use uCpl.
1811 * @param puRsp Where to return the new stack pointer.
1812 */
1813static VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPUCC pVCpu, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp) RT_NOEXCEPT
1814{
1815 Assert(uCpl < 4);
1816 Assert(uIst < 8);
1817 *puRsp = 0; /* make gcc happy */
1818
1819 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
1820 AssertReturn(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
1821
1822 uint32_t off;
1823 if (uIst)
1824 off = (uIst - 1) * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, ist1);
1825 else
1826 off = uCpl * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, rsp0);
1827 if (off + sizeof(uint64_t) > pVCpu->cpum.GstCtx.tr.u32Limit)
1828 {
1829 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pVCpu->cpum.GstCtx.tr.u32Limit));
1830 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
1831 }
1832
1833 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
1834}
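/* 64-bit TSS offsets behind the computation above (long mode TSS layout); the
 * example vector routing is hypothetical:
 *
 *      uIst == 0:  off = uCpl * 8 + 0x04         ->  RSP0=0x04, RSP1=0x0c, RSP2=0x14
 *      uIst != 0:  off = (uIst - 1) * 8 + 0x24   ->  IST1=0x24 ... IST7=0x54
 *
 * E.g. an exception whose IDT entry selects IST2 fetches the qword at TR.base + 0x2c.
 */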
1835
1836
1837/**
1838 * Adjust the CPU state according to the exception being raised.
1839 *
1840 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1841 * @param u8Vector The exception that has been raised.
1842 */
1843DECLINLINE(void) iemRaiseXcptAdjustState(PVMCPUCC pVCpu, uint8_t u8Vector)
1844{
1845 switch (u8Vector)
1846 {
1847 case X86_XCPT_DB:
1848 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);
1849 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
1850 break;
1851 /** @todo Read the AMD and Intel exception reference... */
1852 }
1853}
1854
1855
1856/**
1857 * Implements exceptions and interrupts for real mode.
1858 *
1859 * @returns VBox strict status code.
1860 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1861 * @param cbInstr The number of bytes to offset rIP by in the return
1862 * address.
1863 * @param u8Vector The interrupt / exception vector number.
1864 * @param fFlags The flags.
1865 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
1866 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
1867 */
1868static VBOXSTRICTRC
1869iemRaiseXcptOrIntInRealMode(PVMCPUCC pVCpu,
1870 uint8_t cbInstr,
1871 uint8_t u8Vector,
1872 uint32_t fFlags,
1873 uint16_t uErr,
1874 uint64_t uCr2) RT_NOEXCEPT
1875{
1876 NOREF(uErr); NOREF(uCr2);
1877 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
1878
1879 /*
1880 * Read the IDT entry.
1881 */
1882 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
1883 {
1884 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
1885 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
1886 }
1887 RTFAR16 Idte;
1888 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(4) * u8Vector);
1889 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
1890 {
1891 Log(("iemRaiseXcptOrIntInRealMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
1892 return rcStrict;
1893 }
1894
1895 /*
1896 * Push the stack frame.
1897 */
1898 uint16_t *pu16Frame;
1899 uint64_t uNewRsp;
1900 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, (void **)&pu16Frame, &uNewRsp);
1901 if (rcStrict != VINF_SUCCESS)
1902 return rcStrict;
1903
1904 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
1905#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
1906 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
1907 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
1908 fEfl |= UINT16_C(0xf000);
1909#endif
1910 pu16Frame[2] = (uint16_t)fEfl;
1911 pu16Frame[1] = (uint16_t)pVCpu->cpum.GstCtx.cs.Sel;
1912 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
1913 rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu16Frame, uNewRsp);
1914 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
1915 return rcStrict;
1916
1917 /*
1918 * Load the vector address into cs:ip and make exception specific state
1919 * adjustments.
1920 */
1921 pVCpu->cpum.GstCtx.cs.Sel = Idte.sel;
1922 pVCpu->cpum.GstCtx.cs.ValidSel = Idte.sel;
1923 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
1924 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)Idte.sel << 4;
1925 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
1926 pVCpu->cpum.GstCtx.rip = Idte.off;
1927 fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC);
1928 IEMMISC_SET_EFL(pVCpu, fEfl);
1929
1930 /** @todo do we actually do this in real mode? */
1931 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
1932 iemRaiseXcptAdjustState(pVCpu, u8Vector);
1933
1934 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
1935}
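/* Worked example of the real-mode path above (vector 0x21 picked arbitrarily,
 * IDTR.base assumed 0 as is typical in real mode):
 *
 *      IVT entry read:   4 * 0x21 = 0x84   ->  new IP at 0x84, new CS at 0x86
 *      Frame pushed (SP -= 6):  [SP+4] = FLAGS, [SP+2] = old CS, [SP+0] = return IP
 *
 * which matches the iemMemFetchDataU32() of the RTFAR16 entry and the
 * pu16Frame[2..0] stores above.
 */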
1936
1937
1938/**
1939 * Loads a NULL data selector into a segment register when coming from V8086 mode.
1940 *
1941 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1942 * @param pSReg Pointer to the segment register.
1943 */
1944DECLINLINE(void) iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPUCC pVCpu, PCPUMSELREG pSReg)
1945{
1946 pSReg->Sel = 0;
1947 pSReg->ValidSel = 0;
1948 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
1949 {
1950 /* VT-x (Intel 3960x) doesn't change the base and limit; it clears and sets the following attributes: */
1951 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
1952 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
1953 }
1954 else
1955 {
1956 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
1957 /** @todo check this on AMD-V */
1958 pSReg->u64Base = 0;
1959 pSReg->u32Limit = 0;
1960 }
1961}
1962
1963
1964/**
1965 * Loads a segment selector during a task switch in V8086 mode.
1966 *
1967 * @param pSReg Pointer to the segment register.
1968 * @param uSel The selector value to load.
1969 */
1970DECLINLINE(void) iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
1971{
1972 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
1973 pSReg->Sel = uSel;
1974 pSReg->ValidSel = uSel;
1975 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
1976 pSReg->u64Base = uSel << 4;
1977 pSReg->u32Limit = 0xffff;
1978 pSReg->Attr.u = 0xf3;
1979}
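/* Example of the V8086 hidden-register mapping above (the selector value is arbitrary):
 *
 *      uSel = 0xb800  ->  base = 0xb8000, limit = 0xffff, attr = 0xf3
 *                         (P=1, DPL=3, S=1, type=3: read/write data, accessed)
 */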
1980
1981
1982/**
1983 * Loads a segment selector during a task switch in protected mode.
1984 *
1985 * In this task switch scenario, we would throw \#TS exceptions rather than
1986 * \#GPs.
1987 *
1988 * @returns VBox strict status code.
1989 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1990 * @param pSReg Pointer to the segment register.
1991 * @param uSel The new selector value.
1992 *
1993 * @remarks This does _not_ handle CS or SS.
1994 * @remarks This expects pVCpu->iem.s.uCpl to be up to date.
1995 */
1996static VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPUCC pVCpu, PCPUMSELREG pSReg, uint16_t uSel) RT_NOEXCEPT
1997{
1998 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
1999
2000 /* Null data selector. */
2001 if (!(uSel & X86_SEL_MASK_OFF_RPL))
2002 {
2003 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
2004 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
2005 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2006 return VINF_SUCCESS;
2007 }
2008
2009 /* Fetch the descriptor. */
2010 IEMSELDESC Desc;
2011 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
2012 if (rcStrict != VINF_SUCCESS)
2013 {
2014 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
2015 VBOXSTRICTRC_VAL(rcStrict)));
2016 return rcStrict;
2017 }
2018
2019 /* Must be a data segment or readable code segment. */
2020 if ( !Desc.Legacy.Gen.u1DescType
2021 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
2022 {
2023 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
2024 Desc.Legacy.Gen.u4Type));
2025 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2026 }
2027
2028 /* Check privileges for data segments and non-conforming code segments. */
2029 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2030 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2031 {
2032 /* The RPL and the new CPL must be less than or equal to the DPL. */
2033 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
2034 || (pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl))
2035 {
2036 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
2037 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
2038 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2039 }
2040 }
2041
2042 /* Is it there? */
2043 if (!Desc.Legacy.Gen.u1Present)
2044 {
2045 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
2046 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2047 }
2048
2049 /* The base and limit. */
2050 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
2051 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
2052
2053 /*
2054 * Ok, everything checked out fine. Now set the accessed bit before
2055 * committing the result into the registers.
2056 */
2057 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2058 {
2059 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
2060 if (rcStrict != VINF_SUCCESS)
2061 return rcStrict;
2062 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2063 }
2064
2065 /* Commit */
2066 pSReg->Sel = uSel;
2067 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
2068 pSReg->u32Limit = cbLimit;
2069 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
2070 pSReg->ValidSel = uSel;
2071 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2072 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2073 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
2074
2075 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
2076 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2077 return VINF_SUCCESS;
2078}
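/* Privilege-check example for the non-conforming path above (numbers are made up):
 * a task entered at CPL=3 that loads DS with an RPL=3 selector needs a descriptor
 * with DPL >= 3.  A DPL=2 data segment therefore raises #TS(sel & ~RPL) here,
 * rather than the #GP a plain MOV DS would produce, as noted in the function
 * documentation above.
 */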
2079
2080
2081/**
2082 * Performs a task switch.
2083 *
2084 * If the task switch is the result of a JMP, CALL or IRET instruction, the
2085 * caller is responsible for performing the necessary checks (like DPL, TSS
2086 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
2087 * reference for JMP, CALL, IRET.
2088 *
2089 * If the task switch is due to a software interrupt or hardware exception,
2090 * the caller is responsible for validating the TSS selector and descriptor. See
2091 * Intel Instruction reference for INT n.
2092 *
2093 * @returns VBox strict status code.
2094 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2095 * @param enmTaskSwitch The cause of the task switch.
2096 * @param uNextEip The EIP effective after the task switch.
2097 * @param fFlags The flags, see IEM_XCPT_FLAGS_XXX.
2098 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2099 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2100 * @param SelTSS The TSS selector of the new task.
2101 * @param pNewDescTSS Pointer to the new TSS descriptor.
2102 */
2103VBOXSTRICTRC
2104iemTaskSwitch(PVMCPUCC pVCpu,
2105 IEMTASKSWITCH enmTaskSwitch,
2106 uint32_t uNextEip,
2107 uint32_t fFlags,
2108 uint16_t uErr,
2109 uint64_t uCr2,
2110 RTSEL SelTSS,
2111 PIEMSELDESC pNewDescTSS) RT_NOEXCEPT
2112{
2113 Assert(!IEM_IS_REAL_MODE(pVCpu));
2114 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
2115 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
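    /*
     * Rough sequence implemented below (see Intel SDM "Task Switching"):
     *   1. Limit-check the new TSS, hand over to the VMX/SVM intercepts if armed,
     *      then limit-check the current TSS.
     *   2. For JMP/IRET: clear the busy bit of the outgoing TSS descriptor
     *      (and NT in the EFLAGS image for IRET).
     *   3. Save the dynamic fields (EIP, EFLAGS, GPRs, segment selectors) into the
     *      current TSS and, for CALL/INT_XCPT, write the back link into the new TSS.
     *   4. For everything but IRET: set the busy bit of the incoming TSS descriptor.
     *   5. Load TR, CR0.TS, the general registers, EFLAGS, CR3, LDTR and finally
     *      the segment registers, raising #TS/#NP/#GP/#SS as appropriate.
     */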
2116
2117 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
2118 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2119 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2120 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2121 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2122
2123 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2124 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2125
2126 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTSS,
2127 fIsNewTSS386, pVCpu->cpum.GstCtx.eip, uNextEip));
2128
2129 /* Update CR2 in case it's a page-fault. */
2130 /** @todo This should probably be done much earlier in IEM/PGM. See
2131 * @bugref{5653#c49}. */
2132 if (fFlags & IEM_XCPT_FLAGS_CR2)
2133 pVCpu->cpum.GstCtx.cr2 = uCr2;
2134
2135 /*
2136 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
2137 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
2138 */
2139 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
2140 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
2141 if (uNewTSSLimit < uNewTSSLimitMin)
2142 {
2143 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
2144 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
2145 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
2146 }
2147
2148 /*
2149 * Task switches in VMX non-root mode always cause a task-switch VM-exit.
2150 * The new TSS must have been read and validated (DPL, limits etc.) before a
2151 * task-switch VM-exit commences.
2152 *
2153 * See Intel spec. 25.4.2 "Treatment of Task Switches".
2154 */
2155 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
2156 {
2157 Log(("iemTaskSwitch: Guest intercept (source=%u, sel=%#x) -> VM-exit.\n", enmTaskSwitch, SelTSS));
2158 IEM_VMX_VMEXIT_TASK_SWITCH_RET(pVCpu, enmTaskSwitch, SelTSS, uNextEip - pVCpu->cpum.GstCtx.eip);
2159 }
2160
2161 /*
2162 * The SVM nested-guest intercept for task-switch takes priority over all exceptions
2163 * after validating the incoming (new) TSS, see AMD spec. 15.14.1 "Task Switch Intercept".
2164 */
2165 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TASK_SWITCH))
2166 {
2167 uint32_t const uExitInfo1 = SelTSS;
2168 uint32_t uExitInfo2 = uErr;
2169 switch (enmTaskSwitch)
2170 {
2171 case IEMTASKSWITCH_JUMP: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_JUMP; break;
2172 case IEMTASKSWITCH_IRET: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_IRET; break;
2173 default: break;
2174 }
2175 if (fFlags & IEM_XCPT_FLAGS_ERR)
2176 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_HAS_ERROR_CODE;
2177 if (pVCpu->cpum.GstCtx.eflags.Bits.u1RF)
2178 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_EFLAGS_RF;
2179
2180 Log(("iemTaskSwitch: Guest intercept -> #VMEXIT. uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uExitInfo1, uExitInfo2));
2181 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_TASK_SWITCH, uExitInfo1, uExitInfo2);
2182 RT_NOREF2(uExitInfo1, uExitInfo2);
2183 }
2184
2185 /*
2186 * Check the current TSS limit. The last bytes written to the current TSS during the
2187 * task switch are 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
2188 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2189 *
2190 * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
2191 * end up with smaller than "legal" TSS limits.
2192 */
2193 uint32_t const uCurTSSLimit = pVCpu->cpum.GstCtx.tr.u32Limit;
2194 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
2195 if (uCurTSSLimit < uCurTSSLimitMin)
2196 {
2197 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
2198 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
2199 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
2200 }
2201
2202 /*
2203 * Verify that the new TSS can be accessed and map it. Map only the required contents
2204 * and not the entire TSS.
2205 */
2206 void *pvNewTSS;
2207 uint32_t const cbNewTSS = uNewTSSLimitMin + 1;
2208 RTGCPTR const GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
2209 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
2210 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
2211 * not perform correct translation if this happens. See Intel spec. 7.2.1
2212 * "Task-State Segment". */
2213 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW);
2214 if (rcStrict != VINF_SUCCESS)
2215 {
2216 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
2217 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
2218 return rcStrict;
2219 }
2220
2221 /*
2222 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
2223 */
2224 uint32_t u32EFlags = pVCpu->cpum.GstCtx.eflags.u32;
2225 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
2226 || enmTaskSwitch == IEMTASKSWITCH_IRET)
2227 {
2228 PX86DESC pDescCurTSS;
2229 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
2230 pVCpu->cpum.GstCtx.gdtr.pGdt + (pVCpu->cpum.GstCtx.tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
2231 if (rcStrict != VINF_SUCCESS)
2232 {
2233 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2234 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2235 return rcStrict;
2236 }
2237
2238 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2239 rcStrict = iemMemCommitAndUnmap(pVCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
2240 if (rcStrict != VINF_SUCCESS)
2241 {
2242 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2243 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2244 return rcStrict;
2245 }
2246
2247 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
2248 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
2249 {
2250 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2251 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2252 u32EFlags &= ~X86_EFL_NT;
2253 }
2254 }
2255
2256 /*
2257 * Save the CPU state into the current TSS.
2258 */
2259 RTGCPTR const GCPtrCurTSS = pVCpu->cpum.GstCtx.tr.u64Base;
2260 if (GCPtrNewTSS == GCPtrCurTSS)
2261 {
2262 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
2263 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
2264 pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.u32, pVCpu->cpum.GstCtx.eax,
2265 pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel,
2266 pVCpu->cpum.GstCtx.ldtr.Sel));
2267 }
2268 if (fIsNewTSS386)
2269 {
2270 /*
2271 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
2272 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2273 */
2274 void *pvCurTSS32;
2275 uint32_t const offCurTSS = RT_UOFFSETOF(X86TSS32, eip);
2276 uint32_t const cbCurTSS = RT_UOFFSETOF(X86TSS32, selLdt) - RT_UOFFSETOF(X86TSS32, eip);
2277 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
2278 rcStrict = iemMemMap(pVCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
2279 if (rcStrict != VINF_SUCCESS)
2280 {
2281 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
2282 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
2283 return rcStrict;
2284 }
2285
2286 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS, offCurTSS + cbCurTSS). */
2287 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
2288 pCurTSS32->eip = uNextEip;
2289 pCurTSS32->eflags = u32EFlags;
2290 pCurTSS32->eax = pVCpu->cpum.GstCtx.eax;
2291 pCurTSS32->ecx = pVCpu->cpum.GstCtx.ecx;
2292 pCurTSS32->edx = pVCpu->cpum.GstCtx.edx;
2293 pCurTSS32->ebx = pVCpu->cpum.GstCtx.ebx;
2294 pCurTSS32->esp = pVCpu->cpum.GstCtx.esp;
2295 pCurTSS32->ebp = pVCpu->cpum.GstCtx.ebp;
2296 pCurTSS32->esi = pVCpu->cpum.GstCtx.esi;
2297 pCurTSS32->edi = pVCpu->cpum.GstCtx.edi;
2298 pCurTSS32->es = pVCpu->cpum.GstCtx.es.Sel;
2299 pCurTSS32->cs = pVCpu->cpum.GstCtx.cs.Sel;
2300 pCurTSS32->ss = pVCpu->cpum.GstCtx.ss.Sel;
2301 pCurTSS32->ds = pVCpu->cpum.GstCtx.ds.Sel;
2302 pCurTSS32->fs = pVCpu->cpum.GstCtx.fs.Sel;
2303 pCurTSS32->gs = pVCpu->cpum.GstCtx.gs.Sel;
2304
2305 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
2306 if (rcStrict != VINF_SUCCESS)
2307 {
2308 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2309 VBOXSTRICTRC_VAL(rcStrict)));
2310 return rcStrict;
2311 }
2312 }
2313 else
2314 {
2315 /*
2316 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
2317 */
2318 void *pvCurTSS16;
2319 uint32_t const offCurTSS = RT_UOFFSETOF(X86TSS16, ip);
2320 uint32_t const cbCurTSS = RT_UOFFSETOF(X86TSS16, selLdt) - RT_UOFFSETOF(X86TSS16, ip);
2321 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
2322 rcStrict = iemMemMap(pVCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
2323 if (rcStrict != VINF_SUCCESS)
2324 {
2325 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
2326 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
2327 return rcStrict;
2328 }
2329
2330 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS, offCurTSS + cbCurTSS). */
2331 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
2332 pCurTSS16->ip = uNextEip;
2333 pCurTSS16->flags = u32EFlags;
2334 pCurTSS16->ax = pVCpu->cpum.GstCtx.ax;
2335 pCurTSS16->cx = pVCpu->cpum.GstCtx.cx;
2336 pCurTSS16->dx = pVCpu->cpum.GstCtx.dx;
2337 pCurTSS16->bx = pVCpu->cpum.GstCtx.bx;
2338 pCurTSS16->sp = pVCpu->cpum.GstCtx.sp;
2339 pCurTSS16->bp = pVCpu->cpum.GstCtx.bp;
2340 pCurTSS16->si = pVCpu->cpum.GstCtx.si;
2341 pCurTSS16->di = pVCpu->cpum.GstCtx.di;
2342 pCurTSS16->es = pVCpu->cpum.GstCtx.es.Sel;
2343 pCurTSS16->cs = pVCpu->cpum.GstCtx.cs.Sel;
2344 pCurTSS16->ss = pVCpu->cpum.GstCtx.ss.Sel;
2345 pCurTSS16->ds = pVCpu->cpum.GstCtx.ds.Sel;
2346
2347 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
2348 if (rcStrict != VINF_SUCCESS)
2349 {
2350 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2351 VBOXSTRICTRC_VAL(rcStrict)));
2352 return rcStrict;
2353 }
2354 }
2355
2356 /*
2357 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
2358 */
2359 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2360 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2361 {
2362 /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */
2363 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
2364 pNewTSS->selPrev = pVCpu->cpum.GstCtx.tr.Sel;
2365 }
2366
2367 /*
2368 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky,
2369 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
2370 */
2371 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
2372 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
2373 bool fNewDebugTrap;
2374 if (fIsNewTSS386)
2375 {
2376 PCX86TSS32 pNewTSS32 = (PCX86TSS32)pvNewTSS;
2377 uNewCr3 = (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
2378 uNewEip = pNewTSS32->eip;
2379 uNewEflags = pNewTSS32->eflags;
2380 uNewEax = pNewTSS32->eax;
2381 uNewEcx = pNewTSS32->ecx;
2382 uNewEdx = pNewTSS32->edx;
2383 uNewEbx = pNewTSS32->ebx;
2384 uNewEsp = pNewTSS32->esp;
2385 uNewEbp = pNewTSS32->ebp;
2386 uNewEsi = pNewTSS32->esi;
2387 uNewEdi = pNewTSS32->edi;
2388 uNewES = pNewTSS32->es;
2389 uNewCS = pNewTSS32->cs;
2390 uNewSS = pNewTSS32->ss;
2391 uNewDS = pNewTSS32->ds;
2392 uNewFS = pNewTSS32->fs;
2393 uNewGS = pNewTSS32->gs;
2394 uNewLdt = pNewTSS32->selLdt;
2395 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
2396 }
2397 else
2398 {
2399 PCX86TSS16 pNewTSS16 = (PCX86TSS16)pvNewTSS;
2400 uNewCr3 = 0;
2401 uNewEip = pNewTSS16->ip;
2402 uNewEflags = pNewTSS16->flags;
2403 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax;
2404 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx;
2405 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx;
2406 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx;
2407 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp;
2408 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp;
2409 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si;
2410 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di;
2411 uNewES = pNewTSS16->es;
2412 uNewCS = pNewTSS16->cs;
2413 uNewSS = pNewTSS16->ss;
2414 uNewDS = pNewTSS16->ds;
2415 uNewFS = 0;
2416 uNewGS = 0;
2417 uNewLdt = pNewTSS16->selLdt;
2418 fNewDebugTrap = false;
2419 }
2420
2421 if (GCPtrNewTSS == GCPtrCurTSS)
2422 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
2423 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
2424
2425 /*
2426 * We're done accessing the new TSS.
2427 */
2428 rcStrict = iemMemCommitAndUnmap(pVCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
2429 if (rcStrict != VINF_SUCCESS)
2430 {
2431 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
2432 return rcStrict;
2433 }
2434
2435 /*
2436 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
2437 */
2438 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
2439 {
2440 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
2441 pVCpu->cpum.GstCtx.gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
2442 if (rcStrict != VINF_SUCCESS)
2443 {
2444 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2445 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2446 return rcStrict;
2447 }
2448
2449 /* Check that the descriptor indicates the new TSS is available (not busy). */
2450 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2451 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
2452 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
2453
2454 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2455 rcStrict = iemMemCommitAndUnmap(pVCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
2456 if (rcStrict != VINF_SUCCESS)
2457 {
2458 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2459 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2460 return rcStrict;
2461 }
2462 }
2463
2464 /*
2465 * From this point on, we're technically in the new task. We will defer exceptions
2466 * until the completion of the task switch but before executing any instructions in the new task.
2467 */
2468 pVCpu->cpum.GstCtx.tr.Sel = SelTSS;
2469 pVCpu->cpum.GstCtx.tr.ValidSel = SelTSS;
2470 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
2471 pVCpu->cpum.GstCtx.tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
2472 pVCpu->cpum.GstCtx.tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
2473 pVCpu->cpum.GstCtx.tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy);
2474 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
2475
2476 /* Set the busy bit in TR. */
2477 pVCpu->cpum.GstCtx.tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2478
2479 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
2480 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2481 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2482 {
2483 uNewEflags |= X86_EFL_NT;
2484 }
2485
2486 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
2487 pVCpu->cpum.GstCtx.cr0 |= X86_CR0_TS;
2488 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
2489
2490 pVCpu->cpum.GstCtx.eip = uNewEip;
2491 pVCpu->cpum.GstCtx.eax = uNewEax;
2492 pVCpu->cpum.GstCtx.ecx = uNewEcx;
2493 pVCpu->cpum.GstCtx.edx = uNewEdx;
2494 pVCpu->cpum.GstCtx.ebx = uNewEbx;
2495 pVCpu->cpum.GstCtx.esp = uNewEsp;
2496 pVCpu->cpum.GstCtx.ebp = uNewEbp;
2497 pVCpu->cpum.GstCtx.esi = uNewEsi;
2498 pVCpu->cpum.GstCtx.edi = uNewEdi;
2499
2500 uNewEflags &= X86_EFL_LIVE_MASK;
2501 uNewEflags |= X86_EFL_RA1_MASK;
2502 IEMMISC_SET_EFL(pVCpu, uNewEflags);
2503
2504 /*
2505 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
2506 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
2507 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
2508 */
2509 pVCpu->cpum.GstCtx.es.Sel = uNewES;
2510 pVCpu->cpum.GstCtx.es.Attr.u &= ~X86DESCATTR_P;
2511
2512 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
2513 pVCpu->cpum.GstCtx.cs.Attr.u &= ~X86DESCATTR_P;
2514
2515 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
2516 pVCpu->cpum.GstCtx.ss.Attr.u &= ~X86DESCATTR_P;
2517
2518 pVCpu->cpum.GstCtx.ds.Sel = uNewDS;
2519 pVCpu->cpum.GstCtx.ds.Attr.u &= ~X86DESCATTR_P;
2520
2521 pVCpu->cpum.GstCtx.fs.Sel = uNewFS;
2522 pVCpu->cpum.GstCtx.fs.Attr.u &= ~X86DESCATTR_P;
2523
2524 pVCpu->cpum.GstCtx.gs.Sel = uNewGS;
2525 pVCpu->cpum.GstCtx.gs.Attr.u &= ~X86DESCATTR_P;
2526 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2527
2528 pVCpu->cpum.GstCtx.ldtr.Sel = uNewLdt;
2529 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
2530 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_P;
2531 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
2532
2533 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2534 {
2535 pVCpu->cpum.GstCtx.es.Attr.u |= X86DESCATTR_UNUSABLE;
2536 pVCpu->cpum.GstCtx.cs.Attr.u |= X86DESCATTR_UNUSABLE;
2537 pVCpu->cpum.GstCtx.ss.Attr.u |= X86DESCATTR_UNUSABLE;
2538 pVCpu->cpum.GstCtx.ds.Attr.u |= X86DESCATTR_UNUSABLE;
2539 pVCpu->cpum.GstCtx.fs.Attr.u |= X86DESCATTR_UNUSABLE;
2540 pVCpu->cpum.GstCtx.gs.Attr.u |= X86DESCATTR_UNUSABLE;
2541 pVCpu->cpum.GstCtx.ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
2542 }
2543
2544 /*
2545 * Switch CR3 for the new task.
2546 */
2547 if ( fIsNewTSS386
2548 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG))
2549 {
2550 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
2551 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
2552 AssertRCSuccessReturn(rc, rc);
2553
2554 /* Inform PGM. */
2555 /** @todo Should we raise \#GP(0) here when PAE PDPEs are invalid? */
2556 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PGE));
2557 AssertRCReturn(rc, rc);
2558 /* ignore informational status codes */
2559
2560 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
2561 }
2562
2563 /*
2564 * Switch LDTR for the new task.
2565 */
2566 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
2567 iemHlpLoadNullDataSelectorProt(pVCpu, &pVCpu->cpum.GstCtx.ldtr, uNewLdt);
2568 else
2569 {
2570 Assert(!pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
2571
2572 IEMSELDESC DescNewLdt;
2573 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
2574 if (rcStrict != VINF_SUCCESS)
2575 {
2576 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
2577 uNewLdt, pVCpu->cpum.GstCtx.gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
2578 return rcStrict;
2579 }
2580 if ( !DescNewLdt.Legacy.Gen.u1Present
2581 || DescNewLdt.Legacy.Gen.u1DescType
2582 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
2583 {
2584 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
2585 uNewLdt, DescNewLdt.Legacy.u));
2586 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
2587 }
2588
2589 pVCpu->cpum.GstCtx.ldtr.ValidSel = uNewLdt;
2590 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
2591 pVCpu->cpum.GstCtx.ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
2592 pVCpu->cpum.GstCtx.ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
2593 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
2594 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2595 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
2596 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
2597 }
2598
2599 IEMSELDESC DescSS;
2600 if (IEM_IS_V86_MODE(pVCpu))
2601 {
2602 pVCpu->iem.s.uCpl = 3;
2603 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.es, uNewES);
2604 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.cs, uNewCS);
2605 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ss, uNewSS);
2606 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ds, uNewDS);
2607 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.fs, uNewFS);
2608 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.gs, uNewGS);
2609
2610 /* Quick fix: fake DescSS. */ /** @todo fix the code further down? */
2611 DescSS.Legacy.u = 0;
2612 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u32Limit;
2613 DescSS.Legacy.Gen.u4LimitHigh = pVCpu->cpum.GstCtx.ss.u32Limit >> 16;
2614 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u64Base;
2615 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 16);
2616 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 24);
2617 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
2618 DescSS.Legacy.Gen.u2Dpl = 3;
2619 }
2620 else
2621 {
2622 uint8_t const uNewCpl = (uNewCS & X86_SEL_RPL);
2623
2624 /*
2625 * Load the stack segment for the new task.
2626 */
2627 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
2628 {
2629 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
2630 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2631 }
2632
2633 /* Fetch the descriptor. */
2634 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
2635 if (rcStrict != VINF_SUCCESS)
2636 {
2637 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
2638 VBOXSTRICTRC_VAL(rcStrict)));
2639 return rcStrict;
2640 }
2641
2642 /* SS must be a data segment and writable. */
2643 if ( !DescSS.Legacy.Gen.u1DescType
2644 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
2645 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
2646 {
2647 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
2648 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
2649 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2650 }
2651
2652 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
2653 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
2654 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
2655 {
2656 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
2657 uNewCpl));
2658 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2659 }
2660
2661 /* Is it there? */
2662 if (!DescSS.Legacy.Gen.u1Present)
2663 {
2664 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
2665 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2666 }
2667
2668 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
2669 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
2670
2671 /* Set the accessed bit before committing the result into SS. */
2672 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2673 {
2674 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
2675 if (rcStrict != VINF_SUCCESS)
2676 return rcStrict;
2677 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2678 }
2679
2680 /* Commit SS. */
2681 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
2682 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSS;
2683 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
2684 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimit;
2685 pVCpu->cpum.GstCtx.ss.u64Base = u64Base;
2686 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
2687 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
2688
2689 /* CPL has changed, update IEM before loading rest of segments. */
2690 pVCpu->iem.s.uCpl = uNewCpl;
2691
2692 /*
2693 * Load the data segments for the new task.
2694 */
2695 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.es, uNewES);
2696 if (rcStrict != VINF_SUCCESS)
2697 return rcStrict;
2698 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.ds, uNewDS);
2699 if (rcStrict != VINF_SUCCESS)
2700 return rcStrict;
2701 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.fs, uNewFS);
2702 if (rcStrict != VINF_SUCCESS)
2703 return rcStrict;
2704 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.gs, uNewGS);
2705 if (rcStrict != VINF_SUCCESS)
2706 return rcStrict;
2707
2708 /*
2709 * Load the code segment for the new task.
2710 */
2711 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
2712 {
2713 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
2714 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2715 }
2716
2717 /* Fetch the descriptor. */
2718 IEMSELDESC DescCS;
2719 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
2720 if (rcStrict != VINF_SUCCESS)
2721 {
2722 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
2723 return rcStrict;
2724 }
2725
2726 /* CS must be a code segment. */
2727 if ( !DescCS.Legacy.Gen.u1DescType
2728 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
2729 {
2730 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
2731 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
2732 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2733 }
2734
2735 /* For conforming CS, DPL must be less than or equal to the RPL. */
2736 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2737 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
2738 {
2739 Log(("iemTaskSwitch: conforming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
2740 DescCS.Legacy.Gen.u2Dpl));
2741 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2742 }
2743
2744 /* For non-conforming CS, DPL must match RPL. */
2745 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2746 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
2747 {
2748 Log(("iemTaskSwitch: non-conforming CS DPL RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
2749 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
2750 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2751 }
2752
2753 /* Is it there? */
2754 if (!DescCS.Legacy.Gen.u1Present)
2755 {
2756 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
2757 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2758 }
2759
2760 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
2761 u64Base = X86DESC_BASE(&DescCS.Legacy);
2762
2763 /* Set the accessed bit before committing the result into CS. */
2764 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2765 {
2766 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
2767 if (rcStrict != VINF_SUCCESS)
2768 return rcStrict;
2769 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2770 }
2771
2772 /* Commit CS. */
2773 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
2774 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCS;
2775 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
2776 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
2777 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
2778 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
2779 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
2780 }
2781
2782 /** @todo Debug trap. */
2783 if (fIsNewTSS386 && fNewDebugTrap)
2784 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
2785
2786 /*
2787 * Construct the error code masks based on what caused this task switch.
2788 * See Intel Instruction reference for INT.
2789 */
2790 uint16_t uExt;
2791 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
2792 && ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
2793 || (fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)))
2794 {
2795 uExt = 1;
2796 }
2797 else
2798 uExt = 0;
2799
2800 /*
2801 * Push any error code on to the new stack.
2802 */
2803 if (fFlags & IEM_XCPT_FLAGS_ERR)
2804 {
2805 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
2806 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
2807 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
2808
2809 /* Check that there is sufficient space on the stack. */
2810 /** @todo Factor out segment limit checking for normal/expand down segments
2811 * into a separate function. */
2812 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
2813 {
2814 if ( pVCpu->cpum.GstCtx.esp - 1 > cbLimitSS
2815 || pVCpu->cpum.GstCtx.esp < cbStackFrame)
2816 {
2817 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
2818 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
2819 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
2820 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
2821 }
2822 }
2823 else
2824 {
2825 if ( pVCpu->cpum.GstCtx.esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
2826 || pVCpu->cpum.GstCtx.esp - cbStackFrame < cbLimitSS + UINT32_C(1))
2827 {
2828 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
2829 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
2830 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
2831 }
2832 }
2833
2834
2835 if (fIsNewTSS386)
2836 rcStrict = iemMemStackPushU32(pVCpu, uErr);
2837 else
2838 rcStrict = iemMemStackPushU16(pVCpu, uErr);
2839 if (rcStrict != VINF_SUCCESS)
2840 {
2841 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
2842 fIsNewTSS386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
2843 return rcStrict;
2844 }
2845 }
2846
2847 /* Check the new EIP against the new CS limit. */
2848 if (pVCpu->cpum.GstCtx.eip > pVCpu->cpum.GstCtx.cs.u32Limit)
2849 {
2850 Log(("iemTaskSwitch: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
2851 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.cs.u32Limit));
2852 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
2853 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
2854 }
2855
2856 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip,
2857 pVCpu->cpum.GstCtx.ss.Sel));
2858 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
2859}
2860
2861
2862/**
2863 * Implements exceptions and interrupts for protected mode.
2864 *
2865 * @returns VBox strict status code.
2866 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2867 * @param cbInstr The number of bytes to offset rIP by in the return
2868 * address.
2869 * @param u8Vector The interrupt / exception vector number.
2870 * @param fFlags The flags.
2871 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2872 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2873 */
2874static VBOXSTRICTRC
2875iemRaiseXcptOrIntInProtMode(PVMCPUCC pVCpu,
2876 uint8_t cbInstr,
2877 uint8_t u8Vector,
2878 uint32_t fFlags,
2879 uint16_t uErr,
2880 uint64_t uCr2) RT_NOEXCEPT
2881{
2882 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2883
2884 /*
2885 * Read the IDT entry.
2886 */
2887 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
2888 {
2889 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
2890 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2891 }
2892 X86DESC Idte;
2893 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
2894 pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(8) * u8Vector);
2895 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2896 {
2897 Log(("iemRaiseXcptOrIntInProtMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
2898 return rcStrict;
2899 }
2900 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
2901 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
2902 Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
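    /* Protected-mode IDT entries are 8 bytes, hence the idtr.cbIdt >= 8 * vec + 7
     * bounds check and the iemMemFetchSysU64() at idtr.pIdt + 8 * vec above.
     * E.g. vector 0x0e (#PF) lives at IDTR.base + 0x70 (example only). */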
2903
2904 /*
2905 * Check the descriptor type, DPL and such.
2906 * ASSUMES this is done in the same order as described for call-gate calls.
2907 */
2908 if (Idte.Gate.u1DescType)
2909 {
2910 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
2911 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2912 }
2913 bool fTaskGate = false;
2914 uint8_t f32BitGate = true;
2915 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
2916 switch (Idte.Gate.u4Type)
2917 {
2918 case X86_SEL_TYPE_SYS_UNDEFINED:
2919 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
2920 case X86_SEL_TYPE_SYS_LDT:
2921 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
2922 case X86_SEL_TYPE_SYS_286_CALL_GATE:
2923 case X86_SEL_TYPE_SYS_UNDEFINED2:
2924 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
2925 case X86_SEL_TYPE_SYS_UNDEFINED3:
2926 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
2927 case X86_SEL_TYPE_SYS_386_CALL_GATE:
2928 case X86_SEL_TYPE_SYS_UNDEFINED4:
2929 {
2930 /** @todo check what actually happens when the type is wrong...
2931 * esp. call gates. */
2932 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
2933 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2934 }
2935
2936 case X86_SEL_TYPE_SYS_286_INT_GATE:
2937 f32BitGate = false;
2938 RT_FALL_THRU();
2939 case X86_SEL_TYPE_SYS_386_INT_GATE:
2940 fEflToClear |= X86_EFL_IF;
2941 break;
2942
2943 case X86_SEL_TYPE_SYS_TASK_GATE:
2944 fTaskGate = true;
2945#ifndef IEM_IMPLEMENTS_TASKSWITCH
2946 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
2947#endif
2948 break;
2949
2950 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
2951 f32BitGate = false;
             RT_FALL_THRU();
2952 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
2953 break;
2954
2955 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2956 }
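    /* Apart from the task-gate case handled below, the gate type decides two things
     * here: interrupt gates add X86_EFL_IF to fEflToClear (trap gates leave IF set),
     * and 286 gates (f32BitGate == false) select the 16-bit stack-frame format used
     * when the frame is built later in this function. */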
2957
2958 /* Check DPL against CPL if applicable. */
2959 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
2960 {
2961 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
2962 {
2963 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
2964 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2965 }
2966 }
2967
2968 /* Is it there? */
2969 if (!Idte.Gate.u1Present)
2970 {
2971 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
2972 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2973 }
2974
2975 /* Is it a task-gate? */
2976 if (fTaskGate)
2977 {
2978 /*
2979 * Construct the error code masks based on what caused this task switch.
2980 * See Intel Instruction reference for INT.
2981 */
2982 uint16_t const uExt = ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
2983 && !(fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)) ? 0 : 1;
2984 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
2985 RTSEL SelTSS = Idte.Gate.u16Sel;
2986
2987 /*
2988 * Fetch the TSS descriptor in the GDT.
2989 */
2990 IEMSELDESC DescTSS;
2991 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
2992 if (rcStrict != VINF_SUCCESS)
2993 {
2994 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
2995 VBOXSTRICTRC_VAL(rcStrict)));
2996 return rcStrict;
2997 }
2998
2999 /* The TSS descriptor must be a system segment and be available (not busy). */
3000 if ( DescTSS.Legacy.Gen.u1DescType
3001 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
3002 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
3003 {
3004 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
3005 u8Vector, SelTSS, DescTSS.Legacy.au64));
3006 return iemRaiseGeneralProtectionFault(pVCpu, (SelTSS & uSelMask) | uExt);
3007 }
3008
3009 /* The TSS must be present. */
3010 if (!DescTSS.Legacy.Gen.u1Present)
3011 {
3012 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
3013 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTSS & uSelMask) | uExt);
3014 }
3015
3016 /* Do the actual task switch. */
3017 return iemTaskSwitch(pVCpu, IEMTASKSWITCH_INT_XCPT,
3018 (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip,
3019 fFlags, uErr, uCr2, SelTSS, &DescTSS);
3020 }
3021
3022 /* A null CS is bad. */
3023 RTSEL NewCS = Idte.Gate.u16Sel;
3024 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3025 {
3026 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3027 return iemRaiseGeneralProtectionFault0(pVCpu);
3028 }
3029
3030 /* Fetch the descriptor for the new CS. */
3031 IEMSELDESC DescCS;
3032 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
3033 if (rcStrict != VINF_SUCCESS)
3034 {
3035 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3036 return rcStrict;
3037 }
3038
3039 /* Must be a code segment. */
3040 if (!DescCS.Legacy.Gen.u1DescType)
3041 {
3042 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3043 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3044 }
3045 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3046 {
3047 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3048 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3049 }
3050
3051 /* Don't allow lowering the privilege level. */
3052 /** @todo Does the lowering of privileges apply to software interrupts
3053 * only? This has bearings on the more-privileged or
3054 * same-privilege stack behavior further down. A testcase would
3055 * be nice. */
3056 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
3057 {
3058 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3059 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
3060 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3061 }
3062
3063 /* Make sure the selector is present. */
3064 if (!DescCS.Legacy.Gen.u1Present)
3065 {
3066 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3067 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
3068 }
3069
3070 /* Check the new EIP against the new CS limit. */
3071 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
3072 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
3073 ? Idte.Gate.u16OffsetLow
3074 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
3075 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
3076 if (uNewEip > cbLimitCS)
3077 {
3078 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
3079 u8Vector, uNewEip, cbLimitCS, NewCS));
3080 return iemRaiseGeneralProtectionFault(pVCpu, 0);
3081 }
3082 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
3083
3084 /* Calc the flag image to push. */
3085 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3086 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3087 fEfl &= ~X86_EFL_RF;
3088 else
3089 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3090
3091 /* From V8086 mode only go to CPL 0. */
3092 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3093 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
3094 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
3095 {
3096 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
3097 return iemRaiseGeneralProtectionFault(pVCpu, 0);
3098 }
3099
3100 /*
3101 * If the privilege level changes, we need to get a new stack from the TSS.
3102     * This in turn means validating the new SS and ESP...
3103 */
3104 if (uNewCpl != pVCpu->iem.s.uCpl)
3105 {
3106 RTSEL NewSS;
3107 uint32_t uNewEsp;
3108 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, uNewCpl, &NewSS, &uNewEsp);
3109 if (rcStrict != VINF_SUCCESS)
3110 return rcStrict;
3111
3112 IEMSELDESC DescSS;
3113 rcStrict = iemMiscValidateNewSS(pVCpu, NewSS, uNewCpl, &DescSS);
3114 if (rcStrict != VINF_SUCCESS)
3115 return rcStrict;
3116 /* If the new SS is 16-bit, we are only going to use SP, not ESP. */
3117 if (!DescSS.Legacy.Gen.u1DefBig)
3118 {
3119 Log(("iemRaiseXcptOrIntInProtMode: Forcing ESP=%#x to 16 bits\n", uNewEsp));
3120 uNewEsp = (uint16_t)uNewEsp;
3121 }
3122
3123 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
3124
3125 /* Check that there is sufficient space for the stack frame. */
3126 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
3127 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
3128 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
3129 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
3130
3131 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3132 {
3133 if ( uNewEsp - 1 > cbLimitSS
3134 || uNewEsp < cbStackFrame)
3135 {
3136 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
3137 u8Vector, NewSS, uNewEsp, cbStackFrame));
3138 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
3139 }
3140 }
3141 else
3142 {
3143 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT16_MAX)
3144 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
3145 {
3146 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
3147 u8Vector, NewSS, uNewEsp, cbStackFrame));
3148 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
3149 }
3150 }
3151
3152 /*
3153 * Start making changes.
3154 */
3155
3156 /* Set the new CPL so that stack accesses use it. */
3157 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
3158 pVCpu->iem.s.uCpl = uNewCpl;
3159
3160 /* Create the stack frame. */
3161 RTPTRUNION uStackFrame;
3162 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
3163 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
3164 if (rcStrict != VINF_SUCCESS)
3165 return rcStrict;
3166 void * const pvStackFrame = uStackFrame.pv;
3167 if (f32BitGate)
3168 {
3169 if (fFlags & IEM_XCPT_FLAGS_ERR)
3170 *uStackFrame.pu32++ = uErr;
3171 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3172 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
3173 uStackFrame.pu32[2] = fEfl;
3174 uStackFrame.pu32[3] = pVCpu->cpum.GstCtx.esp;
3175 uStackFrame.pu32[4] = pVCpu->cpum.GstCtx.ss.Sel;
3176 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
3177 if (fEfl & X86_EFL_VM)
3178 {
3179 uStackFrame.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel;
3180 uStackFrame.pu32[5] = pVCpu->cpum.GstCtx.es.Sel;
3181 uStackFrame.pu32[6] = pVCpu->cpum.GstCtx.ds.Sel;
3182 uStackFrame.pu32[7] = pVCpu->cpum.GstCtx.fs.Sel;
3183 uStackFrame.pu32[8] = pVCpu->cpum.GstCtx.gs.Sel;
3184 }
3185 }
3186 else
3187 {
3188 if (fFlags & IEM_XCPT_FLAGS_ERR)
3189 *uStackFrame.pu16++ = uErr;
3190 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
3191 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
3192 uStackFrame.pu16[2] = fEfl;
3193 uStackFrame.pu16[3] = pVCpu->cpum.GstCtx.sp;
3194 uStackFrame.pu16[4] = pVCpu->cpum.GstCtx.ss.Sel;
3195 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.sp));
3196 if (fEfl & X86_EFL_VM)
3197 {
3198 uStackFrame.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
3199 uStackFrame.pu16[5] = pVCpu->cpum.GstCtx.es.Sel;
3200 uStackFrame.pu16[6] = pVCpu->cpum.GstCtx.ds.Sel;
3201 uStackFrame.pu16[7] = pVCpu->cpum.GstCtx.fs.Sel;
3202 uStackFrame.pu16[8] = pVCpu->cpum.GstCtx.gs.Sel;
3203 }
3204 }
3205 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
3206 if (rcStrict != VINF_SUCCESS)
3207 return rcStrict;
3208
3209 /* Mark the selectors 'accessed' (hope this is the correct time). */
3210    /** @todo testcase: exactly _when_ are the accessed bits set - before or
3211 * after pushing the stack frame? (Write protect the gdt + stack to
3212 * find out.) */
3213 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3214 {
3215 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3216 if (rcStrict != VINF_SUCCESS)
3217 return rcStrict;
3218 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3219 }
3220
3221 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3222 {
3223 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
3224 if (rcStrict != VINF_SUCCESS)
3225 return rcStrict;
3226 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3227 }
3228
3229 /*
3230         * Start committing the register changes (joins with the DPL=CPL branch).
3231 */
3232 pVCpu->cpum.GstCtx.ss.Sel = NewSS;
3233 pVCpu->cpum.GstCtx.ss.ValidSel = NewSS;
3234 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3235 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimitSS;
3236 pVCpu->cpum.GstCtx.ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
3237 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3238 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
3239 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
3240 * SP is loaded).
3241 * Need to check the other combinations too:
3242 * - 16-bit TSS, 32-bit handler
3243 * - 32-bit TSS, 16-bit handler */
3244 if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
3245 pVCpu->cpum.GstCtx.sp = (uint16_t)(uNewEsp - cbStackFrame);
3246 else
3247 pVCpu->cpum.GstCtx.rsp = uNewEsp - cbStackFrame;
3248
3249 if (fEfl & X86_EFL_VM)
3250 {
3251 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.gs);
3252 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.fs);
3253 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.es);
3254 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.ds);
3255 }
3256 }
3257 /*
3258 * Same privilege, no stack change and smaller stack frame.
3259 */
3260 else
3261 {
3262 uint64_t uNewRsp;
3263 RTPTRUNION uStackFrame;
3264 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
3265 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, &uStackFrame.pv, &uNewRsp);
3266 if (rcStrict != VINF_SUCCESS)
3267 return rcStrict;
3268 void * const pvStackFrame = uStackFrame.pv;
3269
3270 if (f32BitGate)
3271 {
3272 if (fFlags & IEM_XCPT_FLAGS_ERR)
3273 *uStackFrame.pu32++ = uErr;
3274 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3275 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
3276 uStackFrame.pu32[2] = fEfl;
3277 }
3278 else
3279 {
3280 if (fFlags & IEM_XCPT_FLAGS_ERR)
3281 *uStackFrame.pu16++ = uErr;
3282 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3283 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
3284 uStackFrame.pu16[2] = fEfl;
3285 }
3286 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
3287 if (rcStrict != VINF_SUCCESS)
3288 return rcStrict;
3289
3290 /* Mark the CS selector as 'accessed'. */
3291 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3292 {
3293 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3294 if (rcStrict != VINF_SUCCESS)
3295 return rcStrict;
3296 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3297 }
3298
3299 /*
3300 * Start committing the register changes (joins with the other branch).
3301 */
3302 pVCpu->cpum.GstCtx.rsp = uNewRsp;
3303 }
3304
3305 /* ... register committing continues. */
3306 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3307 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3308 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3309 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCS;
3310 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3311 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3312
3313 pVCpu->cpum.GstCtx.rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
3314 fEfl &= ~fEflToClear;
3315 IEMMISC_SET_EFL(pVCpu, fEfl);
3316
3317 if (fFlags & IEM_XCPT_FLAGS_CR2)
3318 pVCpu->cpum.GstCtx.cr2 = uCr2;
3319
3320 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3321 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3322
3323 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3324}
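
/** @remarks A minimal illustration (not part of the build) of the stack frame
 *           the protected-mode worker above writes for a 32-bit gate when the
 *           privilege level changes. The struct name is made up for the sketch;
 *           lower offsets sit at lower addresses, i.e. the new ESP points at the
 *           first member present after the pushes:
 * @code
 *  typedef struct EXAMPLEPROTXCPTFRAME32
 *  {
 *      uint32_t uErrCd;        // only present when IEM_XCPT_FLAGS_ERR is set
 *      uint32_t uReturnEip;    // EIP, advanced by cbInstr for software interrupts
 *      uint32_t uOldCs;        // old CS with the old CPL in the RPL bits (raw CS when coming from V8086)
 *      uint32_t fEfl;          // the EFLAGS image calculated above
 *      uint32_t uOldEsp;
 *      uint32_t uOldSs;
 *      uint32_t aOldSRegs[4];  // ES, DS, FS, GS - only when interrupting V8086 code
 *  } EXAMPLEPROTXCPTFRAME32;
 * @endcode
 *           The 16-bit gate variant uses uint16_t members instead, which is
 *           where the (10/12 resp. 18/20) << f32BitGate byte counts come from.
 */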
3325
3326
3327/**
3328 * Implements exceptions and interrupts for long mode.
3329 *
3330 * @returns VBox strict status code.
3331 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3332 * @param cbInstr The number of bytes to offset rIP by in the return
3333 * address.
3334 * @param u8Vector The interrupt / exception vector number.
3335 * @param fFlags The flags.
3336 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3337 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3338 */
3339static VBOXSTRICTRC
3340iemRaiseXcptOrIntInLongMode(PVMCPUCC pVCpu,
3341 uint8_t cbInstr,
3342 uint8_t u8Vector,
3343 uint32_t fFlags,
3344 uint16_t uErr,
3345 uint64_t uCr2) RT_NOEXCEPT
3346{
3347 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3348
3349 /*
3350 * Read the IDT entry.
3351 */
3352 uint16_t offIdt = (uint16_t)u8Vector << 4;
3353 if (pVCpu->cpum.GstCtx.idtr.cbIdt < offIdt + 7)
3354 {
3355 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3356 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3357 }
3358 X86DESC64 Idte;
3359#ifdef _MSC_VER /* Shut up silly compiler warning. */
3360 Idte.au64[0] = 0;
3361 Idte.au64[1] = 0;
3362#endif
3363 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt);
3364 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3365 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt + 8);
3366 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3367 {
3368 Log(("iemRaiseXcptOrIntInLongMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3369 return rcStrict;
3370 }
3371 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
3372 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3373 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
3374
3375 /*
3376 * Check the descriptor type, DPL and such.
3377 * ASSUMES this is done in the same order as described for call-gate calls.
3378 */
3379 if (Idte.Gate.u1DescType)
3380 {
3381 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3382 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3383 }
3384 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
3385 switch (Idte.Gate.u4Type)
3386 {
3387 case AMD64_SEL_TYPE_SYS_INT_GATE:
3388 fEflToClear |= X86_EFL_IF;
3389 break;
3390 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
3391 break;
3392
3393 default:
3394 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3395 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3396 }
3397
3398 /* Check DPL against CPL if applicable. */
3399 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
3400 {
3401 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
3402 {
3403 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
3404 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3405 }
3406 }
3407
3408 /* Is it there? */
3409 if (!Idte.Gate.u1Present)
3410 {
3411 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
3412 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3413 }
3414
3415 /* A null CS is bad. */
3416 RTSEL NewCS = Idte.Gate.u16Sel;
3417 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3418 {
3419 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3420 return iemRaiseGeneralProtectionFault0(pVCpu);
3421 }
3422
3423 /* Fetch the descriptor for the new CS. */
3424 IEMSELDESC DescCS;
3425 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
3426 if (rcStrict != VINF_SUCCESS)
3427 {
3428 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3429 return rcStrict;
3430 }
3431
3432 /* Must be a 64-bit code segment. */
3433 if (!DescCS.Long.Gen.u1DescType)
3434 {
3435 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3436 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3437 }
3438 if ( !DescCS.Long.Gen.u1Long
3439 || DescCS.Long.Gen.u1DefBig
3440 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
3441 {
3442 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
3443 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
3444 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3445 }
3446
3447 /* Don't allow lowering the privilege level. For non-conforming CS
3448 selectors, the CS.DPL sets the privilege level the trap/interrupt
3449 handler runs at. For conforming CS selectors, the CPL remains
3450 unchanged, but the CS.DPL must be <= CPL. */
3451 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
3452 * when CPU in Ring-0. Result \#GP? */
3453 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
3454 {
3455 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3456 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
3457 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3458 }
3459
3460
3461 /* Make sure the selector is present. */
3462 if (!DescCS.Legacy.Gen.u1Present)
3463 {
3464 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3465 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
3466 }
3467
3468 /* Check that the new RIP is canonical. */
3469 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
3470 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
3471 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
3472 if (!IEM_IS_CANONICAL(uNewRip))
3473 {
3474 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
3475 return iemRaiseGeneralProtectionFault0(pVCpu);
3476 }
3477
3478 /*
3479 * If the privilege level changes or if the IST isn't zero, we need to get
3480 * a new stack from the TSS.
3481 */
3482 uint64_t uNewRsp;
3483 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3484 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
3485 if ( uNewCpl != pVCpu->iem.s.uCpl
3486 || Idte.Gate.u3IST != 0)
3487 {
3488 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
3489 if (rcStrict != VINF_SUCCESS)
3490 return rcStrict;
3491 }
3492 else
3493 uNewRsp = pVCpu->cpum.GstCtx.rsp;
3494 uNewRsp &= ~(uint64_t)0xf;
3495
3496 /*
3497 * Calc the flag image to push.
3498 */
3499 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3500 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3501 fEfl &= ~X86_EFL_RF;
3502 else
3503 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3504
3505 /*
3506 * Start making changes.
3507 */
3508 /* Set the new CPL so that stack accesses use it. */
3509 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
3510 pVCpu->iem.s.uCpl = uNewCpl;
3511
3512 /* Create the stack frame. */
3513 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
3514 RTPTRUNION uStackFrame;
3515 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
3516 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
3517 if (rcStrict != VINF_SUCCESS)
3518 return rcStrict;
3519 void * const pvStackFrame = uStackFrame.pv;
3520
3521 if (fFlags & IEM_XCPT_FLAGS_ERR)
3522 *uStackFrame.pu64++ = uErr;
3523 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.rip + cbInstr : pVCpu->cpum.GstCtx.rip;
3524 uStackFrame.pu64[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
3525 uStackFrame.pu64[2] = fEfl;
3526 uStackFrame.pu64[3] = pVCpu->cpum.GstCtx.rsp;
3527 uStackFrame.pu64[4] = pVCpu->cpum.GstCtx.ss.Sel;
3528 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
3529 if (rcStrict != VINF_SUCCESS)
3530 return rcStrict;
3531
3532    /* Mark the CS selector 'accessed' (hope this is the correct time). */
3533    /** @todo testcase: exactly _when_ are the accessed bits set - before or
3534 * after pushing the stack frame? (Write protect the gdt + stack to
3535 * find out.) */
3536 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3537 {
3538 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3539 if (rcStrict != VINF_SUCCESS)
3540 return rcStrict;
3541 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3542 }
3543
3544 /*
3545     * Start committing the register changes.
3546 */
3547 /** @todo research/testcase: Figure out what VT-x and AMD-V loads into the
3548 * hidden registers when interrupting 32-bit or 16-bit code! */
3549 if (uNewCpl != uOldCpl)
3550 {
3551 pVCpu->cpum.GstCtx.ss.Sel = 0 | uNewCpl;
3552 pVCpu->cpum.GstCtx.ss.ValidSel = 0 | uNewCpl;
3553 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3554 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
3555 pVCpu->cpum.GstCtx.ss.u64Base = 0;
3556 pVCpu->cpum.GstCtx.ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
3557 }
3558 pVCpu->cpum.GstCtx.rsp = uNewRsp - cbStackFrame;
3559 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3560 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3561 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3562 pVCpu->cpum.GstCtx.cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
3563 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3564 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3565 pVCpu->cpum.GstCtx.rip = uNewRip;
3566
3567 fEfl &= ~fEflToClear;
3568 IEMMISC_SET_EFL(pVCpu, fEfl);
3569
3570 if (fFlags & IEM_XCPT_FLAGS_CR2)
3571 pVCpu->cpum.GstCtx.cr2 = uCr2;
3572
3573 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3574 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3575
3576 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3577}
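
/** @remarks A minimal illustration (not part of the build) of the long mode
 *           frame built by the worker above; SS:RSP is pushed even without a
 *           privilege change and everything is a 64-bit push onto a stack that
 *           was first aligned down to a 16 byte boundary:
 * @code
 *  uint32_t const cbFrame   = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
 *  uint64_t const uFrameRsp = (uNewRsp & ~(uint64_t)0xf) - cbFrame;
 *  // layout at uFrameRsp: [error code,] RIP, CS, RFLAGS, old RSP, old SS
 * @endcode
 */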
3578
3579
3580/**
3581 * Implements exceptions and interrupts.
3582 *
3583 * All exceptions and interrupts go through this function!
3584 *
3585 * @returns VBox strict status code.
3586 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3587 * @param cbInstr The number of bytes to offset rIP by in the return
3588 * address.
3589 * @param u8Vector The interrupt / exception vector number.
3590 * @param fFlags The flags.
3591 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3592 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3593 */
3594VBOXSTRICTRC
3595iemRaiseXcptOrInt(PVMCPUCC pVCpu,
3596 uint8_t cbInstr,
3597 uint8_t u8Vector,
3598 uint32_t fFlags,
3599 uint16_t uErr,
3600 uint64_t uCr2) RT_NOEXCEPT
3601{
3602 /*
3603 * Get all the state that we might need here.
3604 */
3605 IEM_CTX_IMPORT_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3606 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3607
3608#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
3609 /*
3610 * Flush prefetch buffer
3611 */
3612 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
3613#endif
3614
3615 /*
3616 * Perform the V8086 IOPL check and upgrade the fault without nesting.
3617 */
3618 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1VM
3619 && pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL != 3
3620 && (fFlags & ( IEM_XCPT_FLAGS_T_SOFT_INT
3621 | IEM_XCPT_FLAGS_BP_INSTR
3622 | IEM_XCPT_FLAGS_ICEBP_INSTR
3623 | IEM_XCPT_FLAGS_OF_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
3624 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) )
3625 {
3626 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
3627 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
3628 u8Vector = X86_XCPT_GP;
3629 uErr = 0;
3630 }
3631#ifdef DBGFTRACE_ENABLED
3632 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
3633 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
3634 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp);
3635#endif
3636
3637 /*
3638 * Evaluate whether NMI blocking should be in effect.
3639 * Normally, NMI blocking is in effect whenever we inject an NMI.
3640 */
3641 bool fBlockNmi;
3642 if ( u8Vector == X86_XCPT_NMI
3643 && (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT))
3644 fBlockNmi = true;
3645 else
3646 fBlockNmi = false;
3647
3648#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3649 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3650 {
3651 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEvent(pVCpu, u8Vector, fFlags, uErr, uCr2, cbInstr);
3652 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
3653 return rcStrict0;
3654
3655 /* If virtual-NMI blocking is in effect for the nested-guest, guest NMIs are not blocked. */
3656 if (pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking)
3657 {
3658 Assert(CPUMIsGuestVmxPinCtlsSet(&pVCpu->cpum.GstCtx, VMX_PIN_CTLS_VIRT_NMI));
3659 fBlockNmi = false;
3660 }
3661 }
3662#endif
3663
3664#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
3665 if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
3666 {
3667 /*
3668 * If the event is being injected as part of VMRUN, it isn't subject to event
3669 * intercepts in the nested-guest. However, secondary exceptions that occur
3670 * during injection of any event -are- subject to exception intercepts.
3671 *
3672 * See AMD spec. 15.20 "Event Injection".
3673 */
3674 if (!pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents)
3675 pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents = true;
3676 else
3677 {
3678 /*
3679 * Check and handle if the event being raised is intercepted.
3680 */
3681 VBOXSTRICTRC rcStrict0 = iemHandleSvmEventIntercept(pVCpu, u8Vector, fFlags, uErr, uCr2);
3682 if (rcStrict0 != VINF_SVM_INTERCEPT_NOT_ACTIVE)
3683 return rcStrict0;
3684 }
3685 }
3686#endif
3687
3688 /*
3689 * Set NMI blocking if necessary.
3690 */
3691 if ( fBlockNmi
3692 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
3693 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
3694
3695 /*
3696 * Do recursion accounting.
3697 */
3698 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
3699 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
3700 if (pVCpu->iem.s.cXcptRecursions == 0)
3701 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
3702 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2));
3703 else
3704 {
3705 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
3706 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt,
3707 pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
3708
3709 if (pVCpu->iem.s.cXcptRecursions >= 4)
3710 {
3711#ifdef DEBUG_bird
3712 AssertFailed();
3713#endif
3714 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
3715 }
3716
3717 /*
3718 * Evaluate the sequence of recurring events.
3719 */
3720 IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevXcpt, uPrevXcpt, fFlags, u8Vector,
3721 NULL /* pXcptRaiseInfo */);
3722 if (enmRaise == IEMXCPTRAISE_CURRENT_XCPT)
3723 { /* likely */ }
3724 else if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
3725 {
3726 Log2(("iemRaiseXcptOrInt: Raising double fault. uPrevXcpt=%#x\n", uPrevXcpt));
3727 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
3728 u8Vector = X86_XCPT_DF;
3729 uErr = 0;
3730#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3731 /* VMX nested-guest #DF intercept needs to be checked here. */
3732 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3733 {
3734 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEventDoubleFault(pVCpu);
3735 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
3736 return rcStrict0;
3737 }
3738#endif
3739 /* SVM nested-guest #DF intercepts need to be checked now. See AMD spec. 15.12 "Exception Intercepts". */
3740 if (IEM_SVM_IS_XCPT_INTERCEPT_SET(pVCpu, X86_XCPT_DF))
3741 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
3742 }
3743 else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)
3744 {
3745 Log2(("iemRaiseXcptOrInt: Raising triple fault. uPrevXcpt=%#x\n", uPrevXcpt));
3746 return iemInitiateCpuShutdown(pVCpu);
3747 }
3748 else if (enmRaise == IEMXCPTRAISE_CPU_HANG)
3749 {
3750 /* If a nested-guest enters an endless CPU loop condition, we'll emulate it; otherwise guru. */
3751 Log2(("iemRaiseXcptOrInt: CPU hang condition detected\n"));
3752 if ( !CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu))
3753 && !CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
3754 return VERR_EM_GUEST_CPU_HANG;
3755 }
3756 else
3757 {
3758 AssertMsgFailed(("Unexpected condition! enmRaise=%#x uPrevXcpt=%#x fPrevXcpt=%#x, u8Vector=%#x fFlags=%#x\n",
3759 enmRaise, uPrevXcpt, fPrevXcpt, u8Vector, fFlags));
3760 return VERR_IEM_IPE_9;
3761 }
3762
3763 /*
3764     * The 'EXT' bit is set when an exception occurs during delivery of an external
3765     * event (such as an interrupt or earlier exception)[1]. The privileged software
3766     * exception (INT1) also sets the EXT bit[2]. For exceptions generated by software
3767     * interrupts and the INTO and INT3 instructions, the 'EXT' bit is not set[3].
3768 *
3769 * [1] - Intel spec. 6.13 "Error Code"
3770 * [2] - Intel spec. 26.5.1.1 "Details of Vectored-Event Injection".
3771 * [3] - Intel Instruction reference for INT n.
3772 */
3773 if ( (fPrevXcpt & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR))
3774 && (fFlags & IEM_XCPT_FLAGS_ERR)
3775 && u8Vector != X86_XCPT_PF
3776 && u8Vector != X86_XCPT_DF)
3777 {
3778 uErr |= X86_TRAP_ERR_EXTERNAL;
3779 }
3780 }
3781
3782 pVCpu->iem.s.cXcptRecursions++;
3783 pVCpu->iem.s.uCurXcpt = u8Vector;
3784 pVCpu->iem.s.fCurXcpt = fFlags;
3785 pVCpu->iem.s.uCurXcptErr = uErr;
3786 pVCpu->iem.s.uCurXcptCr2 = uCr2;
3787
3788 /*
3789 * Extensive logging.
3790 */
3791#if defined(LOG_ENABLED) && defined(IN_RING3)
3792 if (LogIs3Enabled())
3793 {
3794 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR_MASK);
3795 PVM pVM = pVCpu->CTX_SUFF(pVM);
3796 char szRegs[4096];
3797 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
3798 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
3799 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
3800 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
3801 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
3802 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
3803 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
3804 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
3805 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
3806 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
3807 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
3808 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
3809 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
3810 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
3811 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
3812 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
3813 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
3814 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
3815 " efer=%016VR{efer}\n"
3816 " pat=%016VR{pat}\n"
3817 " sf_mask=%016VR{sf_mask}\n"
3818 "krnl_gs_base=%016VR{krnl_gs_base}\n"
3819 " lstar=%016VR{lstar}\n"
3820 " star=%016VR{star} cstar=%016VR{cstar}\n"
3821 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
3822 );
3823
3824 char szInstr[256];
3825 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
3826 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
3827 szInstr, sizeof(szInstr), NULL);
3828 Log3(("%s%s\n", szRegs, szInstr));
3829 }
3830#endif /* LOG_ENABLED */
3831
3832 /*
3833 * Call the mode specific worker function.
3834 */
3835 VBOXSTRICTRC rcStrict;
3836 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
3837 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
3838 else if (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LMA)
3839 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
3840 else
3841 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
3842
3843 /* Flush the prefetch buffer. */
3844#ifdef IEM_WITH_CODE_TLB
3845 pVCpu->iem.s.pbInstrBuf = NULL;
3846#else
3847 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
3848#endif
3849
3850 /*
3851 * Unwind.
3852 */
3853 pVCpu->iem.s.cXcptRecursions--;
3854 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
3855 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
3856 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u depth=%d\n",
3857 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, pVCpu->iem.s.uCpl,
3858 pVCpu->iem.s.cXcptRecursions + 1));
3859 return rcStrict;
3860}
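
/** @remarks Quick reference (not part of the build) for the selector format
 *           error codes composed by the workers above, assuming the
 *           X86_TRAP_ERR_* constants mirror the architectural layout (EXT in
 *           bit 0, IDT in bit 1, TI in bit 2, index from bit 3):
 * @code
 *  // illustrative helper, name made up for the sketch
 *  static uint16_t exampleIdtXcptErrCd(uint8_t bVector, bool fExternal)
 *  {
 *      return (uint16_t)(((uint16_t)bVector << 3) | UINT16_C(2) | (fExternal ? UINT16_C(1) : 0));
 *  }
 * @endcode
 *           which is what X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT)
 *           amounts to, with X86_TRAP_ERR_EXTERNAL ORed in when delivery of an
 *           external event faults.
 */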
3861
3862#ifdef IEM_WITH_SETJMP
3863/**
3864 * See iemRaiseXcptOrInt. Will not return.
3865 */
3866DECL_NO_RETURN(void)
3867iemRaiseXcptOrIntJmp(PVMCPUCC pVCpu,
3868 uint8_t cbInstr,
3869 uint8_t u8Vector,
3870 uint32_t fFlags,
3871 uint16_t uErr,
3872 uint64_t uCr2) RT_NOEXCEPT
3873{
3874 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
3875 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
3876}
3877#endif
3878
3879
3880/** \#DE - 00. */
3881VBOXSTRICTRC iemRaiseDivideError(PVMCPUCC pVCpu) RT_NOEXCEPT
3882{
3883 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
3884}
3885
3886
3887/** \#DB - 01.
3888 * @note This automatically clears DR7.GD. */
3889VBOXSTRICTRC iemRaiseDebugException(PVMCPUCC pVCpu) RT_NOEXCEPT
3890{
3891 /** @todo set/clear RF. */
3892 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
3893 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
3894}
3895
3896
3897/** \#BR - 05. */
3898VBOXSTRICTRC iemRaiseBoundRangeExceeded(PVMCPUCC pVCpu) RT_NOEXCEPT
3899{
3900 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_BR, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
3901}
3902
3903
3904/** \#UD - 06. */
3905VBOXSTRICTRC iemRaiseUndefinedOpcode(PVMCPUCC pVCpu) RT_NOEXCEPT
3906{
3907 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
3908}
3909
3910
3911/** \#NM - 07. */
3912VBOXSTRICTRC iemRaiseDeviceNotAvailable(PVMCPUCC pVCpu) RT_NOEXCEPT
3913{
3914 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
3915}
3916
3917
3918/** \#TS(err) - 0a. */
3919VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
3920{
3921 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
3922}
3923
3924
3925/** \#TS(tr) - 0a. */
3926VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPUCC pVCpu) RT_NOEXCEPT
3927{
3928 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
3929 pVCpu->cpum.GstCtx.tr.Sel, 0);
3930}
3931
3932
3933/** \#TS(0) - 0a. */
3934VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPUCC pVCpu) RT_NOEXCEPT
3935{
3936 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
3937 0, 0);
3938}
3939
3940
3941/** \#TS(sel) - 0a. */
3942VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
3943{
3944 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
3945 uSel & X86_SEL_MASK_OFF_RPL, 0);
3946}
3947
3948
3949/** \#NP(err) - 0b. */
3950VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
3951{
3952 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
3953}
3954
3955
3956/** \#NP(sel) - 0b. */
3957VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
3958{
3959 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
3960 uSel & ~X86_SEL_RPL, 0);
3961}
3962
3963
3964/** \#SS(seg) - 0c. */
3965VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
3966{
3967 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
3968 uSel & ~X86_SEL_RPL, 0);
3969}
3970
3971
3972/** \#SS(err) - 0c. */
3973VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
3974{
3975 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
3976}
3977
3978
3979/** \#GP(n) - 0d. */
3980VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
3981{
3982 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
3983}
3984
3985
3986/** \#GP(0) - 0d. */
3987VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPUCC pVCpu) RT_NOEXCEPT
3988{
3989 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
3990}
3991
3992#ifdef IEM_WITH_SETJMP
3993/** \#GP(0) - 0d. */
3994DECL_NO_RETURN(void) iemRaiseGeneralProtectionFault0Jmp(PVMCPUCC pVCpu) RT_NOEXCEPT
3995{
3996 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
3997}
3998#endif
3999
4000
4001/** \#GP(sel) - 0d. */
4002VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
4003{
4004 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4005 Sel & ~X86_SEL_RPL, 0);
4006}
4007
4008
4009/** \#GP(0) - 0d. */
4010VBOXSTRICTRC iemRaiseNotCanonical(PVMCPUCC pVCpu) RT_NOEXCEPT
4011{
4012 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4013}
4014
4015
4016/** \#GP(sel) - 0d. */
4017VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4018{
4019 NOREF(iSegReg); NOREF(fAccess);
4020 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4021 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4022}
4023
4024#ifdef IEM_WITH_SETJMP
4025/** \#GP(sel) - 0d, longjmp. */
4026DECL_NO_RETURN(void) iemRaiseSelectorBoundsJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4027{
4028 NOREF(iSegReg); NOREF(fAccess);
4029 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4030 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4031}
4032#endif
4033
4034/** \#GP(sel) - 0d. */
4035VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
4036{
4037 NOREF(Sel);
4038 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4039}
4040
4041#ifdef IEM_WITH_SETJMP
4042/** \#GP(sel) - 0d, longjmp. */
4043DECL_NO_RETURN(void) iemRaiseSelectorBoundsBySelectorJmp(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
4044{
4045 NOREF(Sel);
4046 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4047}
4048#endif
4049
4050
4051/** \#GP(sel) - 0d. */
4052VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4053{
4054 NOREF(iSegReg); NOREF(fAccess);
4055 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4056}
4057
4058#ifdef IEM_WITH_SETJMP
4059/** \#GP(sel) - 0d, longjmp. */
4060DECL_NO_RETURN(void) iemRaiseSelectorInvalidAccessJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4061{
4062 NOREF(iSegReg); NOREF(fAccess);
4063 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4064}
4065#endif
4066
4067
4068/** \#PF(n) - 0e. */
4069VBOXSTRICTRC iemRaisePageFault(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc) RT_NOEXCEPT
4070{
4071 uint16_t uErr;
4072 switch (rc)
4073 {
4074 case VERR_PAGE_NOT_PRESENT:
4075 case VERR_PAGE_TABLE_NOT_PRESENT:
4076 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
4077 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
4078 uErr = 0;
4079 break;
4080
4081 default:
4082 AssertMsgFailed(("%Rrc\n", rc));
4083 RT_FALL_THRU();
4084 case VERR_ACCESS_DENIED:
4085 uErr = X86_TRAP_PF_P;
4086 break;
4087
4088 /** @todo reserved */
4089 }
4090
4091 if (pVCpu->iem.s.uCpl == 3)
4092 uErr |= X86_TRAP_PF_US;
4093
4094 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
4095 && ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
4096 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) ) )
4097 uErr |= X86_TRAP_PF_ID;
4098
4099#if 0 /* This is so much non-sense, really. Why was it done like that? */
4100 /* Note! RW access callers reporting a WRITE protection fault, will clear
4101 the READ flag before calling. So, read-modify-write accesses (RW)
4102 can safely be reported as READ faults. */
4103 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
4104 uErr |= X86_TRAP_PF_RW;
4105#else
4106 if (fAccess & IEM_ACCESS_TYPE_WRITE)
4107 {
4108 /// @todo r=bird: bs3-cpu-basic-2 wants X86_TRAP_PF_RW for xchg and cmpxchg
4109 /// (regardless of outcome of the comparison in the latter case).
4110 //if (!(fAccess & IEM_ACCESS_TYPE_READ))
4111 uErr |= X86_TRAP_PF_RW;
4112 }
4113#endif
4114
4115 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
4116 uErr, GCPtrWhere);
4117}
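
/** @remarks A short sketch (not part of the build) of how the \#PF error code
 *           bits set above map onto the architectural definition; the flag
 *           names are the ones used in this file, the predicate names are made
 *           up for the sketch:
 * @code
 *  uint16_t uExampleErr = 0;
 *  if (fProtectionViolation)   uExampleErr |= X86_TRAP_PF_P;   // bit 0: page-level protection violation (page present)
 *  if (fWriteAccess)           uExampleErr |= X86_TRAP_PF_RW;  // bit 1: write access
 *  if (uCpl == 3)              uExampleErr |= X86_TRAP_PF_US;  // bit 2: user-mode access
 *  if (fInstrFetch && fNxOn)   uExampleErr |= X86_TRAP_PF_ID;  // bit 4: instruction fetch with NX/PAE paging
 * @endcode
 *           The faulting linear address is delivered via CR2 through the
 *           IEM_XCPT_FLAGS_CR2 path in iemRaiseXcptOrInt.
 */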
4118
4119#ifdef IEM_WITH_SETJMP
4120/** \#PF(n) - 0e, longjmp. */
4121DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc) RT_NOEXCEPT
4122{
4123 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, fAccess, rc)));
4124}
4125#endif
4126
4127
4128/** \#MF(0) - 10. */
4129VBOXSTRICTRC iemRaiseMathFault(PVMCPUCC pVCpu)
4130{
4131 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4132}
4133
4134
4135/** \#AC(0) - 11. */
4136VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPUCC pVCpu)
4137{
4138    return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4139}
4140
4141
4142/** Accessed via IEMOP_RAISE_DIVIDE_ERROR. */
4143IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
4144{
4145 NOREF(cbInstr);
4146 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4147}
4148
4149
4150/** Accessed via IEMOP_RAISE_INVALID_LOCK_PREFIX. */
4151IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
4152{
4153 NOREF(cbInstr);
4154 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4155}
4156
4157
4158/** Accessed via IEMOP_RAISE_INVALID_OPCODE. */
4159IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
4160{
4161 NOREF(cbInstr);
4162 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4163}
4164
4165
4166/** @} */
4167
4168/** @name Common opcode decoders.
4169 * @{
4170 */
4171//#include <iprt/mem.h>
4172
4173/**
4174 * Used to add extra details about a stub case.
4175 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4176 */
4177void iemOpStubMsg2(PVMCPUCC pVCpu) RT_NOEXCEPT
4178{
4179#if defined(LOG_ENABLED) && defined(IN_RING3)
4180 PVM pVM = pVCpu->CTX_SUFF(pVM);
4181 char szRegs[4096];
4182 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
4183 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
4184 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
4185 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
4186 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
4187 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
4188 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
4189 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
4190 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
4191 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
4192 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
4193 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4194 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4195 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4196 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4197 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4198 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4199 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4200 " efer=%016VR{efer}\n"
4201 " pat=%016VR{pat}\n"
4202 " sf_mask=%016VR{sf_mask}\n"
4203 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4204 " lstar=%016VR{lstar}\n"
4205 " star=%016VR{star} cstar=%016VR{cstar}\n"
4206 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4207 );
4208
4209 char szInstr[256];
4210 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4211 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4212 szInstr, sizeof(szInstr), NULL);
4213
4214 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
4215#else
4216    RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip);
4217#endif
4218}
4219
4220/** @} */
4221
4222
4223
4224/** @name Register Access.
4225 * @{
4226 */
4227
4228/**
4229 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
4230 *
4231 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4232 * segment limit.
4233 *
4234 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4235 * @param offNextInstr The offset of the next instruction.
4236 */
4237VBOXSTRICTRC iemRegRipRelativeJumpS8(PVMCPUCC pVCpu, int8_t offNextInstr) RT_NOEXCEPT
4238{
4239 switch (pVCpu->iem.s.enmEffOpSize)
4240 {
4241 case IEMMODE_16BIT:
4242 {
4243 uint16_t uNewIp = pVCpu->cpum.GstCtx.ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
4244 if ( uNewIp > pVCpu->cpum.GstCtx.cs.u32Limit
4245 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
4246 return iemRaiseGeneralProtectionFault0(pVCpu);
4247 pVCpu->cpum.GstCtx.rip = uNewIp;
4248 break;
4249 }
4250
4251 case IEMMODE_32BIT:
4252 {
4253 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
4254 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
4255
4256 uint32_t uNewEip = pVCpu->cpum.GstCtx.eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
4257 if (uNewEip > pVCpu->cpum.GstCtx.cs.u32Limit)
4258 return iemRaiseGeneralProtectionFault0(pVCpu);
4259 pVCpu->cpum.GstCtx.rip = uNewEip;
4260 break;
4261 }
4262
4263 case IEMMODE_64BIT:
4264 {
4265 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
4266
4267 uint64_t uNewRip = pVCpu->cpum.GstCtx.rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
4268 if (!IEM_IS_CANONICAL(uNewRip))
4269 return iemRaiseGeneralProtectionFault0(pVCpu);
4270 pVCpu->cpum.GstCtx.rip = uNewRip;
4271 break;
4272 }
4273
4274 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4275 }
4276
4277 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
4278
4279#ifndef IEM_WITH_CODE_TLB
4280 /* Flush the prefetch buffer. */
4281 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4282#endif
4283
4284 return VINF_SUCCESS;
4285}
4286
4287
4288/**
4289 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
4290 *
4291 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4292 * segment limit.
4293 *
4294 * @returns Strict VBox status code.
4295 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4296 * @param offNextInstr The offset of the next instruction.
4297 */
4298VBOXSTRICTRC iemRegRipRelativeJumpS16(PVMCPUCC pVCpu, int16_t offNextInstr) RT_NOEXCEPT
4299{
4300 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
4301
4302 uint16_t uNewIp = pVCpu->cpum.GstCtx.ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
4303 if ( uNewIp > pVCpu->cpum.GstCtx.cs.u32Limit
4304 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
4305 return iemRaiseGeneralProtectionFault0(pVCpu);
4306 /** @todo Test 16-bit jump in 64-bit mode. possible? */
4307 pVCpu->cpum.GstCtx.rip = uNewIp;
4308 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
4309
4310#ifndef IEM_WITH_CODE_TLB
4311 /* Flush the prefetch buffer. */
4312 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4313#endif
4314
4315 return VINF_SUCCESS;
4316}
4317
4318
4319/**
4320 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
4321 *
4322 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4323 * segment limit.
4324 *
4325 * @returns Strict VBox status code.
4326 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4327 * @param offNextInstr The offset of the next instruction.
4328 */
4329VBOXSTRICTRC iemRegRipRelativeJumpS32(PVMCPUCC pVCpu, int32_t offNextInstr) RT_NOEXCEPT
4330{
4331 Assert(pVCpu->iem.s.enmEffOpSize != IEMMODE_16BIT);
4332
4333 if (pVCpu->iem.s.enmEffOpSize == IEMMODE_32BIT)
4334 {
4335 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX); Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
4336
4337 uint32_t uNewEip = pVCpu->cpum.GstCtx.eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
4338 if (uNewEip > pVCpu->cpum.GstCtx.cs.u32Limit)
4339 return iemRaiseGeneralProtectionFault0(pVCpu);
4340 pVCpu->cpum.GstCtx.rip = uNewEip;
4341 }
4342 else
4343 {
4344 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
4345
4346 uint64_t uNewRip = pVCpu->cpum.GstCtx.rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
4347 if (!IEM_IS_CANONICAL(uNewRip))
4348 return iemRaiseGeneralProtectionFault0(pVCpu);
4349 pVCpu->cpum.GstCtx.rip = uNewRip;
4350 }
4351 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
4352
4353#ifndef IEM_WITH_CODE_TLB
4354 /* Flush the prefetch buffer. */
4355 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4356#endif
4357
4358 return VINF_SUCCESS;
4359}
4360
4361
4362/**
4363 * Performs a near jump to the specified address.
4364 *
4365 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4366 * segment limit.
4367 *
4368 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4369 * @param uNewRip The new RIP value.
4370 */
4371VBOXSTRICTRC iemRegRipJump(PVMCPUCC pVCpu, uint64_t uNewRip) RT_NOEXCEPT
4372{
4373 switch (pVCpu->iem.s.enmEffOpSize)
4374 {
4375 case IEMMODE_16BIT:
4376 {
4377 Assert(uNewRip <= UINT16_MAX);
4378 if ( uNewRip > pVCpu->cpum.GstCtx.cs.u32Limit
4379 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
4380 return iemRaiseGeneralProtectionFault0(pVCpu);
4381 /** @todo Test 16-bit jump in 64-bit mode. */
4382 pVCpu->cpum.GstCtx.rip = uNewRip;
4383 break;
4384 }
4385
4386 case IEMMODE_32BIT:
4387 {
4388 Assert(uNewRip <= UINT32_MAX);
4389 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
4390 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
4391
4392 if (uNewRip > pVCpu->cpum.GstCtx.cs.u32Limit)
4393 return iemRaiseGeneralProtectionFault0(pVCpu);
4394 pVCpu->cpum.GstCtx.rip = uNewRip;
4395 break;
4396 }
4397
4398 case IEMMODE_64BIT:
4399 {
4400 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
4401
4402 if (!IEM_IS_CANONICAL(uNewRip))
4403 return iemRaiseGeneralProtectionFault0(pVCpu);
4404 pVCpu->cpum.GstCtx.rip = uNewRip;
4405 break;
4406 }
4407
4408 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4409 }
4410
4411 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
4412
4413#ifndef IEM_WITH_CODE_TLB
4414 /* Flush the prefetch buffer. */
4415 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4416#endif
4417
4418 return VINF_SUCCESS;
4419}
4420
4421/** @} */
4422
4423
4424/** @name FPU access and helpers.
4425 *
4426 * @{
4427 */
4428
4429/**
4430 * Updates the x87.DS and FPUDP registers.
4431 *
4432 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4433 * @param pFpuCtx The FPU context.
4434 * @param iEffSeg The effective segment register.
4435 * @param GCPtrEff The effective address relative to @a iEffSeg.
4436 */
4437DECLINLINE(void) iemFpuUpdateDP(PVMCPUCC pVCpu, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
4438{
4439 RTSEL sel;
4440 switch (iEffSeg)
4441 {
4442 case X86_SREG_DS: sel = pVCpu->cpum.GstCtx.ds.Sel; break;
4443 case X86_SREG_SS: sel = pVCpu->cpum.GstCtx.ss.Sel; break;
4444 case X86_SREG_CS: sel = pVCpu->cpum.GstCtx.cs.Sel; break;
4445 case X86_SREG_ES: sel = pVCpu->cpum.GstCtx.es.Sel; break;
4446 case X86_SREG_FS: sel = pVCpu->cpum.GstCtx.fs.Sel; break;
4447 case X86_SREG_GS: sel = pVCpu->cpum.GstCtx.gs.Sel; break;
4448 default:
4449 AssertMsgFailed(("%d\n", iEffSeg));
4450 sel = pVCpu->cpum.GstCtx.ds.Sel;
4451 }
4452    /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
4453 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
4454 {
4455 pFpuCtx->DS = 0;
4456 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
4457 }
4458 else if (!IEM_IS_LONG_MODE(pVCpu))
4459 {
4460 pFpuCtx->DS = sel;
4461 pFpuCtx->FPUDP = GCPtrEff;
4462 }
4463 else
4464 *(uint64_t *)&pFpuCtx->FPUDP = GCPtrEff;
4465}
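
/** @remarks For reference (not part of the build): in real and V8086 mode the
 *           FPU data pointer is stored as a linearized address and the selector
 *           field is zeroed, i.e. effectively:
 * @code
 *  uint32_t const uExampleFpuDp = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
 * @endcode
 *           whereas protected mode keeps the selector and offset separate and
 *           long mode stores a full 64-bit offset over the DS/FPUDP fields, as
 *           done by the function above.
 */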
4466
4467
4468/**
4469 * Rotates the stack registers in the push direction.
4470 *
4471 * @param pFpuCtx The FPU context.
4472 * @remarks This is a complete waste of time, but fxsave stores the registers in
4473 * stack order.
4474 */
4475DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
4476{
4477 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
4478 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
4479 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
4480 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
4481 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
4482 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
4483 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
4484 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
4485 pFpuCtx->aRegs[0].r80 = r80Tmp;
4486}
4487
4488
4489/**
4490 * Rotates the stack registers in the pop direction.
4491 *
4492 * @param pFpuCtx The FPU context.
4493 * @remarks This is a complete waste of time, but fxsave stores the registers in
4494 * stack order.
4495 */
4496DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
4497{
4498 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
4499 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
4500 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
4501 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
4502 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
4503 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
4504 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
4505 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
4506 pFpuCtx->aRegs[7].r80 = r80Tmp;
4507}
4508
4509
4510/**
4511 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
4512 * exception prevents it.
4513 *
4514 * @param pResult The FPU operation result to push.
4515 * @param pFpuCtx The FPU context.
4516 */
4517static void iemFpuMaybePushResult(PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx) RT_NOEXCEPT
4518{
4519 /* Update FSW and bail if there are pending exceptions afterwards. */
4520 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
4521 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
4522 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4523 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4524 {
4525 pFpuCtx->FSW = fFsw;
4526 return;
4527 }
4528
4529 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
4530 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
4531 {
4532 /* All is fine, push the actual value. */
4533 pFpuCtx->FTW |= RT_BIT(iNewTop);
4534 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
4535 }
4536 else if (pFpuCtx->FCW & X86_FCW_IM)
4537 {
4538 /* Masked stack overflow, push QNaN. */
4539 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
4540 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
4541 }
4542 else
4543 {
4544 /* Raise stack overflow, don't push anything. */
4545 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
4546 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
4547 return;
4548 }
4549
4550 fFsw &= ~X86_FSW_TOP_MASK;
4551 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
4552 pFpuCtx->FSW = fFsw;
4553
4554 iemFpuRotateStackPush(pFpuCtx);
4555}
4556
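/**
 * Note on the modular TOP arithmetic used by the push/pop helpers in this
 * section: TOP lives in FSW bits 11..13 and wraps modulo 8.  A push computes
 * the new top as (TOP + 7) & 7, which is (TOP - 1) mod 8, while a pop adds 9
 * (i.e. +1 mod 8) to the still-shifted value.  For example, with TOP = 0 a
 * push yields a new top of 7, and popping that entry wraps TOP back to 0.
 */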
4557
4558/**
4559 * Stores a result in a FPU register and updates the FSW and FTW.
4560 *
4561 * @param pFpuCtx The FPU context.
4562 * @param pResult The result to store.
4563 * @param iStReg Which FPU register to store it in.
4564 */
4565static void iemFpuStoreResultOnly(PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg) RT_NOEXCEPT
4566{
4567 Assert(iStReg < 8);
4568 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
4569 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
4570 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_TOP_MASK;
4571 pFpuCtx->FTW |= RT_BIT(iReg);
4572 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
4573}
4574
4575
4576/**
4577 * Only updates the FPU status word (FSW) with the result of the current
4578 * instruction.
4579 *
4580 * @param pFpuCtx The FPU context.
4581 * @param u16FSW The FSW output of the current instruction.
4582 */
4583static void iemFpuUpdateFSWOnly(PX86FXSTATE pFpuCtx, uint16_t u16FSW) RT_NOEXCEPT
4584{
4585 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
4586 pFpuCtx->FSW |= u16FSW & ~X86_FSW_TOP_MASK;
4587}
4588
4589
4590/**
4591 * Pops one item off the FPU stack if no pending exception prevents it.
4592 *
4593 * @param pFpuCtx The FPU context.
4594 */
4595static void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx) RT_NOEXCEPT
4596{
4597 /* Check pending exceptions. */
4598 uint16_t uFSW = pFpuCtx->FSW;
4599 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4600 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4601 return;
4602
4603 /* TOP++ (popping increments TOP modulo 8). */
4604 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
4605 uFSW &= ~X86_FSW_TOP_MASK;
4606 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
4607 pFpuCtx->FSW = uFSW;
4608
4609 /* Mark the previous ST0 as empty. */
4610 iOldTop >>= X86_FSW_TOP_SHIFT;
4611 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
4612
4613 /* Rotate the registers. */
4614 iemFpuRotateStackPop(pFpuCtx);
4615}
4616
4617
4618/**
4619 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
4620 *
4621 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4622 * @param pResult The FPU operation result to push.
4623 */
4624void iemFpuPushResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult) RT_NOEXCEPT
4625{
4626 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4627 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4628 iemFpuMaybePushResult(pResult, pFpuCtx);
4629}
4630
4631
4632/**
4633 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
4634 * and sets FPUDP and FPUDS.
4635 *
4636 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4637 * @param pResult The FPU operation result to push.
4638 * @param iEffSeg The effective segment register.
4639 * @param GCPtrEff The effective address relative to @a iEffSeg.
4640 */
4641void iemFpuPushResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
4642{
4643 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4644 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
4645 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4646 iemFpuMaybePushResult(pResult, pFpuCtx);
4647}
4648
4649
4650/**
4651 * Replace ST0 with the first value and push the second onto the FPU stack,
4652 * unless a pending exception prevents it.
4653 *
4654 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4655 * @param pResult The FPU operation result to store and push.
4656 */
4657void iemFpuPushResultTwo(PVMCPUCC pVCpu, PIEMFPURESULTTWO pResult) RT_NOEXCEPT
4658{
4659 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4660 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4661
4662 /* Update FSW and bail if there are pending exceptions afterwards. */
4663 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
4664 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
4665 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4666 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4667 {
4668 pFpuCtx->FSW = fFsw;
4669 return;
4670 }
4671
4672 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
4673 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
4674 {
4675 /* All is fine, push the actual value. */
4676 pFpuCtx->FTW |= RT_BIT(iNewTop);
4677 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
4678 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
4679 }
4680 else if (pFpuCtx->FCW & X86_FCW_IM)
4681 {
4682 /* Masked stack overflow, push QNaN. */
4683 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
4684 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
4685 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
4686 }
4687 else
4688 {
4689 /* Raise stack overflow, don't push anything. */
4690 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
4691 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
4692 return;
4693 }
4694
4695 fFsw &= ~X86_FSW_TOP_MASK;
4696 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
4697 pFpuCtx->FSW = fFsw;
4698
4699 iemFpuRotateStackPush(pFpuCtx);
4700}
4701
4702
4703/**
4704 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
4705 * FOP.
4706 *
4707 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4708 * @param pResult The result to store.
4709 * @param iStReg Which FPU register to store it in.
4710 */
4711void iemFpuStoreResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg) RT_NOEXCEPT
4712{
4713 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4714 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4715 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
4716}
4717
4718
4719/**
4720 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
4721 * FOP, and then pops the stack.
4722 *
4723 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4724 * @param pResult The result to store.
4725 * @param iStReg Which FPU register to store it in.
4726 */
4727void iemFpuStoreResultThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg) RT_NOEXCEPT
4728{
4729 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4730 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4731 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
4732 iemFpuMaybePopOne(pFpuCtx);
4733}
4734
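/**
 * Usage sketch (illustrative only) of how an arithmetic instruction with a
 * popping form would typically feed its result into the helpers above.  The
 * worker name iemAImpl_fopExample_r80_by_r80 and the emptiness check are
 * hypothetical placeholders for the real decoder/IEM_MC plumbing:
 * @code
 *      IEMFPURESULT FpuRes;
 *      if (fBothStRegsPresent)                 // hypothetical: ST(0) and ST(iStReg) not empty
 *      {
 *          iemAImpl_fopExample_r80_by_r80(pFpuCtx, &FpuRes, pr80Dst, pr80Src);
 *          iemFpuStoreResultThenPop(pVCpu, &FpuRes, iStReg);
 *      }
 *      else
 *          iemFpuStackUnderflowThenPop(pVCpu, iStReg);    // empty source -> #IS handling
 * @endcode
 */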
4735
4736/**
4737 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
4738 * FPUDP, and FPUDS.
4739 *
4740 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4741 * @param pResult The result to store.
4742 * @param iStReg Which FPU register to store it in.
4743 * @param iEffSeg The effective memory operand selector register.
4744 * @param GCPtrEff The effective memory operand offset.
4745 */
4746void iemFpuStoreResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
4747 uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
4748{
4749 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4750 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
4751 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4752 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
4753}
4754
4755
4756/**
4757 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
4758 * FPUDP, and FPUDS, and then pops the stack.
4759 *
4760 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4761 * @param pResult The result to store.
4762 * @param iStReg Which FPU register to store it in.
4763 * @param iEffSeg The effective memory operand selector register.
4764 * @param GCPtrEff The effective memory operand offset.
4765 */
4766void iemFpuStoreResultWithMemOpThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult,
4767 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
4768{
4769 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4770 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
4771 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4772 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
4773 iemFpuMaybePopOne(pFpuCtx);
4774}
4775
4776
4777/**
4778 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
4779 *
4780 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4781 */
4782void iemFpuUpdateOpcodeAndIp(PVMCPUCC pVCpu) RT_NOEXCEPT
4783{
4784 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4785 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4786}
4787
4788
4789/**
4790 * Updates the FSW, FOP, FPUIP, and FPUCS.
4791 *
4792 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4793 * @param u16FSW The FSW from the current instruction.
4794 */
4795void iemFpuUpdateFSW(PVMCPUCC pVCpu, uint16_t u16FSW) RT_NOEXCEPT
4796{
4797 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4798 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4799 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
4800}
4801
4802
4803/**
4804 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
4805 *
4806 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4807 * @param u16FSW The FSW from the current instruction.
4808 */
4809void iemFpuUpdateFSWThenPop(PVMCPUCC pVCpu, uint16_t u16FSW) RT_NOEXCEPT
4810{
4811 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4812 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4813 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
4814 iemFpuMaybePopOne(pFpuCtx);
4815}
4816
4817
4818/**
4819 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
4820 *
4821 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4822 * @param u16FSW The FSW from the current instruction.
4823 * @param iEffSeg The effective memory operand selector register.
4824 * @param GCPtrEff The effective memory operand offset.
4825 */
4826void iemFpuUpdateFSWWithMemOp(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
4827{
4828 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4829 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
4830 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4831 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
4832}
4833
4834
4835/**
4836 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
4837 *
4838 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4839 * @param u16FSW The FSW from the current instruction.
4840 */
4841void iemFpuUpdateFSWThenPopPop(PVMCPUCC pVCpu, uint16_t u16FSW) RT_NOEXCEPT
4842{
4843 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4844 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4845 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
4846 iemFpuMaybePopOne(pFpuCtx);
4847 iemFpuMaybePopOne(pFpuCtx);
4848}
4849
4850
4851/**
4852 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
4853 *
4854 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4855 * @param u16FSW The FSW from the current instruction.
4856 * @param iEffSeg The effective memory operand selector register.
4857 * @param GCPtrEff The effective memory operand offset.
4858 */
4859void iemFpuUpdateFSWWithMemOpThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
4860{
4861 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4862 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
4863 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4864 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
4865 iemFpuMaybePopOne(pFpuCtx);
4866}
4867
4868
4869/**
4870 * Worker routine for raising an FPU stack underflow exception.
4871 *
4872 * @param pFpuCtx The FPU context.
4873 * @param iStReg The stack register being accessed.
4874 */
4875static void iemFpuStackUnderflowOnly(PX86FXSTATE pFpuCtx, uint8_t iStReg)
4876{
4877 Assert(iStReg < 8 || iStReg == UINT8_MAX);
4878 if (pFpuCtx->FCW & X86_FCW_IM)
4879 {
4880 /* Masked underflow. */
4881 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
4882 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
4883 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
4884 if (iStReg != UINT8_MAX)
4885 {
4886 pFpuCtx->FTW |= RT_BIT(iReg);
4887 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
4888 }
4889 }
4890 else
4891 {
4892 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
4893 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
4894 }
4895}
4896
4897
4898/**
4899 * Raises a FPU stack underflow exception.
4900 *
4901 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4902 * @param iStReg The destination register that should be loaded
4903 * with QNaN if \#IS is not masked. Specify
4904 * UINT8_MAX if none (like for fcom).
4905 */
4906void iemFpuStackUnderflow(PVMCPUCC pVCpu, uint8_t iStReg) RT_NOEXCEPT
4907{
4908 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4909 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4910 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
4911}
4912
4913
4914void iemFpuStackUnderflowWithMemOp(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
4915{
4916 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4917 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
4918 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4919 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
4920}
4921
4922
4923void iemFpuStackUnderflowThenPop(PVMCPUCC pVCpu, uint8_t iStReg) RT_NOEXCEPT
4924{
4925 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4926 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4927 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
4928 iemFpuMaybePopOne(pFpuCtx);
4929}
4930
4931
4932void iemFpuStackUnderflowWithMemOpThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
4933{
4934 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4935 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
4936 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4937 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
4938 iemFpuMaybePopOne(pFpuCtx);
4939}
4940
4941
4942void iemFpuStackUnderflowThenPopPop(PVMCPUCC pVCpu) RT_NOEXCEPT
4943{
4944 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4945 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4946 iemFpuStackUnderflowOnly(pFpuCtx, UINT8_MAX);
4947 iemFpuMaybePopOne(pFpuCtx);
4948 iemFpuMaybePopOne(pFpuCtx);
4949}
4950
4951
4952void iemFpuStackPushUnderflow(PVMCPUCC pVCpu) RT_NOEXCEPT
4953{
4954 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4955 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4956
4957 if (pFpuCtx->FCW & X86_FCW_IM)
4958 {
4959 /* Masked underflow - Push QNaN. */
4960 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
4961 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
4962 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
4963 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
4964 pFpuCtx->FTW |= RT_BIT(iNewTop);
4965 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
4966 iemFpuRotateStackPush(pFpuCtx);
4967 }
4968 else
4969 {
4970 /* Exception pending - don't change TOP or the register stack. */
4971 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
4972 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
4973 }
4974}
4975
4976
4977void iemFpuStackPushUnderflowTwo(PVMCPUCC pVCpu) RT_NOEXCEPT
4978{
4979 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4980 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4981
4982 if (pFpuCtx->FCW & X86_FCW_IM)
4983 {
4984 /* Masked underflow - Push QNaN. */
4985 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
4986 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
4987 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
4988 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
4989 pFpuCtx->FTW |= RT_BIT(iNewTop);
4990 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
4991 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
4992 iemFpuRotateStackPush(pFpuCtx);
4993 }
4994 else
4995 {
4996 /* Exception pending - don't change TOP or the register stack. */
4997 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
4998 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
4999 }
5000}
5001
5002
5003/**
5004 * Worker routine for raising an FPU stack overflow exception on a push.
5005 *
5006 * @param pFpuCtx The FPU context.
5007 */
5008static void iemFpuStackPushOverflowOnly(PX86FXSTATE pFpuCtx) RT_NOEXCEPT
5009{
5010 if (pFpuCtx->FCW & X86_FCW_IM)
5011 {
5012 /* Masked overflow. */
5013 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5014 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5015 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
5016 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5017 pFpuCtx->FTW |= RT_BIT(iNewTop);
5018 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5019 iemFpuRotateStackPush(pFpuCtx);
5020 }
5021 else
5022 {
5023 /* Exception pending - don't change TOP or the register stack. */
5024 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5025 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5026 }
5027}
5028
5029
5030/**
5031 * Raises a FPU stack overflow exception on a push.
5032 *
5033 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5034 */
5035void iemFpuStackPushOverflow(PVMCPUCC pVCpu) RT_NOEXCEPT
5036{
5037 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5038 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5039 iemFpuStackPushOverflowOnly(pFpuCtx);
5040}
5041
5042
5043/**
5044 * Raises a FPU stack overflow exception on a push with a memory operand.
5045 *
5046 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5047 * @param iEffSeg The effective memory operand selector register.
5048 * @param GCPtrEff The effective memory operand offset.
5049 */
5050void iemFpuStackPushOverflowWithMemOp(PVMCPUCC pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
5051{
5052 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5053 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5054 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5055 iemFpuStackPushOverflowOnly(pFpuCtx);
5056}
5057
5058/** @} */
5059
5060
5061/** @name Memory access.
5062 *
5063 * @{
5064 */
5065
5066
5067/**
5068 * Updates the IEMCPU::cbWritten counter if applicable.
5069 *
5070 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5071 * @param fAccess The access being accounted for.
5072 * @param cbMem The access size.
5073 */
5074DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPUCC pVCpu, uint32_t fAccess, size_t cbMem)
5075{
5076 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
5077 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
5078 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
5079}
5080
5081
5082/**
5083 * Applies the segment limit, base and attributes.
5084 *
5085 * This may raise a \#GP or \#SS.
5086 *
5087 * @returns VBox strict status code.
5088 *
5089 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5090 * @param fAccess The kind of access which is being performed.
5091 * @param iSegReg The index of the segment register to apply.
5092 * This is UINT8_MAX if none (for IDT, GDT, LDT,
5093 * TSS, ++).
5094 * @param cbMem The access size.
5095 * @param pGCPtrMem Pointer to the guest memory address to apply
5096 * segmentation to. Input and output parameter.
5097 */
5098VBOXSTRICTRC iemMemApplySegment(PVMCPUCC pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem) RT_NOEXCEPT
5099{
5100 if (iSegReg == UINT8_MAX)
5101 return VINF_SUCCESS;
5102
5103 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
5104 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
5105 switch (pVCpu->iem.s.enmCpuMode)
5106 {
5107 case IEMMODE_16BIT:
5108 case IEMMODE_32BIT:
5109 {
5110 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
5111 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
5112
5113 if ( pSel->Attr.n.u1Present
5114 && !pSel->Attr.n.u1Unusable)
5115 {
5116 Assert(pSel->Attr.n.u1DescType);
5117 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
5118 {
5119 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5120 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
5121 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
5122
5123 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
5124 {
5125 /** @todo CPL check. */
5126 }
5127
5128 /*
5129 * There are two kinds of data selectors, normal and expand down.
5130 */
5131 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
5132 {
5133 if ( GCPtrFirst32 > pSel->u32Limit
5134 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
5135 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5136 }
5137 else
5138 {
5139 /*
5140 * The upper boundary is defined by the B bit, not the G bit!
5141 */
5142 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
5143 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
5144 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5145 }
5146 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
5147 }
5148 else
5149 {
5150 /*
5151 * Code selectors can usually be used to read through; writing is
5152 * only permitted in real and V8086 mode.
5153 */
5154 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5155 || ( (fAccess & IEM_ACCESS_TYPE_READ)
5156 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
5157 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
5158 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
5159
5160 if ( GCPtrFirst32 > pSel->u32Limit
5161 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
5162 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5163
5164 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
5165 {
5166 /** @todo CPL check. */
5167 }
5168
5169 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
5170 }
5171 }
5172 else
5173 return iemRaiseGeneralProtectionFault0(pVCpu);
5174 return VINF_SUCCESS;
5175 }
5176
5177 case IEMMODE_64BIT:
5178 {
5179 RTGCPTR GCPtrMem = *pGCPtrMem;
5180 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
5181 *pGCPtrMem = GCPtrMem + pSel->u64Base;
5182
5183 Assert(cbMem >= 1);
5184 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
5185 return VINF_SUCCESS;
5186 /** @todo We should probably raise \#SS(0) here if segment is SS; see AMD spec.
5187 * 4.12.2 "Data Limit Checks in 64-bit Mode". */
5188 return iemRaiseGeneralProtectionFault0(pVCpu);
5189 }
5190
5191 default:
5192 AssertFailedReturn(VERR_IEM_IPE_7);
5193 }
5194}
5195
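/**
 * Worked example for the expand-down data segment case above (illustrative
 * numbers): with u32Limit = 0x0fff and the B/D bit set, the valid offset range
 * is 0x00001000..0xffffffff, i.e. the limit forms the lower bound and the B
 * bit selects the upper bound:
 * @code
 *      GCPtrFirst32 = 0x00000800;  // below limit + 1            -> iemRaiseSelectorBounds
 *      GCPtrFirst32 = 0x00001000;  // >= limit + 1, last <= max  -> OK, u64Base is added
 * @endcode
 */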
5196
5197/**
5198 * Translates a virtual address to a physical address and checks if we
5199 * can access the page as specified.
5200 *
5201 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5202 * @param GCPtrMem The virtual address.
5203 * @param fAccess The intended access.
5204 * @param pGCPhysMem Where to return the physical address.
5205 */
5206VBOXSTRICTRC iemMemPageTranslateAndCheckAccess(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint32_t fAccess, PRTGCPHYS pGCPhysMem) RT_NOEXCEPT
5207{
5208 /** @todo Need a different PGM interface here. We're currently using
5209 * generic / REM interfaces. This won't cut it for R0. */
5210 /** @todo If/when PGM handles paged real-mode, we can remove the hack in
5211 * iemSvmWorldSwitch/iemVmxWorldSwitch to work around raising a page-fault
5212 * here. */
5213 PGMPTWALK Walk;
5214 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
5215 if (RT_FAILURE(rc))
5216 {
5217 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
5218 /** @todo Check unassigned memory in unpaged mode. */
5219 /** @todo Reserved bits in page tables. Requires new PGM interface. */
5220#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5221 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5222 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
5223#endif
5224 *pGCPhysMem = NIL_RTGCPHYS;
5225 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, rc);
5226 }
5227
5228 /* If the page is writable and does not have the no-exec bit set, all
5229 access is allowed. Otherwise we'll have to check more carefully... */
5230 if ((Walk.fEffective & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
5231 {
5232 /* Write to read only memory? */
5233 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5234 && !(Walk.fEffective & X86_PTE_RW)
5235 && ( ( pVCpu->iem.s.uCpl == 3
5236 && !(fAccess & IEM_ACCESS_WHAT_SYS))
5237 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
5238 {
5239 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
5240 *pGCPhysMem = NIL_RTGCPHYS;
5241#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5242 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5243 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5244#endif
5245 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
5246 }
5247
5248 /* Kernel memory accessed by userland? */
5249 if ( !(Walk.fEffective & X86_PTE_US)
5250 && pVCpu->iem.s.uCpl == 3
5251 && !(fAccess & IEM_ACCESS_WHAT_SYS))
5252 {
5253 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
5254 *pGCPhysMem = NIL_RTGCPHYS;
5255#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5256 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5257 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5258#endif
5259 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
5260 }
5261
5262 /* Executing non-executable memory? */
5263 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
5264 && (Walk.fEffective & X86_PTE_PAE_NX)
5265 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) )
5266 {
5267 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
5268 *pGCPhysMem = NIL_RTGCPHYS;
5269#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5270 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5271 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5272#endif
5273 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
5274 VERR_ACCESS_DENIED);
5275 }
5276 }
5277
5278 /*
5279 * Set the dirty / access flags.
5280 * ASSUMES this is set when the address is translated rather than on commit...
5281 */
5282 /** @todo testcase: check when A and D bits are actually set by the CPU. */
5283 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
5284 if ((Walk.fEffective & fAccessedDirty) != fAccessedDirty)
5285 {
5286 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
5287 AssertRC(rc2);
5288 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
5289 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
5290 }
5291
5292 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
5293 *pGCPhysMem = GCPhys;
5294 return VINF_SUCCESS;
5295}
5296
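/**
 * Illustrative note on the write check above: a ring-3 data write to a present
 * page with X86_PTE_RW clear always takes the VERR_ACCESS_DENIED path into
 * iemRaisePageFault, whereas a supervisor (CPL < 3) write to the same page
 * only does so when CR0.WP is set.
 */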
5297
5298/**
5299 * Looks up a memory mapping entry.
5300 *
5301 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
5302 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5303 * @param pvMem The memory address.
5304 * @param fAccess The access type and origin flags to match.
5305 */
5306DECLINLINE(int) iemMapLookup(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess)
5307{
5308 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
5309 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
5310 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
5311 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5312 return 0;
5313 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
5314 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5315 return 1;
5316 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
5317 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5318 return 2;
5319 return VERR_NOT_FOUND;
5320}
5321
5322
5323/**
5324 * Finds a free memmap entry when using iNextMapping doesn't work.
5325 *
5326 * @returns Memory mapping index, 1024 on failure.
5327 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5328 */
5329static unsigned iemMemMapFindFree(PVMCPUCC pVCpu)
5330{
5331 /*
5332 * The easy case.
5333 */
5334 if (pVCpu->iem.s.cActiveMappings == 0)
5335 {
5336 pVCpu->iem.s.iNextMapping = 1;
5337 return 0;
5338 }
5339
5340 /* There should be enough mappings for all instructions. */
5341 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
5342
5343 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
5344 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
5345 return i;
5346
5347 AssertFailedReturn(1024);
5348}
5349
5350
5351/**
5352 * Commits a bounce buffer that needs writing back and unmaps it.
5353 *
5354 * @returns Strict VBox status code.
5355 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5356 * @param iMemMap The index of the buffer to commit.
5357 * @param fPostponeFail Whether we can postpone writer failures to ring-3.
5358 * Always false in ring-3, obviously.
5359 */
5360static VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPUCC pVCpu, unsigned iMemMap, bool fPostponeFail)
5361{
5362 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
5363 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
5364#ifdef IN_RING3
5365 Assert(!fPostponeFail);
5366 RT_NOREF_PV(fPostponeFail);
5367#endif
5368
5369 /*
5370 * Do the writing.
5371 */
5372 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
5373 if (!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned)
5374 {
5375 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
5376 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
5377 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
5378 if (!pVCpu->iem.s.fBypassHandlers)
5379 {
5380 /*
5381 * Carefully and efficiently dealing with access handler return
5382 * codes makes this a little bloated.
5383 */
5384 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
5385 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
5386 pbBuf,
5387 cbFirst,
5388 PGMACCESSORIGIN_IEM);
5389 if (rcStrict == VINF_SUCCESS)
5390 {
5391 if (cbSecond)
5392 {
5393 rcStrict = PGMPhysWrite(pVM,
5394 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5395 pbBuf + cbFirst,
5396 cbSecond,
5397 PGMACCESSORIGIN_IEM);
5398 if (rcStrict == VINF_SUCCESS)
5399 { /* nothing */ }
5400 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5401 {
5402 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
5403 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5404 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5405 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5406 }
5407#ifndef IN_RING3
5408 else if (fPostponeFail)
5409 {
5410 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5411 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5412 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5413 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
5414 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5415 return iemSetPassUpStatus(pVCpu, rcStrict);
5416 }
5417#endif
5418 else
5419 {
5420 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5421 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5422 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5423 return rcStrict;
5424 }
5425 }
5426 }
5427 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5428 {
5429 if (!cbSecond)
5430 {
5431 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
5432 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
5433 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5434 }
5435 else
5436 {
5437 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
5438 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5439 pbBuf + cbFirst,
5440 cbSecond,
5441 PGMACCESSORIGIN_IEM);
5442 if (rcStrict2 == VINF_SUCCESS)
5443 {
5444 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
5445 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5446 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5447 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5448 }
5449 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
5450 {
5451 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
5452 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5453 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
5454 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
5455 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5456 }
5457#ifndef IN_RING3
5458 else if (fPostponeFail)
5459 {
5460 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5461 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5462 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5463 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
5464 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5465 return iemSetPassUpStatus(pVCpu, rcStrict);
5466 }
5467#endif
5468 else
5469 {
5470 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5471 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5472 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
5473 return rcStrict2;
5474 }
5475 }
5476 }
5477#ifndef IN_RING3
5478 else if (fPostponeFail)
5479 {
5480 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5481 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5482 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5483 if (!cbSecond)
5484 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
5485 else
5486 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
5487 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5488 return iemSetPassUpStatus(pVCpu, rcStrict);
5489 }
5490#endif
5491 else
5492 {
5493 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
5494 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5495 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5496 return rcStrict;
5497 }
5498 }
5499 else
5500 {
5501 /*
5502 * No access handlers, much simpler.
5503 */
5504 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
5505 if (RT_SUCCESS(rc))
5506 {
5507 if (cbSecond)
5508 {
5509 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
5510 if (RT_SUCCESS(rc))
5511 { /* likely */ }
5512 else
5513 {
5514 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5515 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5516 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
5517 return rc;
5518 }
5519 }
5520 }
5521 else
5522 {
5523 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
5524 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
5525 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5526 return rc;
5527 }
5528 }
5529 }
5530
5531#if defined(IEM_LOG_MEMORY_WRITES)
5532 Log(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
5533 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
5534 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
5535 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5536 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
5537 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
5538
5539 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
5540 g_cbIemWrote = cbWrote;
5541 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
5542#endif
5543
5544 /*
5545 * Free the mapping entry.
5546 */
5547 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
5548 Assert(pVCpu->iem.s.cActiveMappings != 0);
5549 pVCpu->iem.s.cActiveMappings--;
5550 return VINF_SUCCESS;
5551}
5552
5553
5554/**
5555 * iemMemMap worker that deals with a request crossing pages.
5556 */
5557static VBOXSTRICTRC
5558iemMemBounceBufferMapCrossPage(PVMCPUCC pVCpu, int iMemMap, void **ppvMem, size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
5559{
5560 /*
5561 * Do the address translations.
5562 */
5563 RTGCPHYS GCPhysFirst;
5564 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, fAccess, &GCPhysFirst);
5565 if (rcStrict != VINF_SUCCESS)
5566 return rcStrict;
5567
5568 RTGCPHYS GCPhysSecond;
5569 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK,
5570 fAccess, &GCPhysSecond);
5571 if (rcStrict != VINF_SUCCESS)
5572 return rcStrict;
5573 GCPhysSecond &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
5574
5575 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
5576
5577 /*
5578 * Read in the current memory content if it's a read, execute or partial
5579 * write access.
5580 */
5581 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
5582 uint32_t const cbFirstPage = GUEST_PAGE_SIZE - (GCPhysFirst & GUEST_PAGE_OFFSET_MASK);
5583 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
5584
5585 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
5586 {
5587 if (!pVCpu->iem.s.fBypassHandlers)
5588 {
5589 /*
5590 * Must carefully deal with access handler status codes here,
5591 * makes the code a bit bloated.
5592 */
5593 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
5594 if (rcStrict == VINF_SUCCESS)
5595 {
5596 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
5597 if (rcStrict == VINF_SUCCESS)
5598 { /* likely */ }
5599 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5600 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5601 else
5602 {
5603 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (!!)\n",
5604 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5605 return rcStrict;
5606 }
5607 }
5608 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5609 {
5610 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
5611 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
5612 {
5613 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
5614 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5615 }
5616 else
5617 {
5618 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
5619 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
5620 return rcStrict2;
5621 }
5622 }
5623 else
5624 {
5625 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
5626 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
5627 return rcStrict;
5628 }
5629 }
5630 else
5631 {
5632 /*
5633 * No informational status codes here, much more straightforward.
5634 */
5635 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
5636 if (RT_SUCCESS(rc))
5637 {
5638 Assert(rc == VINF_SUCCESS);
5639 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
5640 if (RT_SUCCESS(rc))
5641 Assert(rc == VINF_SUCCESS);
5642 else
5643 {
5644 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
5645 return rc;
5646 }
5647 }
5648 else
5649 {
5650 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
5651 return rc;
5652 }
5653 }
5654 }
5655#ifdef VBOX_STRICT
5656 else
5657 memset(pbBuf, 0xcc, cbMem);
5658 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
5659 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
5660#endif
5661
5662 /*
5663 * Commit the bounce buffer entry.
5664 */
5665 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
5666 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
5667 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
5668 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
5669 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
5670 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
5671 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
5672 pVCpu->iem.s.iNextMapping = iMemMap + 1;
5673 pVCpu->iem.s.cActiveMappings++;
5674
5675 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
5676 *ppvMem = pbBuf;
5677 return VINF_SUCCESS;
5678}
5679
5680
5681/**
5682 * iemMemMap worker that deals with iemMemPageMap failures.
5683 */
5684static VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPUCC pVCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
5685 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
5686{
5687 /*
5688 * Filter out conditions we can handle and the ones which shouldn't happen.
5689 */
5690 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
5691 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
5692 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
5693 {
5694 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
5695 return rcMap;
5696 }
5697 pVCpu->iem.s.cPotentialExits++;
5698
5699 /*
5700 * Read in the current memory content if it's a read, execute or partial
5701 * write access.
5702 */
5703 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
5704 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
5705 {
5706 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
5707 memset(pbBuf, 0xff, cbMem);
5708 else
5709 {
5710 int rc;
5711 if (!pVCpu->iem.s.fBypassHandlers)
5712 {
5713 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
5714 if (rcStrict == VINF_SUCCESS)
5715 { /* nothing */ }
5716 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5717 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5718 else
5719 {
5720 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
5721 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
5722 return rcStrict;
5723 }
5724 }
5725 else
5726 {
5727 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
5728 if (RT_SUCCESS(rc))
5729 { /* likely */ }
5730 else
5731 {
5732 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
5733 GCPhysFirst, rc));
5734 return rc;
5735 }
5736 }
5737 }
5738 }
5739#ifdef VBOX_STRICT
5740 else
5741 memset(pbBuf, 0xcc, cbMem);
5742#endif
5743#ifdef VBOX_STRICT
5744 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
5745 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
5746#endif
5747
5748 /*
5749 * Commit the bounce buffer entry.
5750 */
5751 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
5752 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
5753 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
5754 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
5755 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
5756 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
5757 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
5758 pVCpu->iem.s.iNextMapping = iMemMap + 1;
5759 pVCpu->iem.s.cActiveMappings++;
5760
5761 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
5762 *ppvMem = pbBuf;
5763 return VINF_SUCCESS;
5764}
5765
5766
5767
5768/**
5769 * Maps the specified guest memory for the given kind of access.
5770 *
5771 * This may be using bounce buffering of the memory if it's crossing a page
5772 * boundary or if there is an access handler installed for any of it. Because
5773 * of lock prefix guarantees, we're in for some extra clutter when this
5774 * happens.
5775 *
5776 * This may raise a \#GP, \#SS, \#PF or \#AC.
5777 *
5778 * @returns VBox strict status code.
5779 *
5780 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5781 * @param ppvMem Where to return the pointer to the mapped
5782 * memory.
5783 * @param cbMem The number of bytes to map. This is usually 1,
5784 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
5785 * string operations it can be up to a page.
5786 * @param iSegReg The index of the segment register to use for
5787 * this access. The base and limits are checked.
5788 * Use UINT8_MAX to indicate that no segmentation
5789 * is required (for IDT, GDT and LDT accesses).
5790 * @param GCPtrMem The address of the guest memory.
5791 * @param fAccess How the memory is being accessed. The
5792 * IEM_ACCESS_TYPE_XXX bit is used to figure out
5793 * how to map the memory, while the
5794 * IEM_ACCESS_WHAT_XXX bit is used when raising
5795 * exceptions.
5796 */
5797VBOXSTRICTRC iemMemMap(PVMCPUCC pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess) RT_NOEXCEPT
5798{
5799 /*
5800 * Check the input and figure out which mapping entry to use.
5801 */
5802 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 256 || cbMem == 108 || cbMem == 104 || cbMem == 102 || cbMem == 94); /* 512 is the max! */
5803 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
5804 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
5805
5806 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
5807 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
5808 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
5809 {
5810 iMemMap = iemMemMapFindFree(pVCpu);
5811 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
5812 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
5813 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
5814 pVCpu->iem.s.aMemMappings[2].fAccess),
5815 VERR_IEM_IPE_9);
5816 }
5817
5818 /*
5819 * Map the memory, checking that we can actually access it. If something
5820 * slightly complicated happens, fall back on bounce buffering.
5821 */
5822 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
5823 if (rcStrict == VINF_SUCCESS)
5824 { /* likely */ }
5825 else
5826 return rcStrict;
5827
5828 if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE) /* Crossing a page boundary? */
5829 { /* likely */ }
5830 else
5831 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
5832
5833#ifdef IEM_WITH_DATA_TLB
5834 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
5835
5836 /*
5837 * Get the TLB entry for this page.
5838 */
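    /* Tag layout (explanatory): the (GCPtrMem << 16) >> (X86_PAGE_SHIFT + 16)
       expression drops the canonical sign-extension bits and the page offset,
       leaving linear address bits 12..47 in tag bits 0..35; the current data
       TLB revision is then folded into the upper bits, and the low 8 bits of
       the tag index the 256-entry table below. */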
5839 uint64_t uTag = ((GCPtrMem << 16) >> (X86_PAGE_SHIFT + 16));
5840 Assert(!(uTag >> (48 - X86_PAGE_SHIFT)));
5841 uTag |= pVCpu->iem.s.DataTlb.uTlbRevision;
5842 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries) == 256);
5843 PIEMTLBENTRY pTlbe = &pVCpu->iem.s.DataTlb.aEntries[(uint8_t)uTag];
5844 if (pTlbe->uTag == uTag)
5845 {
5846# ifdef VBOX_WITH_STATISTICS
5847 pVCpu->iem.s.DataTlb.cTlbHits++;
5848# endif
5849 }
5850 else
5851 {
5852 pVCpu->iem.s.DataTlb.cTlbMisses++;
5853 PGMPTWALK Walk;
5854 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
5855 if (RT_FAILURE(rc))
5856 {
5857 Log(("iemMemMap: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
5858# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5859 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5860 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
5861# endif
5862 iemRaisePageFaultJmp(pVCpu, GCPtrMem, fAccess, rc);
5863 }
5864
5865 Assert(Walk.fSucceeded);
5866 pTlbe->uTag = uTag;
5867 pTlbe->fFlagsAndPhysRev = ~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A); /* skipping NX */
5868 pTlbe->GCPhys = Walk.GCPhys;
5869 pTlbe->pbMappingR3 = NULL;
5870 }
5871
5872 /*
5873 * Check TLB page table level access flags.
5874 */
5875 /* If the page is either supervisor only or non-writable, we need to do
5876 more careful access checks. */
5877 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_WRITE))
5878 {
5879 /* Write to read only memory? */
5880 if ( (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_WRITE)
5881 && (fAccess & IEM_ACCESS_TYPE_WRITE)
5882 && ( ( pVCpu->iem.s.uCpl == 3
5883 && !(fAccess & IEM_ACCESS_WHAT_SYS))
5884 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
5885 {
5886 Log(("iemMemMap: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
5887# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5888 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5889 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5890# endif
5891 iemRaisePageFaultJmp(pVCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
5892 }
5893
5894 /* Kernel memory accessed by userland? */
5895 if ( (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER)
5896 && pVCpu->iem.s.uCpl == 3
5897 && !(fAccess & IEM_ACCESS_WHAT_SYS))
5898 {
5899 Log(("iemMemMap: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
5900# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5901 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5902 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5903# endif
5904 iemRaisePageFaultJmp(pVCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
5905 }
5906 }
5907
5908 /*
5909 * Set the dirty / access flags.
5910 * ASSUMES this is set when the address is translated rather than on commit...
5911 */
5912 /** @todo testcase: check when A and D bits are actually set by the CPU. */
5913 uint64_t const fTlbAccessedDirty = (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PT_NO_DIRTY : 0) | IEMTLBE_F_PG_NO_ACCESSED;
5914 if (pTlbe->fFlagsAndPhysRev & fTlbAccessedDirty)
5915 {
5916 uint32_t const fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
5917 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
5918 AssertRC(rc2);
5919 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
5920 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
5921 pTlbe->fFlagsAndPhysRev &= ~fTlbAccessedDirty;
5922 }
5923
5924 /*
5925 * Look up the physical page info if necessary.
5926 */
5927 uint8_t *pbMem = NULL;
5928 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)
5929# ifdef IN_RING3
5930 pbMem = pTlbe->pbMappingR3;
5931# else
5932 pbMem = NULL;
5933# endif
5934 else
5935 {
5936 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
5937 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
5938 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
5939 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
5940 if (RT_LIKELY(pVCpu->iem.s.DataTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
5941 { /* likely */ }
5942 else
5943 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
5944 pTlbe->pbMappingR3 = NULL;
5945 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
5946 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_UNASSIGNED);
5947 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
5948 &pbMem, &pTlbe->fFlagsAndPhysRev);
5949 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
5950# ifdef IN_RING3
5951 pTlbe->pbMappingR3 = pbMem;
5952# endif
5953 }
5954
5955 /*
5956 * Check the physical page level access and mapping.
5957 */
5958 if ( !(pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ))
5959 || !(pTlbe->fFlagsAndPhysRev & ( (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PG_NO_WRITE : 0)
5960 | (fAccess & IEM_ACCESS_TYPE_READ ? IEMTLBE_F_PG_NO_READ : 0))) )
5961 { /* probably likely */ }
5962 else
5963 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem,
5964 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,
5965 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED
5966 : pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? VERR_PGM_PHYS_TLB_CATCH_ALL
5967 : VERR_PGM_PHYS_TLB_CATCH_WRITE);
5968 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3)); /* ASSUMPTIONS about PGMPhysIemGCPhys2PtrNoLock behaviour. */
5969
5970 if (pbMem)
5971 {
5972 Assert(!((uintptr_t)pbMem & GUEST_PAGE_OFFSET_MASK));
5973 pbMem = pbMem + (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
5974 fAccess |= IEM_ACCESS_NOT_LOCKED;
5975 }
5976 else
5977 {
5978 Assert(!(fAccess & IEM_ACCESS_NOT_LOCKED));
5979 RTGCPHYS const GCPhysFirst = pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
5980 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
5981 if (rcStrict != VINF_SUCCESS)
5982 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
5983 }
5984
5985 void * const pvMem = pbMem;
5986
5987 if (fAccess & IEM_ACCESS_TYPE_WRITE)
5988 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
5989 if (fAccess & IEM_ACCESS_TYPE_READ)
5990 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
5991
5992#else /* !IEM_WITH_DATA_TLB */
5993
5994 RTGCPHYS GCPhysFirst;
5995 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
5996 if (rcStrict != VINF_SUCCESS)
5997 return rcStrict;
5998
5999 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6000 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6001 if (fAccess & IEM_ACCESS_TYPE_READ)
6002 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6003
6004 void *pvMem;
6005 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6006 if (rcStrict != VINF_SUCCESS)
6007 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
6008
6009#endif /* !IEM_WITH_DATA_TLB */
6010
6011 /*
6012 * Fill in the mapping table entry.
6013 */
6014 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
6015 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
6016 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6017 pVCpu->iem.s.cActiveMappings += 1;
6018
6019 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6020 *ppvMem = pvMem;
6021
6022 return VINF_SUCCESS;
6023}
6024
6025
6026/**
6027 * Commits the guest memory if bounce buffered and unmaps it.
6028 *
6029 * @returns Strict VBox status code.
6030 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6031 * @param pvMem The mapping.
6032 * @param fAccess The kind of access.
6033 */
6034VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess) RT_NOEXCEPT
6035{
6036 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
6037 AssertReturn(iMemMap >= 0, iMemMap);
6038
6039 /* If it's bounce buffered, we may need to write back the buffer. */
6040 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6041 {
6042 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6043 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
6044 }
6045 /* Otherwise unlock it. */
6046 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6047 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6048
6049 /* Free the entry. */
6050 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6051 Assert(pVCpu->iem.s.cActiveMappings != 0);
6052 pVCpu->iem.s.cActiveMappings--;
6053 return VINF_SUCCESS;
6054}
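
/*
 * Illustrative usage sketch (not from the upstream call sites; X86_SREG_DS,
 * GCPtrEff and u16New are assumed to be supplied by the caller): a simple
 * read-modify-write of a word through the strict-status mapping API pairs
 * iemMemMap with iemMemCommitAndUnmap like this:
 *
 *      uint16_t    *pu16Dst = NULL;
 *      VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst),
 *                                        X86_SREG_DS, GCPtrEff, IEM_ACCESS_DATA_RW);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      *pu16Dst |= u16New;
 *      return iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_RW);
 */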
6055
6056#ifdef IEM_WITH_SETJMP
6057
6058/**
6059 * Maps the specified guest memory for the given kind of access, longjmp on
6060 * error.
6061 *
6062 * This may be using bounce buffering of the memory if it's crossing a page
6063 * boundary or if there is an access handler installed for any of it. Because
6064 * of lock prefix guarantees, we're in for some extra clutter when this
6065 * happens.
6066 *
6067 * This may raise a \#GP, \#SS, \#PF or \#AC.
6068 *
6069 * @returns Pointer to the mapped memory.
6070 *
6071 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6072 * @param cbMem The number of bytes to map. This is usually 1,
6073 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
6074 * string operations it can be up to a page.
6075 * @param iSegReg The index of the segment register to use for
6076 * this access. The base and limits are checked.
6077 * Use UINT8_MAX to indicate that no segmentation
6078 * is required (for IDT, GDT and LDT accesses).
6079 * @param GCPtrMem The address of the guest memory.
6080 * @param fAccess How the memory is being accessed. The
6081 * IEM_ACCESS_TYPE_XXX bit is used to figure out
6082 * how to map the memory, while the
6083 * IEM_ACCESS_WHAT_XXX bit is used when raising
6084 * exceptions.
6085 */
6086void *iemMemMapJmp(PVMCPUCC pVCpu, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess) RT_NOEXCEPT
6087{
6088 /*
6089 * Check the input and figure out which mapping entry to use.
6090 */
6091 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
6092 Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
6093 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
6094
6095 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
6096 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6097 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
6098 {
6099 iMemMap = iemMemMapFindFree(pVCpu);
6100 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
6101 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
6102 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
6103 pVCpu->iem.s.aMemMappings[2].fAccess),
6104 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_9));
6105 }
6106
6107 /*
6108 * Map the memory, checking that we can actually access it. If something
6109 * slightly complicated happens, fall back on bounce buffering.
6110 */
6111 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
6112 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
6113 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
6114
6115 /* Crossing a page boundary? */
6116 if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE)
6117 { /* No (likely). */ }
6118 else
6119 {
6120 void *pvMem;
6121 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, cbMem, GCPtrMem, fAccess);
6122 if (rcStrict == VINF_SUCCESS)
6123 return pvMem;
6124 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
6125 }
6126
6127#ifdef IEM_WITH_DATA_TLB
6128 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
6129
6130 /*
6131 * Get the TLB entry for this page.
6132 */
6133 uint64_t uTag = ((GCPtrMem << 16) >> (X86_PAGE_SHIFT + 16));
6134 Assert(!(uTag >> (48 - X86_PAGE_SHIFT)));
6135 uTag |= pVCpu->iem.s.DataTlb.uTlbRevision;
6136 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries) == 256);
6137 PIEMTLBENTRY pTlbe = &pVCpu->iem.s.DataTlb.aEntries[(uint8_t)uTag];
6138 if (pTlbe->uTag == uTag)
6139 {
6140# ifdef VBOX_WITH_STATISTICS
6141 pVCpu->iem.s.DataTlb.cTlbHits++;
6142# endif
6143 }
6144 else
6145 {
6146 pVCpu->iem.s.DataTlb.cTlbMisses++;
6147 PGMPTWALK Walk;
6148 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
6149 if (RT_FAILURE(rc))
6150 {
6151 Log(("iemMemMap: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
6152# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6153 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6154 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
6155# endif
6156 iemRaisePageFaultJmp(pVCpu, GCPtrMem, fAccess, rc);
6157 }
6158
6159 Assert(Walk.fSucceeded);
6160 pTlbe->uTag = uTag;
6161 pTlbe->fFlagsAndPhysRev = ~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A); /* skipping NX */
6162 pTlbe->GCPhys = Walk.GCPhys;
6163 pTlbe->pbMappingR3 = NULL;
6164 }
6165
6166 /*
6167 * Check TLB page table level access flags.
6168 */
6169 /* If the page is either supervisor only or non-writable, we need to do
6170 more careful access checks. */
6171 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_WRITE))
6172 {
6173 /* Write to read only memory? */
6174 if ( (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_WRITE)
6175 && (fAccess & IEM_ACCESS_TYPE_WRITE)
6176 && ( ( pVCpu->iem.s.uCpl == 3
6177 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6178 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
6179 {
6180 Log(("iemMemMapJmp: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
6181# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6182 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6183 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6184# endif
6185 iemRaisePageFaultJmp(pVCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
6186 }
6187
6188 /* Kernel memory accessed by userland? */
6189 if ( (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER)
6190 && pVCpu->iem.s.uCpl == 3
6191 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6192 {
6193 Log(("iemMemMapJmp: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
6194# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6195 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6196 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6197# endif
6198 iemRaisePageFaultJmp(pVCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
6199 }
6200 }
6201
6202 /*
6203 * Set the dirty / access flags.
6204 * ASSUMES this is set when the address is translated rather than on commit...
6205 */
6206 /** @todo testcase: check when A and D bits are actually set by the CPU. */
6207 uint64_t const fTlbAccessedDirty = (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PT_NO_DIRTY : 0) | IEMTLBE_F_PT_NO_ACCESSED;
6208 if (pTlbe->fFlagsAndPhysRev & fTlbAccessedDirty)
6209 {
6210 uint32_t const fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
6211 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
6212 AssertRC(rc2);
6213 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
6214 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
6215 pTlbe->fFlagsAndPhysRev &= ~fTlbAccessedDirty;
6216 }
6217
6218 /*
6219 * Look up the physical page info if necessary.
6220 */
6221 uint8_t *pbMem = NULL;
6222 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6223# ifdef IN_RING3
6224 pbMem = pTlbe->pbMappingR3;
6225# else
6226 pbMem = NULL;
6227# endif
6228 else
6229 {
6230 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
6231 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
6232 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
6233 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
6234 pTlbe->pbMappingR3 = NULL;
6235 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
6236 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_UNASSIGNED);
6237 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
6238 &pbMem, &pTlbe->fFlagsAndPhysRev);
6239 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
6240# ifdef IN_RING3
6241 pTlbe->pbMappingR3 = pbMem;
6242# endif
6243 }
6244
6245 /*
6246 * Check the physical page level access and mapping.
6247 */
6248 if ( !(pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ))
6249 || !(pTlbe->fFlagsAndPhysRev & ( (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PG_NO_WRITE : 0)
6250 | (fAccess & IEM_ACCESS_TYPE_READ ? IEMTLBE_F_PG_NO_READ : 0))) )
6251 { /* probably likely */ }
6252 else
6253 {
6254 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, (void **)&pbMem, cbMem,
6255 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,
6256 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED
6257 : pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? VERR_PGM_PHYS_TLB_CATCH_ALL
6258 : VERR_PGM_PHYS_TLB_CATCH_WRITE);
6259 if (rcStrict == VINF_SUCCESS)
6260 return pbMem;
6261 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
6262 }
6263 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3)); /* ASSUMPTIONS about PGMPhysIemGCPhys2PtrNoLock behaviour. */
6264
6265 if (pbMem)
6266 {
6267 Assert(!((uintptr_t)pbMem & GUEST_PAGE_OFFSET_MASK));
6268 pbMem = pbMem + (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6269 fAccess |= IEM_ACCESS_NOT_LOCKED;
6270 }
6271 else
6272 {
6273 Assert(!(fAccess & IEM_ACCESS_NOT_LOCKED));
6274 RTGCPHYS const GCPhysFirst = pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6275 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6276 if (rcStrict == VINF_SUCCESS)
6277 return pbMem;
6278 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
6279 }
6280
6281 void * const pvMem = pbMem;
6282
6283 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6284 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6285 if (fAccess & IEM_ACCESS_TYPE_READ)
6286 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6287
6288#else /* !IEM_WITH_DATA_TLB */
6289
6290
6291 RTGCPHYS GCPhysFirst;
6292 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
6293 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
6294 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
6295
6296 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6297 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6298 if (fAccess & IEM_ACCESS_TYPE_READ)
6299 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6300
6301 void *pvMem;
6302 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6303 if (rcStrict == VINF_SUCCESS)
6304 { /* likely */ }
6305 else
6306 {
6307 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
6308 if (rcStrict == VINF_SUCCESS)
6309 return pvMem;
6310 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
6311 }
6312
6313#endif /* !IEM_WITH_DATA_TLB */
6314
6315 /*
6316 * Fill in the mapping table entry.
6317 */
6318 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
6319 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
6320 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6321 pVCpu->iem.s.cActiveMappings++;
6322
6323 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6324 return pvMem;
6325}
6326
6327
6328/**
6329 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
6330 *
6331 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6332 * @param pvMem The mapping.
6333 * @param fAccess The kind of access.
6334 */
6335void iemMemCommitAndUnmapJmp(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess) RT_NOEXCEPT
6336{
6337 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
6338 AssertStmt(iMemMap >= 0, longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), iMemMap));
6339
6340 /* If it's bounce buffered, we may need to write back the buffer. */
6341 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6342 {
6343 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6344 {
6345 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
6346 if (rcStrict == VINF_SUCCESS)
6347 return;
6348 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
6349 }
6350 }
6351 /* Otherwise unlock it. */
6352 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6353 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6354
6355 /* Free the entry. */
6356 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6357 Assert(pVCpu->iem.s.cActiveMappings != 0);
6358 pVCpu->iem.s.cActiveMappings--;
6359}
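
/*
 * Illustrative usage sketch (not from the upstream call sites; X86_SREG_DS,
 * GCPtrEff and u16New are assumed to be supplied by the caller): the longjmp
 * variants drop the status plumbing, any failure unwinds via the setjmp frame
 * set up by the IEM core:
 *
 *      uint16_t *pu16Dst = (uint16_t *)iemMemMapJmp(pVCpu, sizeof(*pu16Dst),
 *                                                   X86_SREG_DS, GCPtrEff, IEM_ACCESS_DATA_RW);
 *      *pu16Dst |= u16New;
 *      iemMemCommitAndUnmapJmp(pVCpu, pu16Dst, IEM_ACCESS_DATA_RW);
 */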
6360
6361#endif /* IEM_WITH_SETJMP */
6362
6363#ifndef IN_RING3
6364/**
6365 * Commits the guest memory if bounce buffered and unmaps it; if any part of the
6366 * bounce buffer commit runs into trouble, it is postponed to ring-3 (sets FF and stuff).
6367 *
6368 * Allows the instruction to be completed and retired, while the IEM user will
6369 * return to ring-3 immediately afterwards and do the postponed writes there.
6370 *
6371 * @returns VBox status code (no strict statuses). Caller must check
6372 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
6373 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6374 * @param pvMem The mapping.
6375 * @param fAccess The kind of access.
6376 */
6377VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess) RT_NOEXCEPT
6378{
6379 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
6380 AssertReturn(iMemMap >= 0, iMemMap);
6381
6382 /* If it's bounce buffered, we may need to write back the buffer. */
6383 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6384 {
6385 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6386 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
6387 }
6388 /* Otherwise unlock it. */
6389 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6390 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6391
6392 /* Free the entry. */
6393 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6394 Assert(pVCpu->iem.s.cActiveMappings != 0);
6395 pVCpu->iem.s.cActiveMappings--;
6396 return VINF_SUCCESS;
6397}
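
/*
 * Illustrative caller sketch (hypothetical, not from the upstream call sites):
 * the instruction is retired even when the commit could not be finished here,
 * and the caller lets EM drop to ring-3 before repeating the operation:
 *
 *      rcStrict = iemMemCommitAndUnmapPostponeTroubleToR3(pVCpu, pvMem, IEM_ACCESS_DATA_W);
 *      if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
 *          return rcStrict;
 */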
6398#endif
6399
6400
6401/**
6402 * Rolls back mappings, releasing page locks and such.
6403 *
6404 * The caller shall only call this after checking cActiveMappings.
6405 *
6407 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6408 */
6409void iemMemRollback(PVMCPUCC pVCpu) RT_NOEXCEPT
6410{
6411 Assert(pVCpu->iem.s.cActiveMappings > 0);
6412
6413 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
6414 while (iMemMap-- > 0)
6415 {
6416 uint32_t const fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
6417 if (fAccess != IEM_ACCESS_INVALID)
6418 {
6419 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
6420 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6421 if (!(fAccess & (IEM_ACCESS_BOUNCE_BUFFERED | IEM_ACCESS_NOT_LOCKED)))
6422 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6423 AssertMsg(pVCpu->iem.s.cActiveMappings > 0,
6424 ("iMemMap=%u fAccess=%#x pv=%p GCPhysFirst=%RGp GCPhysSecond=%RGp\n",
6425 iMemMap, fAccess, pVCpu->iem.s.aMemMappings[iMemMap].pv,
6426 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond));
6427 pVCpu->iem.s.cActiveMappings--;
6428 }
6429 }
6430}
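
/*
 * Illustrative caller sketch (hypothetical): the execution loops only invoke
 * the rollback when an instruction failed while still holding mappings:
 *
 *      if (rcStrict != VINF_SUCCESS && pVCpu->iem.s.cActiveMappings > 0)
 *          iemMemRollback(pVCpu);
 */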
6431
6432
6433/**
6434 * Fetches a data byte.
6435 *
6436 * @returns Strict VBox status code.
6437 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6438 * @param pu8Dst Where to return the byte.
6439 * @param iSegReg The index of the segment register to use for
6440 * this access. The base and limits are checked.
6441 * @param GCPtrMem The address of the guest memory.
6442 */
6443VBOXSTRICTRC iemMemFetchDataU8(PVMCPUCC pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6444{
6445 /* The lazy approach for now... */
6446 uint8_t const *pu8Src;
6447 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
6448 if (rc == VINF_SUCCESS)
6449 {
6450 *pu8Dst = *pu8Src;
6451 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
6452 }
6453 return rc;
6454}
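
/*
 * Illustrative usage sketch (hypothetical operands): the fetch helpers bundle
 * the map/copy/unmap sequence above into a single call, so an instruction
 * implementation typically only does:
 *
 *      uint8_t      bTmp;
 *      VBOXSTRICTRC rcStrict = iemMemFetchDataU8(pVCpu, &bTmp, X86_SREG_DS, GCPtrEff);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 */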
6455
6456
6457#ifdef IEM_WITH_SETJMP
6458/**
6459 * Fetches a data byte, longjmp on error.
6460 *
6461 * @returns The byte.
6462 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6463 * @param iSegReg The index of the segment register to use for
6464 * this access. The base and limits are checked.
6465 * @param GCPtrMem The address of the guest memory.
6466 */
6467uint8_t iemMemFetchDataU8Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6468{
6469 /* The lazy approach for now... */
6470 uint8_t const *pu8Src = (uint8_t const *)iemMemMapJmp(pVCpu, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
6471 uint8_t const bRet = *pu8Src;
6472 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
6473 return bRet;
6474}
6475#endif /* IEM_WITH_SETJMP */
6476
6477
6478/**
6479 * Fetches a data word.
6480 *
6481 * @returns Strict VBox status code.
6482 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6483 * @param pu16Dst Where to return the word.
6484 * @param iSegReg The index of the segment register to use for
6485 * this access. The base and limits are checked.
6486 * @param GCPtrMem The address of the guest memory.
6487 */
6488VBOXSTRICTRC iemMemFetchDataU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6489{
6490 /* The lazy approach for now... */
6491 uint16_t const *pu16Src;
6492 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
6493 if (rc == VINF_SUCCESS)
6494 {
6495 *pu16Dst = *pu16Src;
6496 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
6497 }
6498 return rc;
6499}
6500
6501
6502#ifdef IEM_WITH_SETJMP
6503/**
6504 * Fetches a data word, longjmp on error.
6505 *
6506 * @returns The word.
6507 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6508 * @param iSegReg The index of the segment register to use for
6509 * this access. The base and limits are checked.
6510 * @param GCPtrMem The address of the guest memory.
6511 */
6512uint16_t iemMemFetchDataU16Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6513{
6514 /* The lazy approach for now... */
6515 uint16_t const *pu16Src = (uint16_t const *)iemMemMapJmp(pVCpu, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
6516 uint16_t const u16Ret = *pu16Src;
6517 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
6518 return u16Ret;
6519}
6520#endif
6521
6522
6523/**
6524 * Fetches a data dword.
6525 *
6526 * @returns Strict VBox status code.
6527 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6528 * @param pu32Dst Where to return the dword.
6529 * @param iSegReg The index of the segment register to use for
6530 * this access. The base and limits are checked.
6531 * @param GCPtrMem The address of the guest memory.
6532 */
6533VBOXSTRICTRC iemMemFetchDataU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6534{
6535 /* The lazy approach for now... */
6536 uint32_t const *pu32Src;
6537 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
6538 if (rc == VINF_SUCCESS)
6539 {
6540 *pu32Dst = *pu32Src;
6541 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
6542 }
6543 return rc;
6544}
6545
6546
6547/**
6548 * Fetches a data dword and zero extends it to a qword.
6549 *
6550 * @returns Strict VBox status code.
6551 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6552 * @param pu64Dst Where to return the qword.
6553 * @param iSegReg The index of the segment register to use for
6554 * this access. The base and limits are checked.
6555 * @param GCPtrMem The address of the guest memory.
6556 */
6557VBOXSTRICTRC iemMemFetchDataU32_ZX_U64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6558{
6559 /* The lazy approach for now... */
6560 uint32_t const *pu32Src;
6561 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
6562 if (rc == VINF_SUCCESS)
6563 {
6564 *pu64Dst = *pu32Src;
6565 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
6566 }
6567 return rc;
6568}
6569
6570
6571#ifdef IEM_WITH_SETJMP
6572
6573/**
6574 * Fetches a data dword, longjmp on error, fallback/safe version.
6575 *
6576 * @returns The dword.
6577 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6578 * @param iSegReg The index of the segment register to use for
6579 * this access. The base and limits are checked.
6580 * @param GCPtrMem The address of the guest memory.
6581 */
6582uint32_t iemMemFetchDataU32SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6583{
6584 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
6585 uint32_t const u32Ret = *pu32Src;
6586 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
6587 return u32Ret;
6588}
6589
6590
6591/**
6592 * Fetches a data dword, longjmp on error.
6593 *
6594 * @returns The dword.
6595 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6596 * @param iSegReg The index of the segment register to use for
6597 * this access. The base and limits are checked.
6598 * @param GCPtrMem The address of the guest memory.
6599 */
6600uint32_t iemMemFetchDataU32Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6601{
6602# if 0 //def IEM_WITH_DATA_TLB
6603 RTGCPTR GCPtrEff = iemMemApplySegmentToReadJmp(pVCpu, iSegReg, sizeof(uint32_t), GCPtrMem);
6604 if (RT_LIKELY((GCPtrEff & X86_PAGE_OFFSET_MASK) <= X86_PAGE_SIZE - sizeof(uint32_t)))
6605 {
6606 /// @todo more soon...
6607 }
6608
6609 return iemMemFetchDataU32SafeJmp(pVCpu, iSegReg, GCPtrMem);
6610# else
6611 /* The lazy approach. */
6612 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
6613 uint32_t const u32Ret = *pu32Src;
6614 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
6615 return u32Ret;
6616# endif
6617}
6618#endif
6619
6620
6621#ifdef SOME_UNUSED_FUNCTION
6622/**
6623 * Fetches a data dword and sign extends it to a qword.
6624 *
6625 * @returns Strict VBox status code.
6626 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6627 * @param pu64Dst Where to return the sign extended value.
6628 * @param iSegReg The index of the segment register to use for
6629 * this access. The base and limits are checked.
6630 * @param GCPtrMem The address of the guest memory.
6631 */
6632VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6633{
6634 /* The lazy approach for now... */
6635 int32_t const *pi32Src;
6636 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
6637 if (rc == VINF_SUCCESS)
6638 {
6639 *pu64Dst = *pi32Src;
6640 rc = iemMemCommitAndUnmap(pVCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
6641 }
6642#ifdef __GNUC__ /* warning: GCC may be a royal pain */
6643 else
6644 *pu64Dst = 0;
6645#endif
6646 return rc;
6647}
6648#endif
6649
6650
6651/**
6652 * Fetches a data qword.
6653 *
6654 * @returns Strict VBox status code.
6655 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6656 * @param pu64Dst Where to return the qword.
6657 * @param iSegReg The index of the segment register to use for
6658 * this access. The base and limits are checked.
6659 * @param GCPtrMem The address of the guest memory.
6660 */
6661VBOXSTRICTRC iemMemFetchDataU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6662{
6663 /* The lazy approach for now... */
6664 uint64_t const *pu64Src;
6665 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
6666 if (rc == VINF_SUCCESS)
6667 {
6668 *pu64Dst = *pu64Src;
6669 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
6670 }
6671 return rc;
6672}
6673
6674
6675#ifdef IEM_WITH_SETJMP
6676/**
6677 * Fetches a data qword, longjmp on error.
6678 *
6679 * @returns The qword.
6680 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6681 * @param iSegReg The index of the segment register to use for
6682 * this access. The base and limits are checked.
6683 * @param GCPtrMem The address of the guest memory.
6684 */
6685uint64_t iemMemFetchDataU64Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6686{
6687 /* The lazy approach for now... */
6688 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
6689 uint64_t const u64Ret = *pu64Src;
6690 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
6691 return u64Ret;
6692}
6693#endif
6694
6695
6696/**
6697 * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
6698 *
6699 * @returns Strict VBox status code.
6700 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6701 * @param pu64Dst Where to return the qword.
6702 * @param iSegReg The index of the segment register to use for
6703 * this access. The base and limits are checked.
6704 * @param GCPtrMem The address of the guest memory.
6705 */
6706VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6707{
6708 /* The lazy approach for now... */
6709 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
6710 if (RT_UNLIKELY(GCPtrMem & 15))
6711 return iemRaiseGeneralProtectionFault0(pVCpu);
6712
6713 uint64_t const *pu64Src;
6714 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
6715 if (rc == VINF_SUCCESS)
6716 {
6717 *pu64Dst = *pu64Src;
6718 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
6719 }
6720 return rc;
6721}
6722
6723
6724#ifdef IEM_WITH_SETJMP
6725/**
6726 * Fetches a data qword, aligned at a 16 byte boundary (for SSE), longjmp on error.
6727 *
6728 * @returns The qword.
6729 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6730 * @param iSegReg The index of the segment register to use for
6731 * this access. The base and limits are checked.
6732 * @param GCPtrMem The address of the guest memory.
6733 */
6734uint64_t iemMemFetchDataU64AlignedU128Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6735{
6736 /* The lazy approach for now... */
6737 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
6738 if (RT_LIKELY(!(GCPtrMem & 15)))
6739 {
6740 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
6741 uint64_t const u64Ret = *pu64Src;
6742 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
6743 return u64Ret;
6744 }
6745
6746 VBOXSTRICTRC rc = iemRaiseGeneralProtectionFault0(pVCpu);
6747 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rc));
6748}
6749#endif
6750
6751
6752/**
6753 * Fetches a data tword.
6754 *
6755 * @returns Strict VBox status code.
6756 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6757 * @param pr80Dst Where to return the tword.
6758 * @param iSegReg The index of the segment register to use for
6759 * this access. The base and limits are checked.
6760 * @param GCPtrMem The address of the guest memory.
6761 */
6762VBOXSTRICTRC iemMemFetchDataR80(PVMCPUCC pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6763{
6764 /* The lazy approach for now... */
6765 PCRTFLOAT80U pr80Src;
6766 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
6767 if (rc == VINF_SUCCESS)
6768 {
6769 *pr80Dst = *pr80Src;
6770 rc = iemMemCommitAndUnmap(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
6771 }
6772 return rc;
6773}
6774
6775
6776#ifdef IEM_WITH_SETJMP
6777/**
6778 * Fetches a data tword, longjmp on error.
6779 *
6780 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6781 * @param pr80Dst Where to return the tword.
6782 * @param iSegReg The index of the segment register to use for
6783 * this access. The base and limits are checked.
6784 * @param GCPtrMem The address of the guest memory.
6785 */
6786void iemMemFetchDataR80Jmp(PVMCPUCC pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6787{
6788 /* The lazy approach for now... */
6789 PCRTFLOAT80U pr80Src = (PCRTFLOAT80U)iemMemMapJmp(pVCpu, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
6790 *pr80Dst = *pr80Src;
6791 iemMemCommitAndUnmapJmp(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
6792}
6793#endif
6794
6795
6796/**
6797 * Fetches a data tword (80-bit packed BCD).
6798 *
6799 * @returns Strict VBox status code.
6800 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6801 * @param pd80Dst Where to return the tword.
6802 * @param iSegReg The index of the segment register to use for
6803 * this access. The base and limits are checked.
6804 * @param GCPtrMem The address of the guest memory.
6805 */
6806VBOXSTRICTRC iemMemFetchDataD80(PVMCPUCC pVCpu, PRTPBCD80U pd80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6807{
6808 /* The lazy approach for now... */
6809 PCRTPBCD80U pd80Src;
6810 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pd80Src, sizeof(*pd80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
6811 if (rc == VINF_SUCCESS)
6812 {
6813 *pd80Dst = *pd80Src;
6814 rc = iemMemCommitAndUnmap(pVCpu, (void *)pd80Src, IEM_ACCESS_DATA_R);
6815 }
6816 return rc;
6817}
6818
6819
6820#ifdef IEM_WITH_SETJMP
6821/**
6822 * Fetches a data tword (80-bit packed BCD), longjmp on error.
6823 *
6824 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6825 * @param pd80Dst Where to return the tword.
6826 * @param iSegReg The index of the segment register to use for
6827 * this access. The base and limits are checked.
6828 * @param GCPtrMem The address of the guest memory.
6829 */
6830void iemMemFetchDataD80Jmp(PVMCPUCC pVCpu, PRTPBCD80U pd80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6831{
6832 /* The lazy approach for now... */
6833 PCRTPBCD80U pd80Src = (PCRTPBCD80U)iemMemMapJmp(pVCpu, sizeof(*pd80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
6834 *pd80Dst = *pd80Src;
6835 iemMemCommitAndUnmapJmp(pVCpu, (void *)pd80Src, IEM_ACCESS_DATA_R);
6836}
6837#endif
6838
6839
6840/**
6841 * Fetches a data dqword (double qword), generally SSE related.
6842 *
6843 * @returns Strict VBox status code.
6844 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6845 * @param pu128Dst Where to return the dqword.
6846 * @param iSegReg The index of the segment register to use for
6847 * this access. The base and limits are checked.
6848 * @param GCPtrMem The address of the guest memory.
6849 */
6850VBOXSTRICTRC iemMemFetchDataU128(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6851{
6852 /* The lazy approach for now... */
6853 PCRTUINT128U pu128Src;
6854 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
6855 if (rc == VINF_SUCCESS)
6856 {
6857 pu128Dst->au64[0] = pu128Src->au64[0];
6858 pu128Dst->au64[1] = pu128Src->au64[1];
6859 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
6860 }
6861 return rc;
6862}
6863
6864
6865#ifdef IEM_WITH_SETJMP
6866/**
6867 * Fetches a data dqword (double qword), generally SSE related.
6868 *
6869 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6870 * @param pu128Dst Where to return the dqword.
6871 * @param iSegReg The index of the segment register to use for
6872 * this access. The base and limits are checked.
6873 * @param GCPtrMem The address of the guest memory.
6874 */
6875void iemMemFetchDataU128Jmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6876{
6877 /* The lazy approach for now... */
6878 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
6879 pu128Dst->au64[0] = pu128Src->au64[0];
6880 pu128Dst->au64[1] = pu128Src->au64[1];
6881 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
6882}
6883#endif
6884
6885
6886/**
6887 * Fetches a data dqword (double qword) at an aligned address, generally SSE
6888 * related.
6889 *
6890 * Raises \#GP(0) if not aligned.
6891 *
6892 * @returns Strict VBox status code.
6893 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6894 * @param pu128Dst Where to return the dqword.
6895 * @param iSegReg The index of the segment register to use for
6896 * this access. The base and limits are checked.
6897 * @param GCPtrMem The address of the guest memory.
6898 */
6899VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6900{
6901 /* The lazy approach for now... */
6902 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
6903 if ( (GCPtrMem & 15)
6904 && !(pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
6905 return iemRaiseGeneralProtectionFault0(pVCpu);
6906
6907 PCRTUINT128U pu128Src;
6908 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
6909 if (rc == VINF_SUCCESS)
6910 {
6911 pu128Dst->au64[0] = pu128Src->au64[0];
6912 pu128Dst->au64[1] = pu128Src->au64[1];
6913 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
6914 }
6915 return rc;
6916}
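
/*
 * Alignment sketch (illustrative example addresses only): the aligned SSE
 * fetch either faults or succeeds depending on MXCSR.MM (AMD misaligned SSE
 * mode), matching the check above:
 *
 *      GCPtrMem = 0x1008, MXCSR.MM = 0  ->  #GP(0)
 *      GCPtrMem = 0x1008, MXCSR.MM = 1  ->  16 bytes fetched
 *      GCPtrMem = 0x1010, any MXCSR.MM  ->  16 bytes fetched
 */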
6917
6918
6919#ifdef IEM_WITH_SETJMP
6920/**
6921 * Fetches a data dqword (double qword) at an aligned address, generally SSE
6922 * related, longjmp on error.
6923 *
6924 * Raises \#GP(0) if not aligned.
6925 *
6926 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6927 * @param pu128Dst Where to return the dqword.
6928 * @param iSegReg The index of the segment register to use for
6929 * this access. The base and limits are checked.
6930 * @param GCPtrMem The address of the guest memory.
6931 */
6932void iemMemFetchDataU128AlignedSseJmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6933{
6934 /* The lazy approach for now... */
6935 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
6936 if ( (GCPtrMem & 15) == 0
6937 || (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
6938 {
6939 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
6940 pu128Dst->au64[0] = pu128Src->au64[0];
6941 pu128Dst->au64[1] = pu128Src->au64[1];
6942 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
6943 return;
6944 }
6945
6946 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
6947 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
6948}
6949#endif
6950
6951
6952/**
6953 * Fetches a data oword (octo word), generally AVX related.
6954 *
6955 * @returns Strict VBox status code.
6956 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6957 * @param pu256Dst Where to return the oword.
6958 * @param iSegReg The index of the segment register to use for
6959 * this access. The base and limits are checked.
6960 * @param GCPtrMem The address of the guest memory.
6961 */
6962VBOXSTRICTRC iemMemFetchDataU256(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6963{
6964 /* The lazy approach for now... */
6965 PCRTUINT256U pu256Src;
6966 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
6967 if (rc == VINF_SUCCESS)
6968 {
6969 pu256Dst->au64[0] = pu256Src->au64[0];
6970 pu256Dst->au64[1] = pu256Src->au64[1];
6971 pu256Dst->au64[2] = pu256Src->au64[2];
6972 pu256Dst->au64[3] = pu256Src->au64[3];
6973 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
6974 }
6975 return rc;
6976}
6977
6978
6979#ifdef IEM_WITH_SETJMP
6980/**
6981 * Fetches a data oword (octo word), generally AVX related.
6982 *
6983 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6984 * @param pu256Dst Where to return the oword.
6985 * @param iSegReg The index of the segment register to use for
6986 * this access. The base and limits are checked.
6987 * @param GCPtrMem The address of the guest memory.
6988 */
6989void iemMemFetchDataU256Jmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6990{
6991 /* The lazy approach for now... */
6992 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
6993 pu256Dst->au64[0] = pu256Src->au64[0];
6994 pu256Dst->au64[1] = pu256Src->au64[1];
6995 pu256Dst->au64[2] = pu256Src->au64[2];
6996 pu256Dst->au64[3] = pu256Src->au64[3];
6997 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
6998}
6999#endif
7000
7001
7002/**
7003 * Fetches a data oword (octo word) at an aligned address, generally AVX
7004 * related.
7005 *
7006 * Raises \#GP(0) if not aligned.
7007 *
7008 * @returns Strict VBox status code.
7009 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7010 * @param pu256Dst Where to return the oword.
7011 * @param iSegReg The index of the segment register to use for
7012 * this access. The base and limits are checked.
7013 * @param GCPtrMem The address of the guest memory.
7014 */
7015VBOXSTRICTRC iemMemFetchDataU256AlignedSse(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7016{
7017 /* The lazy approach for now... */
7018 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on AVX stuff. */
7019 if (GCPtrMem & 31)
7020 return iemRaiseGeneralProtectionFault0(pVCpu);
7021
7022 PCRTUINT256U pu256Src;
7023 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7024 if (rc == VINF_SUCCESS)
7025 {
7026 pu256Dst->au64[0] = pu256Src->au64[0];
7027 pu256Dst->au64[1] = pu256Src->au64[1];
7028 pu256Dst->au64[2] = pu256Src->au64[2];
7029 pu256Dst->au64[3] = pu256Src->au64[3];
7030 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
7031 }
7032 return rc;
7033}
7034
7035
7036#ifdef IEM_WITH_SETJMP
7037/**
7038 * Fetches a data oword (octo word) at an aligned address, generally AVX
7039 * related, longjmp on error.
7040 *
7041 * Raises \#GP(0) if not aligned.
7042 *
7043 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7044 * @param pu256Dst Where to return the oword.
7045 * @param iSegReg The index of the segment register to use for
7046 * this access. The base and limits are checked.
7047 * @param GCPtrMem The address of the guest memory.
7048 */
7049void iemMemFetchDataU256AlignedSseJmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7050{
7051 /* The lazy approach for now... */
7052 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on AVX stuff. */
7053 if ((GCPtrMem & 31) == 0)
7054 {
7055 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7056 pu256Dst->au64[0] = pu256Src->au64[0];
7057 pu256Dst->au64[1] = pu256Src->au64[1];
7058 pu256Dst->au64[2] = pu256Src->au64[2];
7059 pu256Dst->au64[3] = pu256Src->au64[3];
7060 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
7061 return;
7062 }
7063
7064 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
7065 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
7066}
7067#endif
7068
7069
7070
7071/**
7072 * Fetches a descriptor register (lgdt, lidt).
7073 *
7074 * @returns Strict VBox status code.
7075 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7076 * @param pcbLimit Where to return the limit.
7077 * @param pGCPtrBase Where to return the base.
7078 * @param iSegReg The index of the segment register to use for
7079 * this access. The base and limits are checked.
7080 * @param GCPtrMem The address of the guest memory.
7081 * @param enmOpSize The effective operand size.
7082 */
7083VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPUCC pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
7084 RTGCPTR GCPtrMem, IEMMODE enmOpSize) RT_NOEXCEPT
7085{
7086 /*
7087 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
7088 * little special:
7089 * - The two reads are done separately.
7090 * - Operand size override works in 16-bit and 32-bit code, but not in 64-bit code.
7091 * - We suspect the 386 to actually commit the limit before the base in
7092 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
7093 * don't try to emulate this eccentric behavior, because it's not well
7094 * enough understood and rather hard to trigger.
7095 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
7096 */
7097 VBOXSTRICTRC rcStrict;
7098 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7099 {
7100 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7101 if (rcStrict == VINF_SUCCESS)
7102 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
7103 }
7104 else
7105 {
7106 uint32_t uTmp = 0; /* (Visual C++ maybe used uninitialized) */
7107 if (enmOpSize == IEMMODE_32BIT)
7108 {
7109 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
7110 {
7111 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7112 if (rcStrict == VINF_SUCCESS)
7113 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7114 }
7115 else
7116 {
7117 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
7118 if (rcStrict == VINF_SUCCESS)
7119 {
7120 *pcbLimit = (uint16_t)uTmp;
7121 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7122 }
7123 }
7124 if (rcStrict == VINF_SUCCESS)
7125 *pGCPtrBase = uTmp;
7126 }
7127 else
7128 {
7129 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7130 if (rcStrict == VINF_SUCCESS)
7131 {
7132 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7133 if (rcStrict == VINF_SUCCESS)
7134 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
7135 }
7136 }
7137 }
7138 return rcStrict;
7139}
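
/*
 * Illustrative usage sketch (hypothetical locals; iEffSeg, GCPtrEffSrc and
 * enmEffOpSize would come from the instruction decoder): an LGDT/LIDT style
 * implementation fetches the pseudo-descriptor like this:
 *
 *      uint16_t cbLimit;
 *      RTGCPTR  GCPtrBase;
 *      rcStrict = iemMemFetchDataXdtr(pVCpu, &cbLimit, &GCPtrBase, iEffSeg, GCPtrEffSrc, enmEffOpSize);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 */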
7140
7141
7142
7143/**
7144 * Stores a data byte.
7145 *
7146 * @returns Strict VBox status code.
7147 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7148 * @param iSegReg The index of the segment register to use for
7149 * this access. The base and limits are checked.
7150 * @param GCPtrMem The address of the guest memory.
7151 * @param u8Value The value to store.
7152 */
7153VBOXSTRICTRC iemMemStoreDataU8(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value) RT_NOEXCEPT
7154{
7155 /* The lazy approach for now... */
7156 uint8_t *pu8Dst;
7157 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7158 if (rc == VINF_SUCCESS)
7159 {
7160 *pu8Dst = u8Value;
7161 rc = iemMemCommitAndUnmap(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
7162 }
7163 return rc;
7164}
7165
7166
7167#ifdef IEM_WITH_SETJMP
7168/**
7169 * Stores a data byte, longjmp on error.
7170 *
7171 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7172 * @param iSegReg The index of the segment register to use for
7173 * this access. The base and limits are checked.
7174 * @param GCPtrMem The address of the guest memory.
7175 * @param u8Value The value to store.
7176 */
7177void iemMemStoreDataU8Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value) RT_NOEXCEPT
7178{
7179 /* The lazy approach for now... */
7180 uint8_t *pu8Dst = (uint8_t *)iemMemMapJmp(pVCpu, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7181 *pu8Dst = u8Value;
7182 iemMemCommitAndUnmapJmp(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
7183}
7184#endif
7185
7186
7187/**
7188 * Stores a data word.
7189 *
7190 * @returns Strict VBox status code.
7191 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7192 * @param iSegReg The index of the segment register to use for
7193 * this access. The base and limits are checked.
7194 * @param GCPtrMem The address of the guest memory.
7195 * @param u16Value The value to store.
7196 */
7197VBOXSTRICTRC iemMemStoreDataU16(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value) RT_NOEXCEPT
7198{
7199 /* The lazy approach for now... */
7200 uint16_t *pu16Dst;
7201 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7202 if (rc == VINF_SUCCESS)
7203 {
7204 *pu16Dst = u16Value;
7205 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
7206 }
7207 return rc;
7208}
7209
7210
7211#ifdef IEM_WITH_SETJMP
7212/**
7213 * Stores a data word, longjmp on error.
7214 *
7215 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7216 * @param iSegReg The index of the segment register to use for
7217 * this access. The base and limits are checked.
7218 * @param GCPtrMem The address of the guest memory.
7219 * @param u16Value The value to store.
7220 */
7221void iemMemStoreDataU16Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value) RT_NOEXCEPT
7222{
7223 /* The lazy approach for now... */
7224 uint16_t *pu16Dst = (uint16_t *)iemMemMapJmp(pVCpu, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7225 *pu16Dst = u16Value;
7226 iemMemCommitAndUnmapJmp(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
7227}
7228#endif
7229
7230
7231/**
7232 * Stores a data dword.
7233 *
7234 * @returns Strict VBox status code.
7235 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7236 * @param iSegReg The index of the segment register to use for
7237 * this access. The base and limits are checked.
7238 * @param GCPtrMem The address of the guest memory.
7239 * @param u32Value The value to store.
7240 */
7241VBOXSTRICTRC iemMemStoreDataU32(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value) RT_NOEXCEPT
7242{
7243 /* The lazy approach for now... */
7244 uint32_t *pu32Dst;
7245 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7246 if (rc == VINF_SUCCESS)
7247 {
7248 *pu32Dst = u32Value;
7249 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
7250 }
7251 return rc;
7252}
7253
7254
7255#ifdef IEM_WITH_SETJMP
7256/**
7257 * Stores a data dword, longjmp on error.
7258 *
7260 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7261 * @param iSegReg The index of the segment register to use for
7262 * this access. The base and limits are checked.
7263 * @param GCPtrMem The address of the guest memory.
7264 * @param u32Value The value to store.
7265 */
7266void iemMemStoreDataU32Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value) RT_NOEXCEPT
7267{
7268 /* The lazy approach for now... */
7269 uint32_t *pu32Dst = (uint32_t *)iemMemMapJmp(pVCpu, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7270 *pu32Dst = u32Value;
7271 iemMemCommitAndUnmapJmp(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
7272}
7273#endif
7274
7275
7276/**
7277 * Stores a data qword.
7278 *
7279 * @returns Strict VBox status code.
7280 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7281 * @param iSegReg The index of the segment register to use for
7282 * this access. The base and limits are checked.
7283 * @param GCPtrMem The address of the guest memory.
7284 * @param u64Value The value to store.
7285 */
7286VBOXSTRICTRC iemMemStoreDataU64(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value) RT_NOEXCEPT
7287{
7288 /* The lazy approach for now... */
7289 uint64_t *pu64Dst;
7290 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7291 if (rc == VINF_SUCCESS)
7292 {
7293 *pu64Dst = u64Value;
7294 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
7295 }
7296 return rc;
7297}
7298
7299
7300#ifdef IEM_WITH_SETJMP
7301/**
7302 * Stores a data qword, longjmp on error.
7303 *
7304 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7305 * @param iSegReg The index of the segment register to use for
7306 * this access. The base and limits are checked.
7307 * @param GCPtrMem The address of the guest memory.
7308 * @param u64Value The value to store.
7309 */
7310void iemMemStoreDataU64Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value) RT_NOEXCEPT
7311{
7312 /* The lazy approach for now... */
7313 uint64_t *pu64Dst = (uint64_t *)iemMemMapJmp(pVCpu, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7314 *pu64Dst = u64Value;
7315 iemMemCommitAndUnmapJmp(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
7316}
7317#endif
7318
7319
7320/**
7321 * Stores a data dqword.
7322 *
7323 * @returns Strict VBox status code.
7324 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7325 * @param iSegReg The index of the segment register to use for
7326 * this access. The base and limits are checked.
7327 * @param GCPtrMem The address of the guest memory.
7328 * @param u128Value The value to store.
7329 */
7330VBOXSTRICTRC iemMemStoreDataU128(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT
7331{
7332 /* The lazy approach for now... */
7333 PRTUINT128U pu128Dst;
7334 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7335 if (rc == VINF_SUCCESS)
7336 {
7337 pu128Dst->au64[0] = u128Value.au64[0];
7338 pu128Dst->au64[1] = u128Value.au64[1];
7339 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
7340 }
7341 return rc;
7342}
7343
7344
7345#ifdef IEM_WITH_SETJMP
7346/**
7347 * Stores a data dqword, longjmp on error.
7348 *
7349 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7350 * @param iSegReg The index of the segment register to use for
7351 * this access. The base and limits are checked.
7352 * @param GCPtrMem The address of the guest memory.
7353 * @param u128Value The value to store.
7354 */
7355void iemMemStoreDataU128Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT
7356{
7357 /* The lazy approach for now... */
7358 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7359 pu128Dst->au64[0] = u128Value.au64[0];
7360 pu128Dst->au64[1] = u128Value.au64[1];
7361 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
7362}
7363#endif
7364
7365
7366/**
7367 * Stores a data dqword, SSE aligned.
7368 *
7369 * @returns Strict VBox status code.
7370 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7371 * @param iSegReg The index of the segment register to use for
7372 * this access. The base and limits are checked.
7373 * @param GCPtrMem The address of the guest memory.
7374 * @param u128Value The value to store.
7375 */
7376VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT
7377{
7378 /* The lazy approach for now... */
7379 if ( (GCPtrMem & 15)
7380 && !(pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
7381 return iemRaiseGeneralProtectionFault0(pVCpu);
7382
7383 PRTUINT128U pu128Dst;
7384 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7385 if (rc == VINF_SUCCESS)
7386 {
7387 pu128Dst->au64[0] = u128Value.au64[0];
7388 pu128Dst->au64[1] = u128Value.au64[1];
7389 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
7390 }
7391 return rc;
7392}
7393
7394
7395#ifdef IEM_WITH_SETJMP
7396/**
7397 * Stores a data dqword, SSE aligned.
7398 *
7399 * @returns Strict VBox status code.
7400 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7401 * @param iSegReg The index of the segment register to use for
7402 * this access. The base and limits are checked.
7403 * @param GCPtrMem The address of the guest memory.
7404 * @param u128Value The value to store.
7405 */
7406void iemMemStoreDataU128AlignedSseJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT
7407{
7408 /* The lazy approach for now... */
7409 if ( (GCPtrMem & 15) == 0
7410 || (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
7411 {
7412 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7413 pu128Dst->au64[0] = u128Value.au64[0];
7414 pu128Dst->au64[1] = u128Value.au64[1];
7415 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
7416 return;
7417 }
7418
7419 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
7420 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
7421}
7422#endif
7423
7424
7425/**
7426 * Stores a data oword (octo word).
7427 *
7428 * @returns Strict VBox status code.
7429 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7430 * @param iSegReg The index of the segment register to use for
7431 * this access. The base and limits are checked.
7432 * @param GCPtrMem The address of the guest memory.
7433 * @param pu256Value Pointer to the value to store.
7434 */
7435VBOXSTRICTRC iemMemStoreDataU256(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
7436{
7437 /* The lazy approach for now... */
7438 PRTUINT256U pu256Dst;
7439 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7440 if (rc == VINF_SUCCESS)
7441 {
7442 pu256Dst->au64[0] = pu256Value->au64[0];
7443 pu256Dst->au64[1] = pu256Value->au64[1];
7444 pu256Dst->au64[2] = pu256Value->au64[2];
7445 pu256Dst->au64[3] = pu256Value->au64[3];
7446 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
7447 }
7448 return rc;
7449}
7450
7451
7452#ifdef IEM_WITH_SETJMP
7453/**
7454 * Stores a data qqword (256 bits), longjmp on error.
7455 *
7456 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7457 * @param iSegReg The index of the segment register to use for
7458 * this access. The base and limits are checked.
7459 * @param GCPtrMem The address of the guest memory.
7460 * @param pu256Value Pointer to the value to store.
7461 */
7462void iemMemStoreDataU256Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
7463{
7464 /* The lazy approach for now... */
7465 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7466 pu256Dst->au64[0] = pu256Value->au64[0];
7467 pu256Dst->au64[1] = pu256Value->au64[1];
7468 pu256Dst->au64[2] = pu256Value->au64[2];
7469 pu256Dst->au64[3] = pu256Value->au64[3];
7470 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
7471}
7472#endif
7473
7474
7475/**
7476 * Stores a data qqword (256 bits), AVX aligned.
7477 *
7478 * @returns Strict VBox status code.
7479 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7480 * @param iSegReg The index of the segment register to use for
7481 * this access. The base and limits are checked.
7482 * @param GCPtrMem The address of the guest memory.
7483 * @param pu256Value Pointer to the value to store.
7484 */
7485VBOXSTRICTRC iemMemStoreDataU256AlignedAvx(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
7486{
7487 /* The lazy approach for now... */
7488 if (GCPtrMem & 31)
7489 return iemRaiseGeneralProtectionFault0(pVCpu);
7490
7491 PRTUINT256U pu256Dst;
7492 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7493 if (rc == VINF_SUCCESS)
7494 {
7495 pu256Dst->au64[0] = pu256Value->au64[0];
7496 pu256Dst->au64[1] = pu256Value->au64[1];
7497 pu256Dst->au64[2] = pu256Value->au64[2];
7498 pu256Dst->au64[3] = pu256Value->au64[3];
7499 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
7500 }
7501 return rc;
7502}
7503
7504
7505#ifdef IEM_WITH_SETJMP
7506/**
7507 * Stores a data qqword (256 bits), AVX aligned, longjmp on error.
7508 *
7510 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7511 * @param iSegReg The index of the segment register to use for
7512 * this access. The base and limits are checked.
7513 * @param GCPtrMem The address of the guest memory.
7514 * @param pu256Value Pointer to the value to store.
7515 */
7516void iemMemStoreDataU256AlignedAvxJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
7517{
7518 /* The lazy approach for now... */
7519 if ((GCPtrMem & 31) == 0)
7520 {
7521 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7522 pu256Dst->au64[0] = pu256Value->au64[0];
7523 pu256Dst->au64[1] = pu256Value->au64[1];
7524 pu256Dst->au64[2] = pu256Value->au64[2];
7525 pu256Dst->au64[3] = pu256Value->au64[3];
7526 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
7527 return;
7528 }
7529
7530 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
7531 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
7532}
7533#endif
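
/**
 * Illustrative sketch, not part of IEM: the alignment rule enforced by the
 * aligned SSE/AVX store helpers above.  A 16-byte (SSE) store only faults
 * when the address is misaligned and MXCSR.MM is clear, while the 32-byte
 * (AVX) store above always demands 32-byte alignment.  The iemSketch name and
 * the fMxcsrMM parameter are made up for illustration.
 */
static bool iemSketchAlignedStoreWouldFault(uint64_t GCPtrMem, uint32_t cbAlign, bool fMxcsrMM)
{
    if (!(GCPtrMem & (cbAlign - 1)))
        return false;                   /* aligned: no fault */
    if (cbAlign == 16 && fMxcsrMM)
        return false;                   /* misaligned SSE access forgiven by MXCSR.MM */
    return true;                        /* would end up in iemRaiseGeneralProtectionFault0 */
}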
7534
7535
7536/**
7537 * Stores a descriptor register (sgdt, sidt).
7538 *
7539 * @returns Strict VBox status code.
7540 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7541 * @param cbLimit The limit.
7542 * @param GCPtrBase The base address.
7543 * @param iSegReg The index of the segment register to use for
7544 * this access. The base and limits are checked.
7545 * @param GCPtrMem The address of the guest memory.
7546 */
7547VBOXSTRICTRC iemMemStoreDataXdtr(PVMCPUCC pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7548{
7549 /*
7550 * The SIDT and SGDT instructions actually store the data using two
7551 * independent writes. The instructions do not respond to operand-size prefixes.
7552 */
7553 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
7554 if (rcStrict == VINF_SUCCESS)
7555 {
7556 if (pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT)
7557 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
7558 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
7559 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
7560 else if (pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT)
7561 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
7562 else
7563 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
7564 }
7565 return rcStrict;
7566}
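
/**
 * Illustrative sketch, not part of IEM: the memory image the SGDT/SIDT store
 * above produces for a 16-bit operand on a 286-class target, assuming a
 * little-endian destination buffer.  The 16-bit limit comes first, followed
 * by a 24-bit base whose top byte is forced to 0xff.  The iemSketch name and
 * parameters are made up for illustration.
 */
static void iemSketchSgdt16BitImage(uint8_t *pbDst /* 6 bytes */, uint16_t cbLimit, uint32_t uBase)
{
    pbDst[0] = (uint8_t)(cbLimit & 0xff);
    pbDst[1] = (uint8_t)(cbLimit >> 8);
    uint32_t const uBase286 = (uBase & UINT32_C(0x00ffffff)) | UINT32_C(0xff000000);
    pbDst[2] = (uint8_t)( uBase286        & 0xff);
    pbDst[3] = (uint8_t)((uBase286 >>  8) & 0xff);
    pbDst[4] = (uint8_t)((uBase286 >> 16) & 0xff);
    pbDst[5] = (uint8_t)((uBase286 >> 24) & 0xff);  /* 0xff on <= 286 targets */
}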
7567
7568
7569/**
7570 * Pushes a word onto the stack.
7571 *
7572 * @returns Strict VBox status code.
7573 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7574 * @param u16Value The value to push.
7575 */
7576VBOXSTRICTRC iemMemStackPushU16(PVMCPUCC pVCpu, uint16_t u16Value) RT_NOEXCEPT
7577{
7578 /* Decrement the stack pointer. */
7579 uint64_t uNewRsp;
7580 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 2, &uNewRsp);
7581
7582 /* Write the word the lazy way. */
7583 uint16_t *pu16Dst;
7584 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7585 if (rc == VINF_SUCCESS)
7586 {
7587 *pu16Dst = u16Value;
7588 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
7589 }
7590
7591 /* Commit the new RSP value unless an access handler made trouble. */
7592 if (rc == VINF_SUCCESS)
7593 pVCpu->cpum.GstCtx.rsp = uNewRsp;
7594
7595 return rc;
7596}
7597
7598
7599/**
7600 * Pushes a dword onto the stack.
7601 *
7602 * @returns Strict VBox status code.
7603 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7604 * @param u32Value The value to push.
7605 */
7606VBOXSTRICTRC iemMemStackPushU32(PVMCPUCC pVCpu, uint32_t u32Value) RT_NOEXCEPT
7607{
7608 /* Decrement the stack pointer. */
7609 uint64_t uNewRsp;
7610 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
7611
7612 /* Write the dword the lazy way. */
7613 uint32_t *pu32Dst;
7614 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7615 if (rc == VINF_SUCCESS)
7616 {
7617 *pu32Dst = u32Value;
7618 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
7619 }
7620
7621 /* Commit the new RSP value unless an access handler made trouble. */
7622 if (rc == VINF_SUCCESS)
7623 pVCpu->cpum.GstCtx.rsp = uNewRsp;
7624
7625 return rc;
7626}
7627
7628
7629/**
7630 * Pushes a dword segment register value onto the stack.
7631 *
7632 * @returns Strict VBox status code.
7633 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7634 * @param u32Value The value to push.
7635 */
7636VBOXSTRICTRC iemMemStackPushU32SReg(PVMCPUCC pVCpu, uint32_t u32Value) RT_NOEXCEPT
7637{
7638 /* Decrement the stack pointer. */
7639 uint64_t uNewRsp;
7640 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
7641
7642 /* The Intel docs talk about zero extending the selector register
7643 value. My actual Intel CPU here might be zero extending the value
7644 but it still only writes the lower word... */
7645 /** @todo Test this on new HW and on AMD and in 64-bit mode. Also test what
7646 * happens when crossing a page boundary: is the high word checked
7647 * for write accessibility or not? Probably it is. What about segment limits?
7648 * It appears this behavior is also shared with trap error codes.
7649 *
7650 * Docs indicate the behavior changed maybe in Pentium or Pentium Pro. Check
7651 * ancient hardware when it actually did change. */
7652 uint16_t *pu16Dst;
7653 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_RW);
7654 if (rc == VINF_SUCCESS)
7655 {
7656 *pu16Dst = (uint16_t)u32Value;
7657 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_RW);
7658 }
7659
7660 /* Commit the new RSP value unless an access handler made trouble. */
7661 if (rc == VINF_SUCCESS)
7662 pVCpu->cpum.GstCtx.rsp = uNewRsp;
7663
7664 return rc;
7665}
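
/**
 * Illustrative sketch, not part of IEM: what the segment register push above
 * does to its 4-byte stack slot.  Only the low word is written; the upper two
 * bytes keep whatever the stack held before, which is why the mapping above
 * is read-write.  The iemSketch name is made up for illustration.
 */
static void iemSketchPushSRegLowWordOnly(uint8_t *pbStackSlot /* 4 bytes */, uint32_t u32Value)
{
    pbStackSlot[0] = (uint8_t)( u32Value       & 0xff);
    pbStackSlot[1] = (uint8_t)((u32Value >> 8) & 0xff);
    /* pbStackSlot[2] and pbStackSlot[3] are deliberately left untouched. */
}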
7666
7667
7668/**
7669 * Pushes a qword onto the stack.
7670 *
7671 * @returns Strict VBox status code.
7672 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7673 * @param u64Value The value to push.
7674 */
7675VBOXSTRICTRC iemMemStackPushU64(PVMCPUCC pVCpu, uint64_t u64Value) RT_NOEXCEPT
7676{
7677 /* Decrement the stack pointer. */
7678 uint64_t uNewRsp;
7679 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 8, &uNewRsp);
7680
7681 /* Write the qword the lazy way. */
7682 uint64_t *pu64Dst;
7683 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7684 if (rc == VINF_SUCCESS)
7685 {
7686 *pu64Dst = u64Value;
7687 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
7688 }
7689
7690 /* Commit the new RSP value unless an access handler made trouble. */
7691 if (rc == VINF_SUCCESS)
7692 pVCpu->cpum.GstCtx.rsp = uNewRsp;
7693
7694 return rc;
7695}
7696
7697
7698/**
7699 * Pops a word from the stack.
7700 *
7701 * @returns Strict VBox status code.
7702 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7703 * @param pu16Value Where to store the popped value.
7704 */
7705VBOXSTRICTRC iemMemStackPopU16(PVMCPUCC pVCpu, uint16_t *pu16Value) RT_NOEXCEPT
7706{
7707 /* Increment the stack pointer. */
7708 uint64_t uNewRsp;
7709 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 2, &uNewRsp);
7710
7711 /* Read the word the lazy way. */
7712 uint16_t const *pu16Src;
7713 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7714 if (rc == VINF_SUCCESS)
7715 {
7716 *pu16Value = *pu16Src;
7717 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
7718
7719 /* Commit the new RSP value. */
7720 if (rc == VINF_SUCCESS)
7721 pVCpu->cpum.GstCtx.rsp = uNewRsp;
7722 }
7723
7724 return rc;
7725}
7726
7727
7728/**
7729 * Pops a dword from the stack.
7730 *
7731 * @returns Strict VBox status code.
7732 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7733 * @param pu32Value Where to store the popped value.
7734 */
7735VBOXSTRICTRC iemMemStackPopU32(PVMCPUCC pVCpu, uint32_t *pu32Value) RT_NOEXCEPT
7736{
7737 /* Increment the stack pointer. */
7738 uint64_t uNewRsp;
7739 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 4, &uNewRsp);
7740
7741 /* Read the dword the lazy way. */
7742 uint32_t const *pu32Src;
7743 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7744 if (rc == VINF_SUCCESS)
7745 {
7746 *pu32Value = *pu32Src;
7747 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
7748
7749 /* Commit the new RSP value. */
7750 if (rc == VINF_SUCCESS)
7751 pVCpu->cpum.GstCtx.rsp = uNewRsp;
7752 }
7753
7754 return rc;
7755}
7756
7757
7758/**
7759 * Pops a qword from the stack.
7760 *
7761 * @returns Strict VBox status code.
7762 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7763 * @param pu64Value Where to store the popped value.
7764 */
7765VBOXSTRICTRC iemMemStackPopU64(PVMCPUCC pVCpu, uint64_t *pu64Value) RT_NOEXCEPT
7766{
7767 /* Increment the stack pointer. */
7768 uint64_t uNewRsp;
7769 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 8, &uNewRsp);
7770
7771 /* Read the qword the lazy way. */
7772 uint64_t const *pu64Src;
7773 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7774 if (rc == VINF_SUCCESS)
7775 {
7776 *pu64Value = *pu64Src;
7777 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
7778
7779 /* Commit the new RSP value. */
7780 if (rc == VINF_SUCCESS)
7781 pVCpu->cpum.GstCtx.rsp = uNewRsp;
7782 }
7783
7784 return rc;
7785}
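
/**
 * Illustrative sketch, not part of IEM: the flat 64-bit stack pointer
 * arithmetic behind the push/pop helpers above.  A push writes at the new,
 * lower top of stack; a pop reads at the old top.  Either way the helpers
 * above only publish the new RSP once the memory access has been committed
 * successfully.  The iemSketch names are made up for illustration.
 */
static uint64_t iemSketchRspAfterPush(uint64_t uRsp, uint8_t cbItem) { return uRsp - cbItem; }
static uint64_t iemSketchRspAfterPop( uint64_t uRsp, uint8_t cbItem) { return uRsp + cbItem; }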
7786
7787
7788/**
7789 * Pushes a word onto the stack, using a temporary stack pointer.
7790 *
7791 * @returns Strict VBox status code.
7792 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7793 * @param u16Value The value to push.
7794 * @param pTmpRsp Pointer to the temporary stack pointer.
7795 */
7796VBOXSTRICTRC iemMemStackPushU16Ex(PVMCPUCC pVCpu, uint16_t u16Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
7797{
7798 /* Decrement the stack pointer. */
7799 RTUINT64U NewRsp = *pTmpRsp;
7800 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 2);
7801
7802 /* Write the word the lazy way. */
7803 uint16_t *pu16Dst;
7804 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7805 if (rc == VINF_SUCCESS)
7806 {
7807 *pu16Dst = u16Value;
7808 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
7809 }
7810
7811 /* Commit the new RSP value unless an access handler made trouble. */
7812 if (rc == VINF_SUCCESS)
7813 *pTmpRsp = NewRsp;
7814
7815 return rc;
7816}
7817
7818
7819/**
7820 * Pushes a dword onto the stack, using a temporary stack pointer.
7821 *
7822 * @returns Strict VBox status code.
7823 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7824 * @param u32Value The value to push.
7825 * @param pTmpRsp Pointer to the temporary stack pointer.
7826 */
7827VBOXSTRICTRC iemMemStackPushU32Ex(PVMCPUCC pVCpu, uint32_t u32Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
7828{
7829 /* Decrement the stack pointer. */
7830 RTUINT64U NewRsp = *pTmpRsp;
7831 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 4);
7832
7833 /* Write the dword the lazy way. */
7834 uint32_t *pu32Dst;
7835 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7836 if (rc == VINF_SUCCESS)
7837 {
7838 *pu32Dst = u32Value;
7839 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
7840 }
7841
7842 /* Commit the new RSP value unless an access handler made trouble. */
7843 if (rc == VINF_SUCCESS)
7844 *pTmpRsp = NewRsp;
7845
7846 return rc;
7847}
7848
7849
7850/**
7851 * Pushes a qword onto the stack, using a temporary stack pointer.
7852 *
7853 * @returns Strict VBox status code.
7854 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7855 * @param u64Value The value to push.
7856 * @param pTmpRsp Pointer to the temporary stack pointer.
7857 */
7858VBOXSTRICTRC iemMemStackPushU64Ex(PVMCPUCC pVCpu, uint64_t u64Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
7859{
7860 /* Decrement the stack pointer. */
7861 RTUINT64U NewRsp = *pTmpRsp;
7862 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 8);
7863
7864 /* Write the qword the lazy way. */
7865 uint64_t *pu64Dst;
7866 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7867 if (rc == VINF_SUCCESS)
7868 {
7869 *pu64Dst = u64Value;
7870 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
7871 }
7872
7873 /* Commit the new RSP value unless an access handler made trouble. */
7874 if (rc == VINF_SUCCESS)
7875 *pTmpRsp = NewRsp;
7876
7877 return rc;
7878}
7879
7880
7881/**
7882 * Pops a word from the stack, using a temporary stack pointer.
7883 *
7884 * @returns Strict VBox status code.
7885 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7886 * @param pu16Value Where to store the popped value.
7887 * @param pTmpRsp Pointer to the temporary stack pointer.
7888 */
7889VBOXSTRICTRC iemMemStackPopU16Ex(PVMCPUCC pVCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
7890{
7891 /* Increment the stack pointer. */
7892 RTUINT64U NewRsp = *pTmpRsp;
7893 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 2);
7894
7895 /* Read the word the lazy way. */
7896 uint16_t const *pu16Src;
7897 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7898 if (rc == VINF_SUCCESS)
7899 {
7900 *pu16Value = *pu16Src;
7901 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
7902
7903 /* Commit the new RSP value. */
7904 if (rc == VINF_SUCCESS)
7905 *pTmpRsp = NewRsp;
7906 }
7907
7908 return rc;
7909}
7910
7911
7912/**
7913 * Pops a dword from the stack, using a temporary stack pointer.
7914 *
7915 * @returns Strict VBox status code.
7916 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7917 * @param pu32Value Where to store the popped value.
7918 * @param pTmpRsp Pointer to the temporary stack pointer.
7919 */
7920VBOXSTRICTRC iemMemStackPopU32Ex(PVMCPUCC pVCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
7921{
7922 /* Increment the stack pointer. */
7923 RTUINT64U NewRsp = *pTmpRsp;
7924 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 4);
7925
7926 /* Read the dword the lazy way. */
7927 uint32_t const *pu32Src;
7928 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7929 if (rc == VINF_SUCCESS)
7930 {
7931 *pu32Value = *pu32Src;
7932 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
7933
7934 /* Commit the new RSP value. */
7935 if (rc == VINF_SUCCESS)
7936 *pTmpRsp = NewRsp;
7937 }
7938
7939 return rc;
7940}
7941
7942
7943/**
7944 * Pops a qword from the stack, using a temporary stack pointer.
7945 *
7946 * @returns Strict VBox status code.
7947 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7948 * @param pu64Value Where to store the popped value.
7949 * @param pTmpRsp Pointer to the temporary stack pointer.
7950 */
7951VBOXSTRICTRC iemMemStackPopU64Ex(PVMCPUCC pVCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
7952{
7953 /* Increment the stack pointer. */
7954 RTUINT64U NewRsp = *pTmpRsp;
7955 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 8);
7956
7957 /* Read the qword the lazy way. */
7958 uint64_t const *pu64Src;
7959 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7960 if (rcStrict == VINF_SUCCESS)
7961 {
7962 *pu64Value = *pu64Src;
7963 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
7964
7965 /* Commit the new RSP value. */
7966 if (rcStrict == VINF_SUCCESS)
7967 *pTmpRsp = NewRsp;
7968 }
7969
7970 return rcStrict;
7971}
7972
7973
7974/**
7975 * Begin a special stack push (used by interrupts, exceptions and such).
7976 *
7977 * This will raise \#SS or \#PF if appropriate.
7978 *
7979 * @returns Strict VBox status code.
7980 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7981 * @param cbMem The number of bytes to push onto the stack.
7982 * @param ppvMem Where to return the pointer to the stack memory.
7983 * As with the other memory functions this could be
7984 * direct access or bounce buffered access, so
7985 * don't commit registers until the commit call
7986 * succeeds.
7987 * @param puNewRsp Where to return the new RSP value. This must be
7988 * passed unchanged to
7989 * iemMemStackPushCommitSpecial().
7990 */
7991VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp) RT_NOEXCEPT
7992{
7993 Assert(cbMem < UINT8_MAX);
7994 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, (uint8_t)cbMem, puNewRsp);
7995 return iemMemMap(pVCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7996}
7997
7998
7999/**
8000 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
8001 *
8002 * This will update the rSP.
8003 *
8004 * @returns Strict VBox status code.
8005 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8006 * @param pvMem The pointer returned by
8007 * iemMemStackPushBeginSpecial().
8008 * @param uNewRsp The new RSP value returned by
8009 * iemMemStackPushBeginSpecial().
8010 */
8011VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPUCC pVCpu, void *pvMem, uint64_t uNewRsp) RT_NOEXCEPT
8012{
8013 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem, IEM_ACCESS_STACK_W);
8014 if (rcStrict == VINF_SUCCESS)
8015 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8016 return rcStrict;
8017}
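
/**
 * Illustrative sketch, not used by IEM as-is: the calling pattern the special
 * stack push API above is designed for, here pushing a hypothetical 8-byte
 * frame.  No register (RSP included) is touched until the commit succeeds.
 * The iemSketch name and uFrameValue parameter are made up for illustration.
 */
static VBOXSTRICTRC iemSketchPushSpecialQword(PVMCPUCC pVCpu, uint64_t uFrameValue)
{
    void    *pvFrame;
    uint64_t uNewRsp;
    VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pVCpu, sizeof(uint64_t), &pvFrame, &uNewRsp);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;                    /* #SS, #PF or an informational bounce status */
    *(uint64_t *)pvFrame = uFrameValue;     /* fill the mapped (or bounce buffered) frame */
    return iemMemStackPushCommitSpecial(pVCpu, pvFrame, uNewRsp); /* unmaps and updates RSP */
}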
8018
8019
8020/**
8021 * Begin a special stack pop (used by iret, retf and such).
8022 *
8023 * This will raise \#SS or \#PF if appropriate.
8024 *
8025 * @returns Strict VBox status code.
8026 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8027 * @param cbMem The number of bytes to pop from the stack.
8028 * @param ppvMem Where to return the pointer to the stack memory.
8029 * @param puNewRsp Where to return the new RSP value. This must be
8030 * assigned to CPUMCTX::rsp manually some time
8031 * after iemMemStackPopDoneSpecial() has been
8032 * called.
8033 */
8034VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp) RT_NOEXCEPT
8035{
8036 Assert(cbMem < UINT8_MAX);
8037 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, (uint8_t)cbMem, puNewRsp);
8038 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
8039}
8040
8041
8042/**
8043 * Continue a special stack pop (used by iret and retf).
8044 *
8045 * This will raise \#SS or \#PF if appropriate.
8046 *
8047 * @returns Strict VBox status code.
8048 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8049 * @param cbMem The number of bytes to pop from the stack.
8050 * @param ppvMem Where to return the pointer to the stack memory.
8051 * @param puNewRsp Where to return the new RSP value. This must be
8052 * assigned to CPUMCTX::rsp manually some time
8053 * after iemMemStackPopDoneSpecial() has been
8054 * called.
8055 */
8056VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPUCC pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp) RT_NOEXCEPT
8057{
8058 Assert(cbMem < UINT8_MAX);
8059 RTUINT64U NewRsp;
8060 NewRsp.u = *puNewRsp;
8061 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 8);
8062 *puNewRsp = NewRsp.u;
8063 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
8064}
8065
8066
8067/**
8068 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
8069 * iemMemStackPopContinueSpecial).
8070 *
8071 * The caller will manually commit the rSP.
8072 *
8073 * @returns Strict VBox status code.
8074 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8075 * @param pvMem The pointer returned by
8076 * iemMemStackPopBeginSpecial() or
8077 * iemMemStackPopContinueSpecial().
8078 */
8079VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPUCC pVCpu, void const *pvMem) RT_NOEXCEPT
8080{
8081 return iemMemCommitAndUnmap(pVCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
8082}
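
/**
 * Illustrative sketch, not used by IEM as-is: the calling pattern for the
 * special stack pop API above, reading one hypothetical 8-byte value.  RSP is
 * committed manually after the done call, as documented above.  The iemSketch
 * name is made up for illustration.
 */
static VBOXSTRICTRC iemSketchPopSpecialQword(PVMCPUCC pVCpu, uint64_t *puValue)
{
    void const *pvFrame;
    uint64_t    uNewRsp;
    VBOXSTRICTRC rcStrict = iemMemStackPopBeginSpecial(pVCpu, sizeof(uint64_t), &pvFrame, &uNewRsp);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    *puValue = *(uint64_t const *)pvFrame;
    rcStrict = iemMemStackPopDoneSpecial(pVCpu, pvFrame);
    if (rcStrict == VINF_SUCCESS)
        pVCpu->cpum.GstCtx.rsp = uNewRsp;   /* the manual RSP commit */
    return rcStrict;
}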
8083
8084
8085/**
8086 * Fetches a system table byte.
8087 *
8088 * @returns Strict VBox status code.
8089 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8090 * @param pbDst Where to return the byte.
8091 * @param iSegReg The index of the segment register to use for
8092 * this access. The base and limits are checked.
8093 * @param GCPtrMem The address of the guest memory.
8094 */
8095VBOXSTRICTRC iemMemFetchSysU8(PVMCPUCC pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8096{
8097 /* The lazy approach for now... */
8098 uint8_t const *pbSrc;
8099 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
8100 if (rc == VINF_SUCCESS)
8101 {
8102 *pbDst = *pbSrc;
8103 rc = iemMemCommitAndUnmap(pVCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
8104 }
8105 return rc;
8106}
8107
8108
8109/**
8110 * Fetches a system table word.
8111 *
8112 * @returns Strict VBox status code.
8113 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8114 * @param pu16Dst Where to return the word.
8115 * @param iSegReg The index of the segment register to use for
8116 * this access. The base and limits are checked.
8117 * @param GCPtrMem The address of the guest memory.
8118 */
8119VBOXSTRICTRC iemMemFetchSysU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8120{
8121 /* The lazy approach for now... */
8122 uint16_t const *pu16Src;
8123 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
8124 if (rc == VINF_SUCCESS)
8125 {
8126 *pu16Dst = *pu16Src;
8127 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
8128 }
8129 return rc;
8130}
8131
8132
8133/**
8134 * Fetches a system table dword.
8135 *
8136 * @returns Strict VBox status code.
8137 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8138 * @param pu32Dst Where to return the dword.
8139 * @param iSegReg The index of the segment register to use for
8140 * this access. The base and limits are checked.
8141 * @param GCPtrMem The address of the guest memory.
8142 */
8143VBOXSTRICTRC iemMemFetchSysU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8144{
8145 /* The lazy approach for now... */
8146 uint32_t const *pu32Src;
8147 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
8148 if (rc == VINF_SUCCESS)
8149 {
8150 *pu32Dst = *pu32Src;
8151 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
8152 }
8153 return rc;
8154}
8155
8156
8157/**
8158 * Fetches a system table qword.
8159 *
8160 * @returns Strict VBox status code.
8161 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8162 * @param pu64Dst Where to return the qword.
8163 * @param iSegReg The index of the segment register to use for
8164 * this access. The base and limits are checked.
8165 * @param GCPtrMem The address of the guest memory.
8166 */
8167VBOXSTRICTRC iemMemFetchSysU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8168{
8169 /* The lazy approach for now... */
8170 uint64_t const *pu64Src;
8171 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
8172 if (rc == VINF_SUCCESS)
8173 {
8174 *pu64Dst = *pu64Src;
8175 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
8176 }
8177 return rc;
8178}
8179
8180
8181/**
8182 * Fetches a descriptor table entry with caller specified error code.
8183 *
8184 * @returns Strict VBox status code.
8185 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8186 * @param pDesc Where to return the descriptor table entry.
8187 * @param uSel The selector which table entry to fetch.
8188 * @param uXcpt The exception to raise on table lookup error.
8189 * @param uErrorCode The error code associated with the exception.
8190 */
8191static VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,
8192 uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT
8193{
8194 AssertPtr(pDesc);
8195 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
8196
8197 /** @todo did the 286 require all 8 bytes to be accessible? */
8198 /*
8199 * Get the selector table base and check bounds.
8200 */
8201 RTGCPTR GCPtrBase;
8202 if (uSel & X86_SEL_LDT)
8203 {
8204 if ( !pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present
8205 || (uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.ldtr.u32Limit )
8206 {
8207 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
8208 uSel, pVCpu->cpum.GstCtx.ldtr.u32Limit, pVCpu->cpum.GstCtx.ldtr.Sel));
8209 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8210 uErrorCode, 0);
8211 }
8212
8213 Assert(pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present);
8214 GCPtrBase = pVCpu->cpum.GstCtx.ldtr.u64Base;
8215 }
8216 else
8217 {
8218 if ((uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.gdtr.cbGdt)
8219 {
8220 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pVCpu->cpum.GstCtx.gdtr.cbGdt));
8221 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8222 uErrorCode, 0);
8223 }
8224 GCPtrBase = pVCpu->cpum.GstCtx.gdtr.pGdt;
8225 }
8226
8227 /*
8228 * Read the legacy descriptor and maybe the long mode extensions if
8229 * required.
8230 */
8231 VBOXSTRICTRC rcStrict;
8232 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)
8233 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
8234 else
8235 {
8236 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);
8237 if (rcStrict == VINF_SUCCESS)
8238 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);
8239 if (rcStrict == VINF_SUCCESS)
8240 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);
8241 if (rcStrict == VINF_SUCCESS)
8242 pDesc->Legacy.au16[3] = 0;
8243 else
8244 return rcStrict;
8245 }
8246
8247 if (rcStrict == VINF_SUCCESS)
8248 {
8249 if ( !IEM_IS_LONG_MODE(pVCpu)
8250 || pDesc->Legacy.Gen.u1DescType)
8251 pDesc->Long.au64[1] = 0;
8252 else if ((uint32_t)(uSel | X86_SEL_RPL_LDT) + 8 <= (uSel & X86_SEL_LDT ? pVCpu->cpum.GstCtx.ldtr.u32Limit : pVCpu->cpum.GstCtx.gdtr.cbGdt))
8253 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
8254 else
8255 {
8256 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
8257 /** @todo is this the right exception? */
8258 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
8259 }
8260 }
8261 return rcStrict;
8262}
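
/**
 * Illustrative sketch, not part of IEM: how a selector maps to the descriptor
 * address fetched above, with the limit checks already done.  Bit 2 (TI)
 * selects the LDT, the low three bits (RPL + TI) are masked off, and what
 * remains is the byte offset into the table.  The iemSketch name is made up
 * for illustration.
 */
static uint64_t iemSketchSelectorToDescAddr(uint16_t uSel, uint64_t uGdtBase, uint64_t uLdtBase)
{
    uint64_t const uTableBase = (uSel & 0x0004 /* TI, X86_SEL_LDT */) ? uLdtBase : uGdtBase;
    return uTableBase + (uSel & 0xfff8 /* X86_SEL_MASK */);
}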
8263
8264
8265/**
8266 * Fetches a descriptor table entry.
8267 *
8268 * @returns Strict VBox status code.
8269 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8270 * @param pDesc Where to return the descriptor table entry.
8271 * @param uSel The selector which table entry to fetch.
8272 * @param uXcpt The exception to raise on table lookup error.
8273 */
8274VBOXSTRICTRC iemMemFetchSelDesc(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt) RT_NOEXCEPT
8275{
8276 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
8277}
8278
8279
8280/**
8281 * Marks the selector descriptor as accessed (only non-system descriptors).
8282 *
8283 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
8284 * will therefore skip the limit checks.
8285 *
8286 * @returns Strict VBox status code.
8287 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8288 * @param uSel The selector.
8289 */
8290VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
8291{
8292 /*
8293 * Get the selector table base and calculate the entry address.
8294 */
8295 RTGCPTR GCPtr = uSel & X86_SEL_LDT
8296 ? pVCpu->cpum.GstCtx.ldtr.u64Base
8297 : pVCpu->cpum.GstCtx.gdtr.pGdt;
8298 GCPtr += uSel & X86_SEL_MASK;
8299
8300 /*
8301 * ASMAtomicBitSet will assert if the address is misaligned, so do some
8302 * ugly stuff to avoid this. This will make sure it's an atomic access
8303 * as well as more or less remove any question about 8-bit or 32-bit accesses.
8304 */
8305 VBOXSTRICTRC rcStrict;
8306 uint32_t volatile *pu32;
8307 if ((GCPtr & 3) == 0)
8308 {
8309 /* The normal case, map the 32-bit bits around the accessed bit (40). */
8310 GCPtr += 2 + 2;
8311 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
8312 if (rcStrict != VINF_SUCCESS)
8313 return rcStrict;
8314 ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
8315 }
8316 else
8317 {
8318 /* The misaligned GDT/LDT case, map the whole thing. */
8319 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
8320 if (rcStrict != VINF_SUCCESS)
8321 return rcStrict;
8322 switch ((uintptr_t)pu32 & 3)
8323 {
8324 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
8325 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
8326 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
8327 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
8328 }
8329 }
8330
8331 return iemMemCommitAndUnmap(pVCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
8332}
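
/**
 * Illustrative sketch, not part of IEM: the accessed flag is bit 40 of the
 * 8-byte descriptor, i.e. bit 0 of byte 5, which is bit 8 of the high dword
 * that the aligned path above maps (right after the u8BaseHigh1 byte).  The
 * iemSketch name is made up for illustration.
 */
static void iemSketchSetDescAccessedBit(uint64_t *puDesc)
{
    *puDesc |= UINT64_C(1) << 40;   /* the same bit the atomic dword/byte paths above target */
}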
8333
8334/** @} */
8335
8336/** @name Opcode Helpers.
8337 * @{
8338 */
8339
8340/**
8341 * Calculates the effective address of a ModR/M memory operand.
8342 *
8343 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8344 *
8345 * @return Strict VBox status code.
8346 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8347 * @param bRm The ModRM byte.
8348 * @param cbImm The size of any immediate following the
8349 * effective address opcode bytes. Important for
8350 * RIP relative addressing.
8351 * @param pGCPtrEff Where to return the effective address.
8352 */
8353VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPUCC pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff) RT_NOEXCEPT
8354{
8355 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
8356# define SET_SS_DEF() \
8357 do \
8358 { \
8359 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
8360 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
8361 } while (0)
8362
8363 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
8364 {
8365/** @todo Check the effective address size crap! */
8366 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
8367 {
8368 uint16_t u16EffAddr;
8369
8370 /* Handle the disp16 form with no registers first. */
8371 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
8372 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
8373 else
8374 {
8375 /* Get the displacement. */
8376 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8377 {
8378 case 0: u16EffAddr = 0; break;
8379 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
8380 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
8381 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
8382 }
8383
8384 /* Add the base and index registers to the disp. */
8385 switch (bRm & X86_MODRM_RM_MASK)
8386 {
8387 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
8388 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
8389 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
8390 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
8391 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
8392 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
8393 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
8394 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
8395 }
8396 }
8397
8398 *pGCPtrEff = u16EffAddr;
8399 }
8400 else
8401 {
8402 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8403 uint32_t u32EffAddr;
8404
8405 /* Handle the disp32 form with no registers first. */
8406 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8407 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
8408 else
8409 {
8410 /* Get the register (or SIB) value. */
8411 switch ((bRm & X86_MODRM_RM_MASK))
8412 {
8413 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8414 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8415 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8416 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8417 case 4: /* SIB */
8418 {
8419 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8420
8421 /* Get the index and scale it. */
8422 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
8423 {
8424 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8425 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8426 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8427 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8428 case 4: u32EffAddr = 0; /*none */ break;
8429 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
8430 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8431 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8432 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8433 }
8434 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8435
8436 /* add base */
8437 switch (bSib & X86_SIB_BASE_MASK)
8438 {
8439 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
8440 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
8441 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
8442 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
8443 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp; SET_SS_DEF(); break;
8444 case 5:
8445 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8446 {
8447 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
8448 SET_SS_DEF();
8449 }
8450 else
8451 {
8452 uint32_t u32Disp;
8453 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8454 u32EffAddr += u32Disp;
8455 }
8456 break;
8457 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
8458 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
8459 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8460 }
8461 break;
8462 }
8463 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
8464 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8465 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8466 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8467 }
8468
8469 /* Get and add the displacement. */
8470 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8471 {
8472 case 0:
8473 break;
8474 case 1:
8475 {
8476 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8477 u32EffAddr += i8Disp;
8478 break;
8479 }
8480 case 2:
8481 {
8482 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8483 u32EffAddr += u32Disp;
8484 break;
8485 }
8486 default:
8487 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
8488 }
8489
8490 }
8491 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
8492 *pGCPtrEff = u32EffAddr;
8493 else
8494 {
8495 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
8496 *pGCPtrEff = u32EffAddr & UINT16_MAX;
8497 }
8498 }
8499 }
8500 else
8501 {
8502 uint64_t u64EffAddr;
8503
8504 /* Handle the rip+disp32 form with no registers first. */
8505 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8506 {
8507 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
8508 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
8509 }
8510 else
8511 {
8512 /* Get the register (or SIB) value. */
8513 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
8514 {
8515 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8516 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8517 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8518 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8519 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
8520 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8521 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8522 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8523 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8524 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8525 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8526 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8527 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8528 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8529 /* SIB */
8530 case 4:
8531 case 12:
8532 {
8533 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8534
8535 /* Get the index and scale it. */
8536 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
8537 {
8538 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8539 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8540 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8541 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8542 case 4: u64EffAddr = 0; /*none */ break;
8543 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
8544 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8545 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8546 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8547 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8548 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8549 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8550 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
8551 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8552 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8553 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8554 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8555 }
8556 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8557
8558 /* add base */
8559 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
8560 {
8561 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
8562 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
8563 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
8564 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
8565 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp; SET_SS_DEF(); break;
8566 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
8567 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
8568 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
8569 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
8570 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
8571 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
8572 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
8573 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
8574 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
8575 /* complicated encodings */
8576 case 5:
8577 case 13:
8578 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8579 {
8580 if (!pVCpu->iem.s.uRexB)
8581 {
8582 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
8583 SET_SS_DEF();
8584 }
8585 else
8586 u64EffAddr += pVCpu->cpum.GstCtx.r13;
8587 }
8588 else
8589 {
8590 uint32_t u32Disp;
8591 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8592 u64EffAddr += (int32_t)u32Disp;
8593 }
8594 break;
8595 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8596 }
8597 break;
8598 }
8599 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8600 }
8601
8602 /* Get and add the displacement. */
8603 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8604 {
8605 case 0:
8606 break;
8607 case 1:
8608 {
8609 int8_t i8Disp;
8610 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8611 u64EffAddr += i8Disp;
8612 break;
8613 }
8614 case 2:
8615 {
8616 uint32_t u32Disp;
8617 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8618 u64EffAddr += (int32_t)u32Disp;
8619 break;
8620 }
8621 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
8622 }
8623
8624 }
8625
8626 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
8627 *pGCPtrEff = u64EffAddr;
8628 else
8629 {
8630 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8631 *pGCPtrEff = u64EffAddr & UINT32_MAX;
8632 }
8633 }
8634
8635 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
8636 return VINF_SUCCESS;
8637}
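
/**
 * Illustrative sketch, not part of IEM: the 16-bit ModR/M base/index table
 * implemented by the switch above, with the displacement already decoded and
 * the result wrapped to 16 bits.  Rows 2, 3 and 6 default to the SS segment.
 * The iemSketch name and the flattened register parameters are made up for
 * illustration.
 */
static uint16_t iemSketchCalc16BitEffAddr(uint8_t bRm, uint16_t u16Disp,
                                          uint16_t bx, uint16_t bp, uint16_t si, uint16_t di)
{
    switch (bRm & 0x07 /* X86_MODRM_RM_MASK */)
    {
        case 0:  return (uint16_t)(bx + si + u16Disp);
        case 1:  return (uint16_t)(bx + di + u16Disp);
        case 2:  return (uint16_t)(bp + si + u16Disp);  /* SS default */
        case 3:  return (uint16_t)(bp + di + u16Disp);  /* SS default */
        case 4:  return (uint16_t)(si + u16Disp);
        case 5:  return (uint16_t)(di + u16Disp);
        case 6:  return (uint16_t)(bp + u16Disp);       /* SS default; mod=0 means plain disp16 instead */
        case 7:  return (uint16_t)(bx + u16Disp);
        default: return 0; /* not reachable, bRm & 7 covers all cases */
    }
}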
8638
8639
8640/**
8641 * Calculates the effective address of a ModR/M memory operand.
8642 *
8643 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8644 *
8645 * @return Strict VBox status code.
8646 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8647 * @param bRm The ModRM byte.
8648 * @param cbImm The size of any immediate following the
8649 * effective address opcode bytes. Important for
8650 * RIP relative addressing.
8651 * @param pGCPtrEff Where to return the effective address.
8652 * @param offRsp RSP displacement.
8653 */
8654VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPUCC pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff, int8_t offRsp) RT_NOEXCEPT
8655{
8656 Log5(("iemOpHlpCalcRmEffAddrEx: bRm=%#x\n", bRm));
8657# define SET_SS_DEF() \
8658 do \
8659 { \
8660 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
8661 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
8662 } while (0)
8663
8664 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
8665 {
8666/** @todo Check the effective address size crap! */
8667 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
8668 {
8669 uint16_t u16EffAddr;
8670
8671 /* Handle the disp16 form with no registers first. */
8672 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
8673 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
8674 else
8675 {
8676 /* Get the displacement. */
8677 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8678 {
8679 case 0: u16EffAddr = 0; break;
8680 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
8681 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
8682 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
8683 }
8684
8685 /* Add the base and index registers to the disp. */
8686 switch (bRm & X86_MODRM_RM_MASK)
8687 {
8688 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
8689 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
8690 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
8691 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
8692 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
8693 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
8694 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
8695 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
8696 }
8697 }
8698
8699 *pGCPtrEff = u16EffAddr;
8700 }
8701 else
8702 {
8703 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8704 uint32_t u32EffAddr;
8705
8706 /* Handle the disp32 form with no registers first. */
8707 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8708 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
8709 else
8710 {
8711 /* Get the register (or SIB) value. */
8712 switch ((bRm & X86_MODRM_RM_MASK))
8713 {
8714 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8715 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8716 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8717 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8718 case 4: /* SIB */
8719 {
8720 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8721
8722 /* Get the index and scale it. */
8723 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
8724 {
8725 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8726 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8727 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8728 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8729 case 4: u32EffAddr = 0; /*none */ break;
8730 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
8731 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8732 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8733 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8734 }
8735 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8736
8737 /* add base */
8738 switch (bSib & X86_SIB_BASE_MASK)
8739 {
8740 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
8741 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
8742 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
8743 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
8744 case 4:
8745 u32EffAddr += pVCpu->cpum.GstCtx.esp + offRsp;
8746 SET_SS_DEF();
8747 break;
8748 case 5:
8749 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8750 {
8751 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
8752 SET_SS_DEF();
8753 }
8754 else
8755 {
8756 uint32_t u32Disp;
8757 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8758 u32EffAddr += u32Disp;
8759 }
8760 break;
8761 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
8762 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
8763 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8764 }
8765 break;
8766 }
8767 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
8768 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8769 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8770 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8771 }
8772
8773 /* Get and add the displacement. */
8774 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8775 {
8776 case 0:
8777 break;
8778 case 1:
8779 {
8780 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8781 u32EffAddr += i8Disp;
8782 break;
8783 }
8784 case 2:
8785 {
8786 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8787 u32EffAddr += u32Disp;
8788 break;
8789 }
8790 default:
8791 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
8792 }
8793
8794 }
8795 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
8796 *pGCPtrEff = u32EffAddr;
8797 else
8798 {
8799 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
8800 *pGCPtrEff = u32EffAddr & UINT16_MAX;
8801 }
8802 }
8803 }
8804 else
8805 {
8806 uint64_t u64EffAddr;
8807
8808 /* Handle the rip+disp32 form with no registers first. */
8809 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8810 {
8811 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
8812 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
8813 }
8814 else
8815 {
8816 /* Get the register (or SIB) value. */
8817 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
8818 {
8819 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8820 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8821 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8822 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8823 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
8824 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8825 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8826 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8827 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8828 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8829 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8830 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8831 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8832 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8833 /* SIB */
8834 case 4:
8835 case 12:
8836 {
8837 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8838
8839 /* Get the index and scale it. */
8840 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
8841 {
8842 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8843 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8844 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8845 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8846 case 4: u64EffAddr = 0; /*none */ break;
8847 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
8848 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8849 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8850 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8851 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8852 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8853 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8854 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
8855 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8856 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8857 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8858 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8859 }
8860 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8861
8862 /* add base */
8863 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
8864 {
8865 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
8866 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
8867 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
8868 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
8869 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + offRsp; SET_SS_DEF(); break;
8870 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
8871 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
8872 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
8873 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
8874 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
8875 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
8876 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
8877 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
8878 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
8879 /* complicated encodings */
8880 case 5:
8881 case 13:
8882 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8883 {
8884 if (!pVCpu->iem.s.uRexB)
8885 {
8886 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
8887 SET_SS_DEF();
8888 }
8889 else
8890 u64EffAddr += pVCpu->cpum.GstCtx.r13;
8891 }
8892 else
8893 {
8894 uint32_t u32Disp;
8895 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8896 u64EffAddr += (int32_t)u32Disp;
8897 }
8898 break;
8899 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8900 }
8901 break;
8902 }
8903 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8904 }
8905
8906 /* Get and add the displacement. */
8907 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8908 {
8909 case 0:
8910 break;
8911 case 1:
8912 {
8913 int8_t i8Disp;
8914 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8915 u64EffAddr += i8Disp;
8916 break;
8917 }
8918 case 2:
8919 {
8920 uint32_t u32Disp;
8921 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8922 u64EffAddr += (int32_t)u32Disp;
8923 break;
8924 }
8925 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
8926 }
8927
8928 }
8929
8930 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
8931 *pGCPtrEff = u64EffAddr;
8932 else
8933 {
8934 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8935 *pGCPtrEff = u64EffAddr & UINT32_MAX;
8936 }
8937 }
8938
8939 Log5(("iemOpHlpCalcRmEffAddrEx: EffAddr=%#010RGv\n", *pGCPtrEff));
8940 return VINF_SUCCESS;
8941}
8942
8943
8944#ifdef IEM_WITH_SETJMP
8945/**
8946 * Calculates the effective address of a ModR/M memory operand.
8947 *
8948 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8949 *
8950 * May longjmp on internal error.
8951 *
8952 * @return The effective address.
8953 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8954 * @param bRm The ModRM byte.
8955 * @param cbImm The size of any immediate following the
8956 * effective address opcode bytes. Important for
8957 * RIP relative addressing.
8958 */
8959RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPUCC pVCpu, uint8_t bRm, uint8_t cbImm) RT_NOEXCEPT
8960{
8961 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
8962# define SET_SS_DEF() \
8963 do \
8964 { \
8965 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
8966 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
8967 } while (0)
8968
8969 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
8970 {
8971/** @todo Check the effective address size crap! */
8972 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
8973 {
8974 uint16_t u16EffAddr;
8975
8976 /* Handle the disp16 form with no registers first. */
8977 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
8978 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
8979 else
8980 {
8981 /* Get the displacement. */
8982 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8983 {
8984 case 0: u16EffAddr = 0; break;
8985 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
8986 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
8987 default: AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_1)); /* (caller checked for these) */
8988 }
8989
8990 /* Add the base and index registers to the disp. */
8991 switch (bRm & X86_MODRM_RM_MASK)
8992 {
8993 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
8994 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
8995 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
8996 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
8997 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
8998 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
8999 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
9000 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
9001 }
9002 }
9003
9004 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
9005 return u16EffAddr;
9006 }
9007
9008 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9009 uint32_t u32EffAddr;
9010
9011 /* Handle the disp32 form with no registers first. */
9012 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9013 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
9014 else
9015 {
9016 /* Get the register (or SIB) value. */
9017 switch ((bRm & X86_MODRM_RM_MASK))
9018 {
9019 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
9020 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
9021 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
9022 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
9023 case 4: /* SIB */
9024 {
9025 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9026
9027 /* Get the index and scale it. */
9028 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
9029 {
9030 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
9031 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
9032 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
9033 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
9034 case 4: u32EffAddr = 0; /* none */ break;
9035 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
9036 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
9037 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
9038 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9039 }
9040 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9041
9042 /* add base */
9043 switch (bSib & X86_SIB_BASE_MASK)
9044 {
9045 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
9046 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
9047 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
9048 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
9049 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp; SET_SS_DEF(); break;
9050 case 5:
9051 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9052 {
9053 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
9054 SET_SS_DEF();
9055 }
9056 else
9057 {
9058 uint32_t u32Disp;
9059 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9060 u32EffAddr += u32Disp;
9061 }
9062 break;
9063 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
9064 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
9065 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9066 }
9067 break;
9068 }
9069 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
9070 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
9071 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
9072 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9073 }
9074
9075 /* Get and add the displacement. */
9076 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9077 {
9078 case 0:
9079 break;
9080 case 1:
9081 {
9082 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9083 u32EffAddr += i8Disp;
9084 break;
9085 }
9086 case 2:
9087 {
9088 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9089 u32EffAddr += u32Disp;
9090 break;
9091 }
9092 default:
9093 AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_2)); /* (caller checked for these) */
9094 }
9095 }
9096
9097 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
9098 {
9099 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
9100 return u32EffAddr;
9101 }
9102 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
9103 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX32\n", u32EffAddr & UINT16_MAX));
9104 return u32EffAddr & UINT16_MAX;
9105 }
9106
9107 uint64_t u64EffAddr;
9108
9109 /* Handle the rip+disp32 form with no registers first. */
9110 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9111 {
9112 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
9113 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
9114 }
9115 else
9116 {
9117 /* Get the register (or SIB) value. */
9118 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
9119 {
9120 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9121 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9122 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9123 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9124 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
9125 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9126 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9127 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9128 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9129 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9130 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9131 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9132 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9133 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9134 /* SIB */
9135 case 4:
9136 case 12:
9137 {
9138 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9139
9140 /* Get the index and scale it. */
9141 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
9142 {
9143 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9144 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9145 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9146 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9147 case 4: u64EffAddr = 0; /* none */ break;
9148 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
9149 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9150 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9151 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9152 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9153 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9154 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9155 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
9156 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9157 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9158 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9159 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9160 }
9161 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9162
9163 /* add base */
9164 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
9165 {
9166 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
9167 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
9168 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
9169 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
9170 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp; SET_SS_DEF(); break;
9171 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
9172 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
9173 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
9174 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
9175 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
9176 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
9177 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
9178 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
9179 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
9180 /* complicated encodings */
9181 case 5:
9182 case 13:
9183 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9184 {
9185 if (!pVCpu->iem.s.uRexB)
9186 {
9187 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
9188 SET_SS_DEF();
9189 }
9190 else
9191 u64EffAddr += pVCpu->cpum.GstCtx.r13;
9192 }
9193 else
9194 {
9195 uint32_t u32Disp;
9196 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9197 u64EffAddr += (int32_t)u32Disp;
9198 }
9199 break;
9200 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9201 }
9202 break;
9203 }
9204 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9205 }
9206
9207 /* Get and add the displacement. */
9208 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9209 {
9210 case 0:
9211 break;
9212 case 1:
9213 {
9214 int8_t i8Disp;
9215 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9216 u64EffAddr += i8Disp;
9217 break;
9218 }
9219 case 2:
9220 {
9221 uint32_t u32Disp;
9222 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9223 u64EffAddr += (int32_t)u32Disp;
9224 break;
9225 }
9226 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
9227 }
9228
9229 }
9230
9231 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
9232 {
9233 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
9234 return u64EffAddr;
9235 }
9236 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9237 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
9238 return u64EffAddr & UINT32_MAX;
9239}
9240#endif /* IEM_WITH_SETJMP */
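
/*
 * Illustrative note (not part of the original source): for a 64-bit RIP-relative
 * operand the helpers above compute
 *      EffAddr = disp32 + RIP + <opcode bytes decoded so far> + cbImm
 * so e.g. a 7 byte "mov rax, [rip+0x100]" (48 8B 05 00 01 00 00, no trailing
 * immediate, cbImm=0) yields EffAddr = RIP + 7 + 0x100, i.e. the displacement is
 * applied relative to the address of the next instruction, as the AMD64
 * addressing rules require.
 */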
9241
9242/** @} */
9243
9244
9245#ifdef LOG_ENABLED
9246/**
9247 * Logs the current instruction.
9248 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9249 * @param fSameCtx Set if we have the same context information as the VMM,
9250 * clear if we may have already executed an instruction in
9251 * our debug context. When clear, we assume IEMCPU holds
9252 * valid CPU mode info.
9253 *
9254 * @note The @a fSameCtx parameter is now misleading and obsolete.
9255 * @param pszFunction The IEM function doing the execution.
9256 */
9257static void iemLogCurInstr(PVMCPUCC pVCpu, bool fSameCtx, const char *pszFunction) RT_NOEXCEPT
9258{
9259# ifdef IN_RING3
9260 if (LogIs2Enabled())
9261 {
9262 char szInstr[256];
9263 uint32_t cbInstr = 0;
9264 if (fSameCtx)
9265 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
9266 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
9267 szInstr, sizeof(szInstr), &cbInstr);
9268 else
9269 {
9270 uint32_t fFlags = 0;
9271 switch (pVCpu->iem.s.enmCpuMode)
9272 {
9273 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
9274 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
9275 case IEMMODE_16BIT:
9276 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) || pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
9277 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
9278 else
9279 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
9280 break;
9281 }
9282 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fFlags,
9283 szInstr, sizeof(szInstr), &cbInstr);
9284 }
9285
9286 PCX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
9287 Log2(("**** %s\n"
9288 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
9289 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
9290 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
9291 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
9292 " %s\n"
9293 , pszFunction,
9294 pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ebx, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.esi, pVCpu->cpum.GstCtx.edi,
9295 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL, pVCpu->cpum.GstCtx.tr.Sel,
9296 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ds.Sel, pVCpu->cpum.GstCtx.es.Sel,
9297 pVCpu->cpum.GstCtx.fs.Sel, pVCpu->cpum.GstCtx.gs.Sel, pVCpu->cpum.GstCtx.eflags.u,
9298 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
9299 szInstr));
9300
9301 if (LogIs3Enabled())
9302 DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
9303 }
9304 else
9305# endif
9306 LogFlow(("%s: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n", pszFunction, pVCpu->cpum.GstCtx.cs.Sel,
9307 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u));
9308 RT_NOREF_PV(pVCpu); RT_NOREF_PV(fSameCtx);
9309}
9310#endif /* LOG_ENABLED */
9311
9312
9313#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9314/**
9315 * Deals with VMCPU_FF_VMX_APIC_WRITE, VMCPU_FF_VMX_MTF, VMCPU_FF_VMX_NMI_WINDOW,
9316 * VMCPU_FF_VMX_PREEMPT_TIMER and VMCPU_FF_VMX_INT_WINDOW.
9317 *
9318 * @returns Modified rcStrict.
9319 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9320 * @param rcStrict The instruction execution status.
9321 */
9322static VBOXSTRICTRC iemHandleNestedInstructionBoundraryFFs(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict) RT_NOEXCEPT
9323{
9324 Assert(CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)));
9325 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF))
9326 {
9327 /* VMX preemption timer takes priority over NMI-window exits. */
9328 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
9329 {
9330 rcStrict = iemVmxVmexitPreemptTimer(pVCpu);
9331 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER));
9332 }
9333 /*
9334 * Check remaining intercepts.
9335 *
9336 * NMI-window and Interrupt-window VM-exits.
9337 * Interrupt shadow (block-by-STI and Mov SS) inhibits interrupts and may also block NMIs.
9338 * Event injection during VM-entry takes priority over NMI-window and interrupt-window VM-exits.
9339 *
9340 * See Intel spec. 26.7.6 "NMI-Window Exiting".
9341 * See Intel spec. 26.7.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
9342 */
9343 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW | VMCPU_FF_VMX_INT_WINDOW)
9344 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
9345 && !TRPMHasTrap(pVCpu))
9346 {
9347 Assert(CPUMIsGuestVmxInterceptEvents(&pVCpu->cpum.GstCtx));
9348 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW)
9349 && CPUMIsGuestVmxVirtNmiBlocking(&pVCpu->cpum.GstCtx))
9350 {
9351 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_NMI_WINDOW, 0 /* u64ExitQual */);
9352 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW));
9353 }
9354 else if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW)
9355 && CPUMIsGuestVmxVirtIntrEnabled(&pVCpu->cpum.GstCtx))
9356 {
9357 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_INT_WINDOW, 0 /* u64ExitQual */);
9358 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW));
9359 }
9360 }
9361 }
9362 /* TPR-below threshold/APIC write has the highest priority. */
9363 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
9364 {
9365 rcStrict = iemVmxApicWriteEmulation(pVCpu);
9366 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
9367 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE));
9368 }
9369 /* MTF takes priority over VMX-preemption timer. */
9370 else
9371 {
9372 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* u64ExitQual */);
9373 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
9374 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF));
9375 }
9376 return rcStrict;
9377}
9378#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
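
/*
 * Summary note (not part of the original source): the effective priority order
 * implemented above is APIC-write emulation first, then MTF, then the
 * VMX-preemption timer, then NMI-window and finally interrupt-window VM-exits,
 * with the two window exits additionally gated on the absence of interrupt
 * inhibition and of a pending TRPM event.
 */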
9379
9380
9381/**
9382 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
9383 * IEMExecOneWithPrefetchedByPC.
9384 *
9385 * Similar code is found in IEMExecLots.
9386 *
9387 * @return Strict VBox status code.
9388 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9389 * @param fExecuteInhibit If set, execute the instruction following CLI,
9390 * POP SS and MOV SS,GR.
9391 * @param pszFunction The calling function name.
9392 */
9393DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPUCC pVCpu, bool fExecuteInhibit, const char *pszFunction)
9394{
9395 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9396 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9397 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9398 RT_NOREF_PV(pszFunction);
9399
9400#ifdef IEM_WITH_SETJMP
9401 VBOXSTRICTRC rcStrict;
9402 jmp_buf JmpBuf;
9403 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
9404 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
9405 if ((rcStrict = setjmp(JmpBuf)) == 0)
9406 {
9407 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
9408 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
9409 }
9410 else
9411 pVCpu->iem.s.cLongJumps++;
9412 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
9413#else
9414 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
9415 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
9416#endif
9417 if (rcStrict == VINF_SUCCESS)
9418 pVCpu->iem.s.cInstructions++;
9419 if (pVCpu->iem.s.cActiveMappings > 0)
9420 {
9421 Assert(rcStrict != VINF_SUCCESS);
9422 iemMemRollback(pVCpu);
9423 }
9424 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9425 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9426 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9427
9428//#ifdef DEBUG
9429// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
9430//#endif
9431
9432#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9433 /*
9434 * Perform any VMX nested-guest instruction boundary actions.
9435 *
9436 * If any of these causes a VM-exit, we must skip executing the next
9437 * instruction (would run into stale page tables). A VM-exit makes sure
9438 * there is no interrupt-inhibition, so that should ensure we don't try
9439 * to execute the next instruction. Clearing fExecuteInhibit is
9440 * problematic because of the setjmp/longjmp clobbering above.
9441 */
9442 if ( rcStrict == VINF_SUCCESS
9443 && VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9444 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW))
9445 rcStrict = iemHandleNestedInstructionBoundraryFFs(pVCpu, rcStrict);
9446#endif
9447
9448 /* Execute the next instruction as well if a cli, pop ss or
9449 mov ss, Gr has just completed successfully. */
9450 if ( fExecuteInhibit
9451 && rcStrict == VINF_SUCCESS
9452 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
9453 && EMIsInhibitInterruptsActive(pVCpu))
9454 {
9455 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fBypassHandlers, pVCpu->iem.s.fDisregardLock);
9456 if (rcStrict == VINF_SUCCESS)
9457 {
9458#ifdef LOG_ENABLED
9459 iemLogCurInstr(pVCpu, false, pszFunction);
9460#endif
9461#ifdef IEM_WITH_SETJMP
9462 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
9463 if ((rcStrict = setjmp(JmpBuf)) == 0)
9464 {
9465 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
9466 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
9467 }
9468 else
9469 pVCpu->iem.s.cLongJumps++;
9470 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
9471#else
9472 IEM_OPCODE_GET_NEXT_U8(&b);
9473 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
9474#endif
9475 if (rcStrict == VINF_SUCCESS)
9476 pVCpu->iem.s.cInstructions++;
9477 if (pVCpu->iem.s.cActiveMappings > 0)
9478 {
9479 Assert(rcStrict != VINF_SUCCESS);
9480 iemMemRollback(pVCpu);
9481 }
9482 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9483 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9484 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9485 }
9486 else if (pVCpu->iem.s.cActiveMappings > 0)
9487 iemMemRollback(pVCpu);
9488 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS); /* hope this is correct for all exceptional cases... */
9489 }
9490
9491 /*
9492 * Return value fiddling, statistics and sanity assertions.
9493 */
9494 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9495
9496 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
9497 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
9498 return rcStrict;
9499}
9500
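/*
 * Usage note (not part of the original source): when fExecuteInhibit is set and
 * the instruction just executed left VMCPU_FF_INHIBIT_INTERRUPTS active (CLI,
 * POP SS, MOV SS,GR), iemExecOneInner also decodes and executes the shadowed
 * follow-up instruction within the same call before clearing the inhibition
 * force-flag.
 */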
9501
9502/**
9503 * Execute one instruction.
9504 *
9505 * @return Strict VBox status code.
9506 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9507 */
9508VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPUCC pVCpu)
9509{
9510 AssertCompile(sizeof(pVCpu->iem.s) <= sizeof(pVCpu->iem.padding)); /* (tstVMStruct can't do its job w/o instruction stats) */
9511#ifdef LOG_ENABLED
9512 iemLogCurInstr(pVCpu, true, "IEMExecOne");
9513#endif
9514
9515 /*
9516 * Do the decoding and emulation.
9517 */
9518 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
9519 if (rcStrict == VINF_SUCCESS)
9520 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOne");
9521 else if (pVCpu->iem.s.cActiveMappings > 0)
9522 iemMemRollback(pVCpu);
9523
9524 if (rcStrict != VINF_SUCCESS)
9525 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9526 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9527 return rcStrict;
9528}
9529
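/*
 * Illustrative usage sketch (not part of the original source): an EMT-bound
 * caller typically does
 *      VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
 * and then dispatches on rcStrict, treating anything other than VINF_SUCCESS as
 * a reason to return to the outer execution loop for further handling.
 */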
9530
9531VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
9532{
9533 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
9534
9535 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
9536 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
9537 if (rcStrict == VINF_SUCCESS)
9538 {
9539 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneEx");
9540 if (pcbWritten)
9541 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
9542 }
9543 else if (pVCpu->iem.s.cActiveMappings > 0)
9544 iemMemRollback(pVCpu);
9545
9546 return rcStrict;
9547}
9548
9549
9550VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
9551 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
9552{
9553 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
9554
9555 VBOXSTRICTRC rcStrict;
9556 if ( cbOpcodeBytes
9557 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
9558 {
9559 iemInitDecoder(pVCpu, false, false);
9560#ifdef IEM_WITH_CODE_TLB
9561 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
9562 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
9563 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
9564 pVCpu->iem.s.offCurInstrStart = 0;
9565 pVCpu->iem.s.offInstrNextByte = 0;
9566#else
9567 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
9568 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
9569#endif
9570 rcStrict = VINF_SUCCESS;
9571 }
9572 else
9573 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
9574 if (rcStrict == VINF_SUCCESS)
9575 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneWithPrefetchedByPC");
9576 else if (pVCpu->iem.s.cActiveMappings > 0)
9577 iemMemRollback(pVCpu);
9578
9579 return rcStrict;
9580}
9581
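/*
 * Note (not part of the original source): the prefetched-bytes entry points
 * (IEMExecOneWithPrefetchedByPC above and the bypass variants further down)
 * only use the supplied opcode buffer when it actually covers the current RIP
 * (cbOpcodeBytes != 0 and GstCtx.rip == OpcodeBytesPC); otherwise they fall
 * back to a normal opcode prefetch via iemInitDecoderAndPrefetchOpcodes.
 */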
9582
9583VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
9584{
9585 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
9586
9587 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
9588 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true, false);
9589 if (rcStrict == VINF_SUCCESS)
9590 {
9591 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassEx");
9592 if (pcbWritten)
9593 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
9594 }
9595 else if (pVCpu->iem.s.cActiveMappings > 0)
9596 iemMemRollback(pVCpu);
9597
9598 return rcStrict;
9599}
9600
9601
9602VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
9603 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
9604{
9605 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
9606
9607 VBOXSTRICTRC rcStrict;
9608 if ( cbOpcodeBytes
9609 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
9610 {
9611 iemInitDecoder(pVCpu, true, false);
9612#ifdef IEM_WITH_CODE_TLB
9613 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
9614 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
9615 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
9616 pVCpu->iem.s.offCurInstrStart = 0;
9617 pVCpu->iem.s.offInstrNextByte = 0;
9618#else
9619 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
9620 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
9621#endif
9622 rcStrict = VINF_SUCCESS;
9623 }
9624 else
9625 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true, false);
9626 if (rcStrict == VINF_SUCCESS)
9627 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPC");
9628 else if (pVCpu->iem.s.cActiveMappings > 0)
9629 iemMemRollback(pVCpu);
9630
9631 return rcStrict;
9632}
9633
9634
9635/**
9636 * For debugging DISGetParamSize; may come in handy.
9637 *
9638 * @returns Strict VBox status code.
9639 * @param pVCpu The cross context virtual CPU structure of the
9640 * calling EMT.
9641 * @param pCtxCore The context core structure.
9642 * @param OpcodeBytesPC The PC of the opcode bytes.
9643 * @param pvOpcodeBytes Prefetched opcode bytes.
9644 * @param cbOpcodeBytes Number of prefetched bytes.
9645 * @param pcbWritten Where to return the number of bytes written.
9646 * Optional.
9647 */
9648VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPCWritten(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
9649 const void *pvOpcodeBytes, size_t cbOpcodeBytes,
9650 uint32_t *pcbWritten)
9651{
9652 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
9653
9654 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
9655 VBOXSTRICTRC rcStrict;
9656 if ( cbOpcodeBytes
9657 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
9658 {
9659 iemInitDecoder(pVCpu, true, false);
9660#ifdef IEM_WITH_CODE_TLB
9661 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
9662 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
9663 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
9664 pVCpu->iem.s.offCurInstrStart = 0;
9665 pVCpu->iem.s.offInstrNextByte = 0;
9666#else
9667 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
9668 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
9669#endif
9670 rcStrict = VINF_SUCCESS;
9671 }
9672 else
9673 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true, false);
9674 if (rcStrict == VINF_SUCCESS)
9675 {
9676 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPCWritten");
9677 if (pcbWritten)
9678 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
9679 }
9680 else if (pVCpu->iem.s.cActiveMappings > 0)
9681 iemMemRollback(pVCpu);
9682
9683 return rcStrict;
9684}
9685
9686
9687/**
9688 * For handling split cacheline lock operations when the host has split-lock
9689 * detection enabled.
9690 *
9691 * This will cause the interpreter to disregard the lock prefix and implicit
9692 * locking (xchg).
9693 *
9694 * @returns Strict VBox status code.
9695 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9696 */
9697VMMDECL(VBOXSTRICTRC) IEMExecOneIgnoreLock(PVMCPUCC pVCpu)
9698{
9699 /*
9700 * Do the decoding and emulation.
9701 */
9702 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, true /*fDisregardLock*/);
9703 if (rcStrict == VINF_SUCCESS)
9704 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneIgnoreLock");
9705 else if (pVCpu->iem.s.cActiveMappings > 0)
9706 iemMemRollback(pVCpu);
9707
9708 if (rcStrict != VINF_SUCCESS)
9709 LogFlow(("IEMExecOneIgnoreLock: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9710 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9711 return rcStrict;
9712}
9713
9714
9715VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPUCC pVCpu, uint32_t cMaxInstructions, uint32_t cPollRate, uint32_t *pcInstructions)
9716{
9717 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
9718 AssertMsg(RT_IS_POWER_OF_TWO(cPollRate + 1), ("%#x\n", cPollRate));
9719
9720 /*
9721 * See if there is an interrupt pending in TRPM, inject it if we can.
9722 */
9723 /** @todo Can we centralize this under CPUMCanInjectInterrupt()? */
9724#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9725 bool fIntrEnabled = CPUMGetGuestGif(&pVCpu->cpum.GstCtx);
9726 if (fIntrEnabled)
9727 {
9728 if (!CPUMIsGuestInNestedHwvirtMode(IEM_GET_CTX(pVCpu)))
9729 fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
9730 else if (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
9731 fIntrEnabled = CPUMIsGuestVmxPhysIntrEnabled(IEM_GET_CTX(pVCpu));
9732 else
9733 {
9734 Assert(CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)));
9735 fIntrEnabled = CPUMIsGuestSvmPhysIntrEnabled(pVCpu, IEM_GET_CTX(pVCpu));
9736 }
9737 }
9738#else
9739 bool fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
9740#endif
9741
9742 /** @todo What if we are injecting an exception and not an interrupt? Is that
9743 * possible here? For now we assert it is indeed only an interrupt. */
9744 if ( fIntrEnabled
9745 && TRPMHasTrap(pVCpu)
9746 && EMGetInhibitInterruptsPC(pVCpu) != pVCpu->cpum.GstCtx.rip)
9747 {
9748 uint8_t u8TrapNo;
9749 TRPMEVENT enmType;
9750 uint32_t uErrCode;
9751 RTGCPTR uCr2;
9752 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */, NULL /* fIcebp */);
9753 AssertRC(rc2);
9754 Assert(enmType == TRPM_HARDWARE_INT);
9755 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
9756 TRPMResetTrap(pVCpu);
9757#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9758 /* Injecting an event may cause a VM-exit. */
9759 if ( rcStrict != VINF_SUCCESS
9760 && rcStrict != VINF_IEM_RAISED_XCPT)
9761 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
9762#else
9763 NOREF(rcStrict);
9764#endif
9765 }
9766
9767 /*
9768 * Initial decoder init w/ prefetch, then setup setjmp.
9769 */
9770 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
9771 if (rcStrict == VINF_SUCCESS)
9772 {
9773#ifdef IEM_WITH_SETJMP
9774 jmp_buf JmpBuf;
9775 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
9776 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
9777 pVCpu->iem.s.cActiveMappings = 0;
9778 if ((rcStrict = setjmp(JmpBuf)) == 0)
9779#endif
9780 {
9781 /*
9782 * The run loop. The instruction count limit is supplied by the caller.
9783 */
9784 uint32_t cMaxInstructionsGccStupidity = cMaxInstructions;
9785 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
9786 for (;;)
9787 {
9788 /*
9789 * Log the state.
9790 */
9791#ifdef LOG_ENABLED
9792 iemLogCurInstr(pVCpu, true, "IEMExecLots");
9793#endif
9794
9795 /*
9796 * Do the decoding and emulation.
9797 */
9798 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
9799 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
9800 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9801 {
9802 Assert(pVCpu->iem.s.cActiveMappings == 0);
9803 pVCpu->iem.s.cInstructions++;
9804 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
9805 {
9806 uint64_t fCpu = pVCpu->fLocalForcedActions
9807 & ( VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
9808 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
9809 | VMCPU_FF_TLB_FLUSH
9810 | VMCPU_FF_INHIBIT_INTERRUPTS
9811 | VMCPU_FF_BLOCK_NMIS
9812 | VMCPU_FF_UNHALT ));
9813
9814 if (RT_LIKELY( ( !fCpu
9815 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
9816 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF) )
9817 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) ))
9818 {
9819 if (cMaxInstructionsGccStupidity-- > 0)
9820 {
9821 /* Poll timers every now and then according to the caller's specs. */
9822 if ( (cMaxInstructionsGccStupidity & cPollRate) != 0
9823 || !TMTimerPollBool(pVM, pVCpu))
9824 {
9825 Assert(pVCpu->iem.s.cActiveMappings == 0);
9826 iemReInitDecoder(pVCpu);
9827 continue;
9828 }
9829 }
9830 }
9831 }
9832 Assert(pVCpu->iem.s.cActiveMappings == 0);
9833 }
9834 else if (pVCpu->iem.s.cActiveMappings > 0)
9835 iemMemRollback(pVCpu);
9836 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9837 break;
9838 }
9839 }
9840#ifdef IEM_WITH_SETJMP
9841 else
9842 {
9843 if (pVCpu->iem.s.cActiveMappings > 0)
9844 iemMemRollback(pVCpu);
9845# if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9846 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9847# endif
9848 pVCpu->iem.s.cLongJumps++;
9849 }
9850 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
9851#endif
9852
9853 /*
9854 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
9855 */
9856 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
9857 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
9858 }
9859 else
9860 {
9861 if (pVCpu->iem.s.cActiveMappings > 0)
9862 iemMemRollback(pVCpu);
9863
9864#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9865 /*
9866 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
9867 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
9868 */
9869 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9870#endif
9871 }
9872
9873 /*
9874 * Maybe re-enter raw-mode and log.
9875 */
9876 if (rcStrict != VINF_SUCCESS)
9877 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9878 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9879 if (pcInstructions)
9880 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
9881 return rcStrict;
9882}
9883
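/*
 * Illustrative usage sketch (not part of the original source): cPollRate must
 * satisfy RT_IS_POWER_OF_TWO(cPollRate + 1) (see the assertion at the top of
 * IEMExecLots), so a call could look like
 *      uint32_t cInstructions = 0;
 *      VBOXSTRICTRC rcStrict = IEMExecLots(pVCpu, 4096, 511, &cInstructions);
 * which executes up to 4096 instructions and polls the timers roughly every
 * 512 instructions.
 */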
9884
9885/**
9886 * Interface used by EMExecuteExec, does exit statistics and limits.
9887 *
9888 * @returns Strict VBox status code.
9889 * @param pVCpu The cross context virtual CPU structure.
9890 * @param fWillExit To be defined.
9891 * @param cMinInstructions Minimum number of instructions to execute before checking for FFs.
9892 * @param cMaxInstructions Maximum number of instructions to execute.
9893 * @param cMaxInstructionsWithoutExits
9894 * The max number of instructions without exits.
9895 * @param pStats Where to return statistics.
9896 */
9897VMMDECL(VBOXSTRICTRC) IEMExecForExits(PVMCPUCC pVCpu, uint32_t fWillExit, uint32_t cMinInstructions, uint32_t cMaxInstructions,
9898 uint32_t cMaxInstructionsWithoutExits, PIEMEXECFOREXITSTATS pStats)
9899{
9900 NOREF(fWillExit); /** @todo define flexible exit crits */
9901
9902 /*
9903 * Initialize return stats.
9904 */
9905 pStats->cInstructions = 0;
9906 pStats->cExits = 0;
9907 pStats->cMaxExitDistance = 0;
9908 pStats->cReserved = 0;
9909
9910 /*
9911 * Initial decoder init w/ prefetch, then setup setjmp.
9912 */
9913 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
9914 if (rcStrict == VINF_SUCCESS)
9915 {
9916#ifdef IEM_WITH_SETJMP
9917 jmp_buf JmpBuf;
9918 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
9919 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
9920 pVCpu->iem.s.cActiveMappings = 0;
9921 if ((rcStrict = setjmp(JmpBuf)) == 0)
9922#endif
9923 {
9924#ifdef IN_RING0
9925 bool const fCheckPreemptionPending = !RTThreadPreemptIsPossible() || !RTThreadPreemptIsEnabled(NIL_RTTHREAD);
9926#endif
9927 uint32_t cInstructionSinceLastExit = 0;
9928
9929 /*
9930 * The run loop. The instruction and exit limits are supplied by the caller.
9931 */
9932 PVM pVM = pVCpu->CTX_SUFF(pVM);
9933 for (;;)
9934 {
9935 /*
9936 * Log the state.
9937 */
9938#ifdef LOG_ENABLED
9939 iemLogCurInstr(pVCpu, true, "IEMExecForExits");
9940#endif
9941
9942 /*
9943 * Do the decoding and emulation.
9944 */
9945 uint32_t const cPotentialExits = pVCpu->iem.s.cPotentialExits;
9946
9947 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
9948 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
9949
9950 if ( cPotentialExits != pVCpu->iem.s.cPotentialExits
9951 && cInstructionSinceLastExit > 0 /* don't count the first */ )
9952 {
9953 pStats->cExits += 1;
9954 if (cInstructionSinceLastExit > pStats->cMaxExitDistance)
9955 pStats->cMaxExitDistance = cInstructionSinceLastExit;
9956 cInstructionSinceLastExit = 0;
9957 }
9958
9959 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9960 {
9961 Assert(pVCpu->iem.s.cActiveMappings == 0);
9962 pVCpu->iem.s.cInstructions++;
9963 pStats->cInstructions++;
9964 cInstructionSinceLastExit++;
9965 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
9966 {
9967 uint64_t fCpu = pVCpu->fLocalForcedActions
9968 & ( VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
9969 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
9970 | VMCPU_FF_TLB_FLUSH
9971 | VMCPU_FF_INHIBIT_INTERRUPTS
9972 | VMCPU_FF_BLOCK_NMIS
9973 | VMCPU_FF_UNHALT ));
9974
9975 if (RT_LIKELY( ( ( !fCpu
9976 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
9977 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF))
9978 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) )
9979 || pStats->cInstructions < cMinInstructions))
9980 {
9981 if (pStats->cInstructions < cMaxInstructions)
9982 {
9983 if (cInstructionSinceLastExit <= cMaxInstructionsWithoutExits)
9984 {
9985#ifdef IN_RING0
9986 if ( !fCheckPreemptionPending
9987 || !RTThreadPreemptIsPending(NIL_RTTHREAD))
9988#endif
9989 {
9990 Assert(pVCpu->iem.s.cActiveMappings == 0);
9991 iemReInitDecoder(pVCpu);
9992 continue;
9993 }
9994#ifdef IN_RING0
9995 rcStrict = VINF_EM_RAW_INTERRUPT;
9996 break;
9997#endif
9998 }
9999 }
10000 }
10001 Assert(!(fCpu & VMCPU_FF_IEM));
10002 }
10003 Assert(pVCpu->iem.s.cActiveMappings == 0);
10004 }
10005 else if (pVCpu->iem.s.cActiveMappings > 0)
10006 iemMemRollback(pVCpu);
10007 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10008 break;
10009 }
10010 }
10011#ifdef IEM_WITH_SETJMP
10012 else
10013 {
10014 if (pVCpu->iem.s.cActiveMappings > 0)
10015 iemMemRollback(pVCpu);
10016 pVCpu->iem.s.cLongJumps++;
10017 }
10018 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
10019#endif
10020
10021 /*
10022 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
10023 */
10024 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
10025 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
10026 }
10027 else
10028 {
10029 if (pVCpu->iem.s.cActiveMappings > 0)
10030 iemMemRollback(pVCpu);
10031
10032#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
10033 /*
10034 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
10035 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
10036 */
10037 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10038#endif
10039 }
10040
10041 /*
10042 * Maybe re-enter raw-mode and log.
10043 */
10044 if (rcStrict != VINF_SUCCESS)
10045 LogFlow(("IEMExecForExits: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc; ins=%u exits=%u maxdist=%u\n",
10046 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp,
10047 pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict), pStats->cInstructions, pStats->cExits, pStats->cMaxExitDistance));
10048 return rcStrict;
10049}
10050
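/*
 * Illustrative usage sketch (not part of the original source; the struct name
 * is assumed from the PIEMEXECFOREXITSTATS parameter type):
 *      IEMEXECFOREXITSTATS Stats;
 *      VBOXSTRICTRC rcStrict = IEMExecForExits(pVCpu, 0 /*fWillExit*/,
 *                                              32 /*cMinInstructions*/,
 *                                              4096 /*cMaxInstructions*/,
 *                                              512 /*cMaxInstructionsWithoutExits*/,
 *                                              &Stats);
 * On return Stats.cInstructions, Stats.cExits and Stats.cMaxExitDistance
 * describe how far execution got and how densely exits occurred.
 */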
10051
10052/**
10053 * Injects a trap, fault, abort, software interrupt or external interrupt.
10054 *
10055 * The parameter list matches TRPMQueryTrapAll pretty closely.
10056 *
10057 * @returns Strict VBox status code.
10058 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10059 * @param u8TrapNo The trap number.
10060 * @param enmType What type is it (trap/fault/abort), software
10061 * interrupt or hardware interrupt.
10062 * @param uErrCode The error code if applicable.
10063 * @param uCr2 The CR2 value if applicable.
10064 * @param cbInstr The instruction length (only relevant for
10065 * software interrupts).
10066 */
10067VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPUCC pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
10068 uint8_t cbInstr)
10069{
10070 iemInitDecoder(pVCpu, false, false);
10071#ifdef DBGFTRACE_ENABLED
10072 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
10073 u8TrapNo, enmType, uErrCode, uCr2);
10074#endif
10075
10076 uint32_t fFlags;
10077 switch (enmType)
10078 {
10079 case TRPM_HARDWARE_INT:
10080 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
10081 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
10082 uErrCode = uCr2 = 0;
10083 break;
10084
10085 case TRPM_SOFTWARE_INT:
10086 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
10087 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
10088 uErrCode = uCr2 = 0;
10089 break;
10090
10091 case TRPM_TRAP:
10092 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
10093 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
10094 if (u8TrapNo == X86_XCPT_PF)
10095 fFlags |= IEM_XCPT_FLAGS_CR2;
10096 switch (u8TrapNo)
10097 {
10098 case X86_XCPT_DF:
10099 case X86_XCPT_TS:
10100 case X86_XCPT_NP:
10101 case X86_XCPT_SS:
10102 case X86_XCPT_PF:
10103 case X86_XCPT_AC:
10104 case X86_XCPT_GP:
10105 fFlags |= IEM_XCPT_FLAGS_ERR;
10106 break;
10107 }
10108 break;
10109
10110 IEM_NOT_REACHED_DEFAULT_CASE_RET();
10111 }
10112
10113 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
10114
10115 if (pVCpu->iem.s.cActiveMappings > 0)
10116 iemMemRollback(pVCpu);
10117
10118 return rcStrict;
10119}
10120
10121
10122/**
10123 * Injects the active TRPM event.
10124 *
10125 * @returns Strict VBox status code.
10126 * @param pVCpu The cross context virtual CPU structure.
10127 */
10128VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPUCC pVCpu)
10129{
10130#ifndef IEM_IMPLEMENTS_TASKSWITCH
10131 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
10132#else
10133 uint8_t u8TrapNo;
10134 TRPMEVENT enmType;
10135 uint32_t uErrCode;
10136 RTGCUINTPTR uCr2;
10137 uint8_t cbInstr;
10138 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr, NULL /* fIcebp */);
10139 if (RT_FAILURE(rc))
10140 return rc;
10141
10142 /** @todo r=ramshankar: Pass ICEBP info. to IEMInjectTrap() below and handle
10143 * ICEBP \#DB injection as a special case. */
10144 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
10145#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
10146 if (rcStrict == VINF_SVM_VMEXIT)
10147 rcStrict = VINF_SUCCESS;
10148#endif
10149#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10150 if (rcStrict == VINF_VMX_VMEXIT)
10151 rcStrict = VINF_SUCCESS;
10152#endif
10153 /** @todo Are there any other codes that imply the event was successfully
10154 * delivered to the guest? See @bugref{6607}. */
10155 if ( rcStrict == VINF_SUCCESS
10156 || rcStrict == VINF_IEM_RAISED_XCPT)
10157 TRPMResetTrap(pVCpu);
10158
10159 return rcStrict;
10160#endif
10161}
10162
10163
10164VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
10165{
10166 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
10167 return VERR_NOT_IMPLEMENTED;
10168}
10169
10170
10171VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
10172{
10173 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
10174 return VERR_NOT_IMPLEMENTED;
10175}
10176
10177
10178#if 0 /* The IRET-to-v8086 mode in PATM is very optimistic, so I don't dare do this yet. */
10179/**
10180 * Executes a IRET instruction with default operand size.
10181 *
10182 * This is for PATM.
10183 *
10184 * @returns VBox status code.
10185 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10186 * @param pCtxCore The register frame.
10187 */
10188VMM_INT_DECL(int) IEMExecInstr_iret(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore)
10189{
10190 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10191
10192 iemCtxCoreToCtx(pCtx, pCtxCore);
10193 iemInitDecoder(pVCpu);
10194 VBOXSTRICTRC rcStrict = iemCImpl_iret(pVCpu, 1, pVCpu->iem.s.enmDefOpSize);
10195 if (rcStrict == VINF_SUCCESS)
10196 iemCtxToCtxCore(pCtxCore, pCtx);
10197 else
10198 LogFlow(("IEMExecInstr_iret: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
10199 pVCpu->cpum.GstCtx.cs, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
10200 return rcStrict;
10201}
10202#endif
10203
10204
10205/**
10206 * Interface for HM and EM for executing string I/O OUT (write) instructions.
10207 *
10208 * This API ASSUMES that the caller has already verified that the guest code is
10209 * allowed to access the I/O port. (The I/O port is in the DX register in the
10210 * guest state.)
10211 *
10212 * @returns Strict VBox status code.
10213 * @param pVCpu The cross context virtual CPU structure.
10214 * @param cbValue The size of the I/O port access (1, 2, or 4).
10215 * @param enmAddrMode The addressing mode.
10216 * @param fRepPrefix Indicates whether a repeat prefix is used
10217 * (doesn't matter which for this instruction).
10218 * @param cbInstr The instruction length in bytes.
10219 * @param iEffSeg The effective segment address.
10220 * @param fIoChecked Whether the access to the I/O port has been
10221 * checked or not. It's typically checked in the
10222 * HM scenario.
10223 */
10224VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
10225 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
10226{
10227 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
10228 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10229
10230 /*
10231 * State init.
10232 */
10233 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10234
10235 /*
10236 * Switch orgy for getting to the right handler.
10237 */
10238 VBOXSTRICTRC rcStrict;
10239 if (fRepPrefix)
10240 {
10241 switch (enmAddrMode)
10242 {
10243 case IEMMODE_16BIT:
10244 switch (cbValue)
10245 {
10246 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10247 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10248 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10249 default:
10250 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10251 }
10252 break;
10253
10254 case IEMMODE_32BIT:
10255 switch (cbValue)
10256 {
10257 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10258 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10259 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10260 default:
10261 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10262 }
10263 break;
10264
10265 case IEMMODE_64BIT:
10266 switch (cbValue)
10267 {
10268 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10269 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10270 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10271 default:
10272 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10273 }
10274 break;
10275
10276 default:
10277 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10278 }
10279 }
10280 else
10281 {
10282 switch (enmAddrMode)
10283 {
10284 case IEMMODE_16BIT:
10285 switch (cbValue)
10286 {
10287 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10288 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10289 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10290 default:
10291 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10292 }
10293 break;
10294
10295 case IEMMODE_32BIT:
10296 switch (cbValue)
10297 {
10298 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10299 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10300 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10301 default:
10302 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10303 }
10304 break;
10305
10306 case IEMMODE_64BIT:
10307 switch (cbValue)
10308 {
10309 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10310 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10311 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10312 default:
10313 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10314 }
10315 break;
10316
10317 default:
10318 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10319 }
10320 }
10321
10322 if (pVCpu->iem.s.cActiveMappings)
10323 iemMemRollback(pVCpu);
10324
10325 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10326}
10327
10328
10329/**
10330 * Interface for HM and EM for executing string I/O IN (read) instructions.
10331 *
10332 * This API ASSUMES that the caller has already verified that the guest code is
10333 * allowed to access the I/O port. (The I/O port is in the DX register in the
10334 * guest state.)
10335 *
10336 * @returns Strict VBox status code.
10337 * @param pVCpu The cross context virtual CPU structure.
10338 * @param cbValue The size of the I/O port access (1, 2, or 4).
10339 * @param enmAddrMode The addressing mode.
10340 * @param fRepPrefix Indicates whether a repeat prefix is used
10341 * (doesn't matter which for this instruction).
10342 * @param cbInstr The instruction length in bytes.
10343 * @param fIoChecked Whether the access to the I/O port has been
10344 * checked or not. It's typically checked in the
10345 * HM scenario.
10346 */
10347VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
10348 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
10349{
10350 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10351
10352 /*
10353 * State init.
10354 */
10355 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10356
10357 /*
10358 * Switch orgy for getting to the right handler.
10359 */
10360 VBOXSTRICTRC rcStrict;
10361 if (fRepPrefix)
10362 {
10363 switch (enmAddrMode)
10364 {
10365 case IEMMODE_16BIT:
10366 switch (cbValue)
10367 {
10368 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
10369 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
10370 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
10371 default:
10372 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10373 }
10374 break;
10375
10376 case IEMMODE_32BIT:
10377 switch (cbValue)
10378 {
10379 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
10380 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
10381 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
10382 default:
10383 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10384 }
10385 break;
10386
10387 case IEMMODE_64BIT:
10388 switch (cbValue)
10389 {
10390 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
10391 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
10392 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
10393 default:
10394 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10395 }
10396 break;
10397
10398 default:
10399 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10400 }
10401 }
10402 else
10403 {
10404 switch (enmAddrMode)
10405 {
10406 case IEMMODE_16BIT:
10407 switch (cbValue)
10408 {
10409 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
10410 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
10411 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
10412 default:
10413 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10414 }
10415 break;
10416
10417 case IEMMODE_32BIT:
10418 switch (cbValue)
10419 {
10420 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
10421 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
10422 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
10423 default:
10424 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10425 }
10426 break;
10427
10428 case IEMMODE_64BIT:
10429 switch (cbValue)
10430 {
10431 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
10432 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
10433 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
10434 default:
10435 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10436 }
10437 break;
10438
10439 default:
10440 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10441 }
10442 }
10443
10444 if ( pVCpu->iem.s.cActiveMappings == 0
10445 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
10446 { /* likely */ }
10447 else
10448 {
10449 AssertMsg(!IOM_SUCCESS(rcStrict), ("%#x\n", VBOXSTRICTRC_VAL(rcStrict)));
10450 iemMemRollback(pVCpu);
10451 }
10452 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10453}
10454
10455
10456/**
10457 * Interface for rawmode to execute an OUT (write) instruction.
10458 *
10459 * @returns Strict VBox status code.
10460 * @param pVCpu The cross context virtual CPU structure.
10461 * @param cbInstr The instruction length in bytes.
10462 * @param u16Port The port to write to.
10463 * @param fImm Whether the port is specified using an immediate operand or
10464 * using the implicit DX register.
10465 * @param cbReg The register size.
10466 *
10467 * @remarks In ring-0 not all of the state needs to be synced in.
10468 */
10469VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
10470{
10471 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10472 Assert(cbReg <= 4 && cbReg != 3);
10473
10474 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10475 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_out, u16Port, fImm, cbReg);
10476 Assert(!pVCpu->iem.s.cActiveMappings);
10477 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10478}
10479
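/*
 * Note (not part of the original source): cbReg must be 1, 2 or 4 (see the
 * assertion above); e.g. a trapped "out dx, al" (opcode 0xEE, one byte) would
 * be replayed as
 *      IEMExecDecodedOut(pVCpu, 1 /*cbInstr*/, u16Port, false /*fImm*/, 1 /*cbReg*/);
 */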
10480
10481/**
10482 * Interface for rawmode to execute an IN (read) instruction.
10483 *
10484 * @returns Strict VBox status code.
10485 * @param pVCpu The cross context virtual CPU structure.
10486 * @param cbInstr The instruction length in bytes.
10487 * @param u16Port The port to read.
10488 * @param fImm Whether the port is specified using an immediate operand or
10489 * using the implicit DX register.
10490 * @param cbReg The register size.
10491 */
10492VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
10493{
10494 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10495 Assert(cbReg <= 4 && cbReg != 3);
10496
10497 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10498 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_in, u16Port, fImm, cbReg);
10499 Assert(!pVCpu->iem.s.cActiveMappings);
10500 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10501}
10502
10503
10504/**
10505 * Interface for HM and EM to write to a CRx register.
10506 *
10507 * @returns Strict VBox status code.
10508 * @param pVCpu The cross context virtual CPU structure.
10509 * @param cbInstr The instruction length in bytes.
10510 * @param iCrReg The control register number (destination).
10511 * @param iGReg The general purpose register number (source).
10512 *
10513 * @remarks In ring-0 not all of the state needs to be synced in.
10514 */
10515VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
10516{
10517 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10518 Assert(iCrReg < 16);
10519 Assert(iGReg < 16);
10520
10521 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10522 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
10523 Assert(!pVCpu->iem.s.cActiveMappings);
10524 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10525}
10526
10527
10528/**
10529 * Interface for HM and EM to read from a CRx register.
10530 *
10531 * @returns Strict VBox status code.
10532 * @param pVCpu The cross context virtual CPU structure.
10533 * @param cbInstr The instruction length in bytes.
10534 * @param iGReg The general purpose register number (destination).
10535 * @param iCrReg The control register number (source).
10536 *
10537 * @remarks In ring-0 not all of the state needs to be synced in.
10538 */
10539VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
10540{
10541 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10542 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4
10543 | CPUMCTX_EXTRN_APIC_TPR);
10544 Assert(iCrReg < 16);
10545 Assert(iGReg < 16);
10546
10547 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10548 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
10549 Assert(!pVCpu->iem.s.cActiveMappings);
10550 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10551}
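
#if 0 /* Illustrative sketch only -- not part of IEMAll.cpp. */
/**
 * Hypothetical MOV CRx exit-handler fragment.  'fWrite', 'iCrReg', 'iGReg'
 * and 'cbInstr' are placeholders assumed to come from the caller's exit
 * decoding; only the IEMExecDecodedMovCRxWrite/Read calls above are real.
 */
static VBOXSTRICTRC exampleHandleMovCRxExit(PVMCPUCC pVCpu, bool fWrite,
                                            uint8_t iCrReg, uint8_t iGReg, uint8_t cbInstr)
{
    /* Note the differing parameter order: the destination comes first in both cases. */
    return fWrite
         ? IEMExecDecodedMovCRxWrite(pVCpu, cbInstr, iCrReg, iGReg)
         : IEMExecDecodedMovCRxRead( pVCpu, cbInstr, iGReg,  iCrReg);
}
#endif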
10552
10553
10554/**
10555 * Interface for HM and EM to clear the CR0[TS] bit.
10556 *
10557 * @returns Strict VBox status code.
10558 * @param pVCpu The cross context virtual CPU structure.
10559 * @param cbInstr The instruction length in bytes.
10560 *
10561 * @remarks In ring-0 not all of the state needs to be synced in.
10562 */
10563VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPUCC pVCpu, uint8_t cbInstr)
10564{
10565 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10566
10567 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10568 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
10569 Assert(!pVCpu->iem.s.cActiveMappings);
10570 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10571}
10572
10573
10574/**
10575 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
10576 *
10577 * @returns Strict VBox status code.
10578 * @param pVCpu The cross context virtual CPU structure.
10579 * @param cbInstr The instruction length in bytes.
10580 * @param uValue The value to load into CR0.
10581 * @param GCPtrEffDst The guest-linear address if the LMSW instruction has a
10582 * memory operand. Otherwise pass NIL_RTGCPTR.
10583 *
10584 * @remarks In ring-0 not all of the state needs to be synced in.
10585 */
10586VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t uValue, RTGCPTR GCPtrEffDst)
10587{
10588 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10589
10590 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10591 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_lmsw, uValue, GCPtrEffDst);
10592 Assert(!pVCpu->iem.s.cActiveMappings);
10593 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10594}
10595
10596
10597/**
10598 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
10599 *
10600 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
10601 *
10602 * @returns Strict VBox status code.
10603 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10604 * @param cbInstr The instruction length in bytes.
10605 * @remarks In ring-0 not all of the state needs to be synced in.
10606 * @thread EMT(pVCpu)
10607 */
10608VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPUCC pVCpu, uint8_t cbInstr)
10609{
10610 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10611
10612 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10613 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
10614 Assert(!pVCpu->iem.s.cActiveMappings);
10615 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10616}
10617
10618
10619/**
10620 * Interface for HM and EM to emulate the WBINVD instruction.
10621 *
10622 * @returns Strict VBox status code.
10623 * @param pVCpu The cross context virtual CPU structure.
10624 * @param cbInstr The instruction length in bytes.
10625 *
10626 * @remarks In ring-0 not all of the state needs to be synced in.
10627 */
10628VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWbinvd(PVMCPUCC pVCpu, uint8_t cbInstr)
10629{
10630 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10631
10632 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10633 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wbinvd);
10634 Assert(!pVCpu->iem.s.cActiveMappings);
10635 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10636}
10637
10638
10639/**
10640 * Interface for HM and EM to emulate the INVD instruction.
10641 *
10642 * @returns Strict VBox status code.
10643 * @param pVCpu The cross context virtual CPU structure.
10644 * @param cbInstr The instruction length in bytes.
10645 *
10646 * @remarks In ring-0 not all of the state needs to be synced in.
10647 */
10648VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvd(PVMCPUCC pVCpu, uint8_t cbInstr)
10649{
10650 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10651
10652 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10653 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invd);
10654 Assert(!pVCpu->iem.s.cActiveMappings);
10655 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10656}
10657
10658
10659/**
10660 * Interface for HM and EM to emulate the INVLPG instruction.
10661 *
10662 * @returns Strict VBox status code.
10663 * @retval VINF_PGM_SYNC_CR3
10664 *
10665 * @param pVCpu The cross context virtual CPU structure.
10666 * @param cbInstr The instruction length in bytes.
10667 * @param GCPtrPage The effective address of the page to invalidate.
10668 *
10669 * @remarks In ring-0 not all of the state needs to be synced in.
10670 */
10671VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpg(PVMCPUCC pVCpu, uint8_t cbInstr, RTGCPTR GCPtrPage)
10672{
10673 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10674
10675 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10676 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_invlpg, GCPtrPage);
10677 Assert(!pVCpu->iem.s.cActiveMappings);
10678 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10679}
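
#if 0 /* Illustrative sketch only -- not part of IEMAll.cpp. */
/**
 * Hypothetical INVLPG exit-handler fragment.  'GCPtrPage' and 'cbInstr' are
 * placeholders for values the caller decodes from the exit.  The point of
 * the sketch is the status handling: VINF_PGM_SYNC_CR3 is informational and
 * should be propagated so the caller's normal status processing lets PGM
 * resynchronize.
 */
static VBOXSTRICTRC exampleHandleInvlpgExit(PVMCPUCC pVCpu, RTGCPTR GCPtrPage, uint8_t cbInstr)
{
    VBOXSTRICTRC rcStrict = IEMExecDecodedInvlpg(pVCpu, cbInstr, GCPtrPage);
    if (rcStrict == VINF_PGM_SYNC_CR3)
        Log(("INVLPG %RGv -> VINF_PGM_SYNC_CR3\n", GCPtrPage));
    return rcStrict; /* Do not swallow informational statuses. */
}
#endif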
10680
10681
10682/**
10683 * Interface for HM and EM to emulate the INVPCID instruction.
10684 *
10685 * @returns Strict VBox status code.
10686 * @retval VINF_PGM_SYNC_CR3
10687 *
10688 * @param pVCpu The cross context virtual CPU structure.
10689 * @param cbInstr The instruction length in bytes.
10690 * @param iEffSeg The effective segment register.
10691 * @param GCPtrDesc The effective address of the INVPCID descriptor.
10692 * @param uType The invalidation type.
10693 *
10694 * @remarks In ring-0 not all of the state needs to be synced in.
10695 */
10696VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvpcid(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPTR GCPtrDesc,
10697 uint64_t uType)
10698{
10699 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 4);
10700
10701 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10702 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_invpcid, iEffSeg, GCPtrDesc, uType);
10703 Assert(!pVCpu->iem.s.cActiveMappings);
10704 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10705}
10706
10707
10708/**
10709 * Interface for HM and EM to emulate the CPUID instruction.
10710 *
10711 * @returns Strict VBox status code.
10712 *
10713 * @param pVCpu The cross context virtual CPU structure.
10714 * @param cbInstr The instruction length in bytes.
10715 *
10716 * @remarks Not all of the state needs to be synced in; the usual mask plus RAX and RCX.
10717 */
10718VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedCpuid(PVMCPUCC pVCpu, uint8_t cbInstr)
10719{
10720 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10721 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX);
10722
10723 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10724 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_cpuid);
10725 Assert(!pVCpu->iem.s.cActiveMappings);
10726 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10727}
10728
10729
10730/**
10731 * Interface for HM and EM to emulate the RDPMC instruction.
10732 *
10733 * @returns Strict VBox status code.
10734 *
10735 * @param pVCpu The cross context virtual CPU structure.
10736 * @param cbInstr The instruction length in bytes.
10737 *
10738 * @remarks Not all of the state needs to be synced in.
10739 */
10740VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdpmc(PVMCPUCC pVCpu, uint8_t cbInstr)
10741{
10742 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10743 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
10744
10745 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10746 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdpmc);
10747 Assert(!pVCpu->iem.s.cActiveMappings);
10748 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10749}
10750
10751
10752/**
10753 * Interface for HM and EM to emulate the RDTSC instruction.
10754 *
10755 * @returns Strict VBox status code.
10756 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10757 *
10758 * @param pVCpu The cross context virtual CPU structure.
10759 * @param cbInstr The instruction length in bytes.
10760 *
10761 * @remarks Not all of the state needs to be synced in.
10762 */
10763VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtsc(PVMCPUCC pVCpu, uint8_t cbInstr)
10764{
10765 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10766 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
10767
10768 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10769 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtsc);
10770 Assert(!pVCpu->iem.s.cActiveMappings);
10771 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10772}
10773
10774
10775/**
10776 * Interface for HM and EM to emulate the RDTSCP instruction.
10777 *
10778 * @returns Strict VBox status code.
10779 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10780 *
10781 * @param pVCpu The cross context virtual CPU structure.
10782 * @param cbInstr The instruction length in bytes.
10783 *
10784 * @remarks Not all of the state needs to be synced in.  It is recommended
10785 *          to include CPUMCTX_EXTRN_TSC_AUX to avoid an extra fetch call.
10786 */
10787VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtscp(PVMCPUCC pVCpu, uint8_t cbInstr)
10788{
10789 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10790 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_TSC_AUX);
10791
10792 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10793 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtscp);
10794 Assert(!pVCpu->iem.s.cActiveMappings);
10795 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10796}
10797
10798
10799/**
10800 * Interface for HM and EM to emulate the RDMSR instruction.
10801 *
10802 * @returns Strict VBox status code.
10803 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10804 *
10805 * @param pVCpu The cross context virtual CPU structure.
10806 * @param cbInstr The instruction length in bytes.
10807 *
10808 * @remarks Not all of the state needs to be synced in. Requires RCX and
10809 * (currently) all MSRs.
10810 */
10811VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
10812{
10813 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10814 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_ALL_MSRS);
10815
10816 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10817 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdmsr);
10818 Assert(!pVCpu->iem.s.cActiveMappings);
10819 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10820}
10821
10822
10823/**
10824 * Interface for HM and EM to emulate the WRMSR instruction.
10825 *
10826 * @returns Strict VBox status code.
10827 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10828 *
10829 * @param pVCpu The cross context virtual CPU structure.
10830 * @param cbInstr The instruction length in bytes.
10831 *
10832 * @remarks Not all of the state needs to be synced in. Requires RCX, RAX, RDX,
10833 * and (currently) all MSRs.
10834 */
10835VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWrmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
10836{
10837 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10838 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK
10839 | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_ALL_MSRS);
10840
10841 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10842 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wrmsr);
10843 Assert(!pVCpu->iem.s.cActiveMappings);
10844 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10845}
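
#if 0 /* Illustrative sketch only -- not part of IEMAll.cpp. */
/**
 * Hypothetical MSR exit-handler fragment.  It assumes the caller has already
 * imported the state demanded by the IEM_CTX_ASSERT checks above (RCX for
 * RDMSR; RCX, RAX and RDX for WRMSR; plus the MSRs) before calling.  'fWrite'
 * and 'cbInstr' are placeholders supplied by the caller.
 */
static VBOXSTRICTRC exampleHandleMsrExit(PVMCPUCC pVCpu, bool fWrite, uint8_t cbInstr)
{
    return fWrite
         ? IEMExecDecodedWrmsr(pVCpu, cbInstr)
         : IEMExecDecodedRdmsr(pVCpu, cbInstr);
}
#endif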
10846
10847
10848/**
10849 * Interface for HM and EM to emulate the MONITOR instruction.
10850 *
10851 * @returns Strict VBox status code.
10852 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10853 *
10854 * @param pVCpu The cross context virtual CPU structure.
10855 * @param cbInstr The instruction length in bytes.
10856 *
10857 * @remarks Not all of the state needs to be synced in.
10858 * @remarks ASSUMES the default segment of DS and no segment override prefixes
10859 * are used.
10860 */
10861VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMonitor(PVMCPUCC pVCpu, uint8_t cbInstr)
10862{
10863 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10864 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);
10865
10866 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10867 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_monitor, X86_SREG_DS);
10868 Assert(!pVCpu->iem.s.cActiveMappings);
10869 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10870}
10871
10872
10873/**
10874 * Interface for HM and EM to emulate the MWAIT instruction.
10875 *
10876 * @returns Strict VBox status code.
10877 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10878 *
10879 * @param pVCpu The cross context virtual CPU structure.
10880 * @param cbInstr The instruction length in bytes.
10881 *
10882 * @remarks Not all of the state needs to be synced in.
10883 */
10884VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMwait(PVMCPUCC pVCpu, uint8_t cbInstr)
10885{
10886 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10887 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX);
10888
10889 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10890 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_mwait);
10891 Assert(!pVCpu->iem.s.cActiveMappings);
10892 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10893}
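
#if 0 /* Illustrative sketch only -- not part of IEMAll.cpp. */
/**
 * Hypothetical MONITOR/MWAIT exit-handler fragment.  'fIsMwait' and 'cbInstr'
 * are placeholders from the caller's exit decoding.  Remember that the
 * MONITOR interface above assumes the DS default segment with no override
 * prefixes.
 */
static VBOXSTRICTRC exampleHandleMonitorMwaitExit(PVMCPUCC pVCpu, bool fIsMwait, uint8_t cbInstr)
{
    return fIsMwait
         ? IEMExecDecodedMwait(  pVCpu, cbInstr)
         : IEMExecDecodedMonitor(pVCpu, cbInstr);
}
#endif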
10894
10895
10896/**
10897 * Interface for HM and EM to emulate the HLT instruction.
10898 *
10899 * @returns Strict VBox status code.
10900 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10901 *
10902 * @param pVCpu The cross context virtual CPU structure.
10903 * @param cbInstr The instruction length in bytes.
10904 *
10905 * @remarks Not all of the state needs to be synced in.
10906 */
10907VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedHlt(PVMCPUCC pVCpu, uint8_t cbInstr)
10908{
10909 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10910
10911 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10912 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_hlt);
10913 Assert(!pVCpu->iem.s.cActiveMappings);
10914 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10915}
10916
10917
10918/**
10919 * Checks if IEM is in the process of delivering an event (interrupt or
10920 * exception).
10921 *
10922 * @returns true if we're in the process of raising an interrupt or exception,
10923 * false otherwise.
10924 * @param pVCpu The cross context virtual CPU structure.
10925 * @param puVector Where to store the vector associated with the
10926 * currently delivered event, optional.
10927 * @param pfFlags Where to store the event delivery flags (see
10928 * IEM_XCPT_FLAGS_XXX), optional.
10929 * @param puErr Where to store the error code associated with the
10930 * event, optional.
10931 * @param puCr2 Where to store the CR2 associated with the event,
10932 * optional.
10933 * @remarks The caller should check the flags to determine if the error code and
10934 * CR2 are valid for the event.
10935 */
10936VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPUCC pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr, uint64_t *puCr2)
10937{
10938 bool const fRaisingXcpt = pVCpu->iem.s.cXcptRecursions > 0;
10939 if (fRaisingXcpt)
10940 {
10941 if (puVector)
10942 *puVector = pVCpu->iem.s.uCurXcpt;
10943 if (pfFlags)
10944 *pfFlags = pVCpu->iem.s.fCurXcpt;
10945 if (puErr)
10946 *puErr = pVCpu->iem.s.uCurXcptErr;
10947 if (puCr2)
10948 *puCr2 = pVCpu->iem.s.uCurXcptCr2;
10949 }
10950 return fRaisingXcpt;
10951}
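
#if 0 /* Illustrative sketch only -- not part of IEMAll.cpp. */
/**
 * Hypothetical caller fragment for IEMGetCurrentXcpt, showing the flag checks
 * the remark above refers to.  IEM_XCPT_FLAGS_ERR and IEM_XCPT_FLAGS_CR2 are
 * assumed to be the relevant IEM_XCPT_FLAGS_XXX bits governing error-code and
 * CR2 validity.
 */
static void exampleLogCurrentXcpt(PVMCPUCC pVCpu)
{
    uint8_t  uVector = 0;
    uint32_t fFlags  = 0;
    uint32_t uErr    = 0;
    uint64_t uCr2    = 0;
    if (IEMGetCurrentXcpt(pVCpu, &uVector, &fFlags, &uErr, &uCr2))
    {
        Log(("Delivering vector %#x fFlags=%#x\n", uVector, fFlags));
        if (fFlags & IEM_XCPT_FLAGS_ERR)
            Log(("  error code %#x\n", uErr));
        if (fFlags & IEM_XCPT_FLAGS_CR2)
            Log(("  CR2 %#RX64\n", uCr2));
    }
}
#endif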
10952
10953#ifdef IN_RING3
10954
10955/**
10956 * Handles the unlikely and probably fatal merge cases.
10957 *
10958 * @returns Merged status code.
10959 * @param rcStrict Current EM status code.
10960 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
10961 * with @a rcStrict.
10962 * @param iMemMap The memory mapping index. For error reporting only.
10963 * @param pVCpu The cross context virtual CPU structure of the calling
10964 * thread, for error reporting only.
10965 */
10966DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
10967 unsigned iMemMap, PVMCPUCC pVCpu)
10968{
10969 if (RT_FAILURE_NP(rcStrict))
10970 return rcStrict;
10971
10972 if (RT_FAILURE_NP(rcStrictCommit))
10973 return rcStrictCommit;
10974
10975 if (rcStrict == rcStrictCommit)
10976 return rcStrictCommit;
10977
10978 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
10979 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
10980 pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
10981 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
10982 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
10983 return VERR_IOM_FF_STATUS_IPE;
10984}
10985
10986
10987/**
10988 * Helper for IOMR3ProcessForceFlag.
10989 *
10990 * @returns Merged status code.
10991 * @param rcStrict Current EM status code.
10992 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
10993 * with @a rcStrict.
10994 * @param iMemMap The memory mapping index. For error reporting only.
10995 * @param pVCpu The cross context virtual CPU structure of the calling
10996 * thread, for error reporting only.
10997 */
10998DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPUCC pVCpu)
10999{
11000 /* Simple. */
11001 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
11002 return rcStrictCommit;
11003
11004 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
11005 return rcStrict;
11006
11007 /* EM scheduling status codes. */
11008 if (RT_LIKELY( rcStrict >= VINF_EM_FIRST
11009 && rcStrict <= VINF_EM_LAST))
11010 {
11011 if (RT_LIKELY( rcStrictCommit >= VINF_EM_FIRST
11012 && rcStrictCommit <= VINF_EM_LAST))
11013 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
11014 }
11015
11016 /* Unlikely */
11017 return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
11018}
11019
11020
11021/**
11022 * Called by force-flag handling code when VMCPU_FF_IEM is set.
11023 *
11024 * @returns Merge between @a rcStrict and what the commit operation returned.
11025 * @param pVM The cross context VM structure.
11026 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11027 * @param rcStrict The status code returned by ring-0 or raw-mode.
11028 */
11029VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
11030{
11031 /*
11032 * Reset the pending commit.
11033 */
11034 AssertMsg( (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
11035 & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
11036 ("%#x %#x %#x\n",
11037 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
11038 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
11039
11040 /*
11041 * Commit the pending bounce buffers (usually just one).
11042 */
11043 unsigned cBufs = 0;
11044 unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
11045 while (iMemMap-- > 0)
11046 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
11047 {
11048 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
11049 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
11050 Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
11051
11052 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
11053 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
11054 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
11055
11056 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
11057 {
11058 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
11059 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
11060 pbBuf,
11061 cbFirst,
11062 PGMACCESSORIGIN_IEM);
11063 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
11064 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
11065 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
11066 VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
11067 }
11068
11069 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
11070 {
11071 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
11072 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
11073 pbBuf + cbFirst,
11074 cbSecond,
11075 PGMACCESSORIGIN_IEM);
11076 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
11077 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
11078 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
11079 VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
11080 }
11081 cBufs++;
11082 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
11083 }
11084
11085 AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
11086 ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
11087 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
11088 pVCpu->iem.s.cActiveMappings = 0;
11089 return rcStrict;
11090}
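
#if 0 /* Illustrative sketch only -- not part of IEMAll.cpp. */
/**
 * Hypothetical ring-3 fragment resembling the force-flag processing that
 * invokes IEMR3ProcessForceFlag: when VMCPU_FF_IEM is pending, the status
 * returned by ring-0/raw-mode is merged with the commit status of the
 * pending bounce-buffer writes.
 */
static VBOXSTRICTRC exampleProcessIemForceFlag(PVM pVM, PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
{
    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
        rcStrict = IEMR3ProcessForceFlag(pVM, pVCpu, rcStrict);
    return rcStrict;
}
#endif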
11091
11092#endif /* IN_RING3 */
11093