VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@95085

Last change on this file since 95085 was 94868, checked in by vboxsync, 3 years ago

VMM/IEM: More TLB work. bugref:9898

1/* $Id: IEMAll.cpp 94868 2022-05-05 20:47:25Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2022 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_iem IEM - Interpreted Execution Manager
20 *
21 * The interpreted execution manager (IEM) is for executing short guest code
22 * sequences that are causing too many exits / virtualization traps. It will
23 * also be used to interpret single instructions, thus replacing the selective
24 * interpreters in EM and IOM.
25 *
26 * Design goals:
27 * - Relatively small footprint, although we favour speed and correctness
28 * over size.
29 * - Reasonably fast.
30 * - Correctly handle lock prefixed instructions.
31 * - Complete instruction set - eventually.
32 * - Refactorable into a recompiler, maybe.
33 * - Replace EMInterpret*.
34 *
35 * Using the existing disassembler has been considered; however, this is thought
36 * to conflict with speed as the disassembler chews things a bit too much while
37 * leaving us with a somewhat complicated state to interpret afterwards.
38 *
39 *
40 * The current code is very much work in progress. You've been warned!
41 *
42 *
43 * @section sec_iem_fpu_instr FPU Instructions
44 *
45 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
46 * same or equivalent instructions on the host FPU. To make life easy, we also
47 * let the FPU prioritize the unmasked exceptions for us. This, however, only
48 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
49 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
50 * can trigger spurious FPU exceptions.
51 *
52 * The guest FPU state is not loaded into the host CPU and kept there till we
53 * leave IEM because the calling conventions have declared an all year open
54 * season on much of the FPU state. For instance an innocent looking call to
55 * memcpy might end up using a whole bunch of XMM or MM registers if the
56 * particular implementation finds it worthwhile.
57 *
58 *
59 * @section sec_iem_logging Logging
60 *
61 * The IEM code uses the \"IEM\" log group for the main logging. The different
62 * logging levels/flags are generally used for the following purposes:
63 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
64 * - Flow (LogFlow) : Basic enter/exit IEM state info.
65 * - Level 2 (Log2) : ?
66 * - Level 3 (Log3) : More detailed enter/exit IEM state info.
67 * - Level 4 (Log4) : Decoding mnemonics w/ EIP.
68 * - Level 5 (Log5) : Decoding details.
69 * - Level 6 (Log6) : Enables/disables the lockstep comparison with REM.
70 * - Level 7 (Log7) : iret++ execution logging.
71 * - Level 8 (Log8) : Memory writes.
72 * - Level 9 (Log9) : Memory reads.
73 * - Level 10 (Log10): TLBs.
74 */
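/* For illustration, the level/flag list above maps onto the logging macros used
   throughout this file, e.g.:
       Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));  // level 1: faults and other major events
       Log10(("IEMTlbInvalidateAll\n"));                                          // level 10: TLB activity
   Both statements appear further down in this file; whether they emit anything is
   governed by the LOG_GROUP_IEM group/level settings (see VBox/log.h). */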
75
76/* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
77#ifdef _MSC_VER
78# pragma warning(disable:4505)
79#endif
80
81
82/*********************************************************************************************************************************
83* Header Files *
84*********************************************************************************************************************************/
85#define LOG_GROUP LOG_GROUP_IEM
86#define VMCPU_INCL_CPUM_GST_CTX
87#include <VBox/vmm/iem.h>
88#include <VBox/vmm/cpum.h>
89#include <VBox/vmm/apic.h>
90#include <VBox/vmm/pdm.h>
91#include <VBox/vmm/pgm.h>
92#include <VBox/vmm/iom.h>
93#include <VBox/vmm/em.h>
94#include <VBox/vmm/hm.h>
95#include <VBox/vmm/nem.h>
96#include <VBox/vmm/gim.h>
97#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
98# include <VBox/vmm/em.h>
99# include <VBox/vmm/hm_svm.h>
100#endif
101#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
102# include <VBox/vmm/hmvmxinline.h>
103#endif
104#include <VBox/vmm/tm.h>
105#include <VBox/vmm/dbgf.h>
106#include <VBox/vmm/dbgftrace.h>
107#include "IEMInternal.h"
108#include <VBox/vmm/vmcc.h>
109#include <VBox/log.h>
110#include <VBox/err.h>
111#include <VBox/param.h>
112#include <VBox/dis.h>
113#include <VBox/disopcode.h>
114#include <iprt/asm-math.h>
115#include <iprt/assert.h>
116#include <iprt/string.h>
117#include <iprt/x86.h>
118
119#include "IEMInline.h"
120
121
122/*********************************************************************************************************************************
123* Structures and Typedefs *
124*********************************************************************************************************************************/
125/**
126 * CPU exception classes.
127 */
128typedef enum IEMXCPTCLASS
129{
130 IEMXCPTCLASS_BENIGN,
131 IEMXCPTCLASS_CONTRIBUTORY,
132 IEMXCPTCLASS_PAGE_FAULT,
133 IEMXCPTCLASS_DOUBLE_FAULT
134} IEMXCPTCLASS;
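/* These classes feed the recursion handling in IEMEvaluateRecursiveXcpt() further down:
   a contributory exception raised while delivering another contributory one, or a page
   fault/contributory exception raised while delivering a page fault, escalates to #DF;
   a contributory exception or page fault raised while delivering #DF escalates to a
   triple fault; combinations involving a benign exception simply deliver the new one. */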
135
136
137/*********************************************************************************************************************************
138* Global Variables *
139*********************************************************************************************************************************/
140#if defined(IEM_LOG_MEMORY_WRITES)
141/** What IEM just wrote. */
142uint8_t g_abIemWrote[256];
143/** How much IEM just wrote. */
144size_t g_cbIemWrote;
145#endif
146
147
148/*********************************************************************************************************************************
149* Internal Functions *
150*********************************************************************************************************************************/
151static VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,
152 uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT;
153
154
155/**
156 * Initializes the decoder state.
157 *
158 * iemReInitDecoder is mostly a copy of this function.
159 *
160 * @param pVCpu The cross context virtual CPU structure of the
161 * calling thread.
162 * @param fBypassHandlers Whether to bypass access handlers.
163 * @param fDisregardLock Whether to disregard the LOCK prefix.
164 */
165DECLINLINE(void) iemInitDecoder(PVMCPUCC pVCpu, bool fBypassHandlers, bool fDisregardLock)
166{
167 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
168 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
169 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
170 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
171 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
172 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
173 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
174 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
175 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
176 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
177
178 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
179 IEMMODE enmMode = iemCalcCpuMode(pVCpu);
180 pVCpu->iem.s.enmCpuMode = enmMode;
181 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
182 pVCpu->iem.s.enmEffAddrMode = enmMode;
183 if (enmMode != IEMMODE_64BIT)
184 {
185 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
186 pVCpu->iem.s.enmEffOpSize = enmMode;
187 }
188 else
189 {
190 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
191 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
192 }
193 pVCpu->iem.s.fPrefixes = 0;
194 pVCpu->iem.s.uRexReg = 0;
195 pVCpu->iem.s.uRexB = 0;
196 pVCpu->iem.s.uRexIndex = 0;
197 pVCpu->iem.s.idxPrefix = 0;
198 pVCpu->iem.s.uVex3rdReg = 0;
199 pVCpu->iem.s.uVexLength = 0;
200 pVCpu->iem.s.fEvexStuff = 0;
201 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
202#ifdef IEM_WITH_CODE_TLB
203 pVCpu->iem.s.pbInstrBuf = NULL;
204 pVCpu->iem.s.offInstrNextByte = 0;
205 pVCpu->iem.s.offCurInstrStart = 0;
206# ifdef VBOX_STRICT
207 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
208 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
209 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
210# endif
211#else
212 pVCpu->iem.s.offOpcode = 0;
213 pVCpu->iem.s.cbOpcode = 0;
214#endif
215 pVCpu->iem.s.offModRm = 0;
216 pVCpu->iem.s.cActiveMappings = 0;
217 pVCpu->iem.s.iNextMapping = 0;
218 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
219 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
220 pVCpu->iem.s.fDisregardLock = fDisregardLock;
221
222#ifdef DBGFTRACE_ENABLED
223 switch (enmMode)
224 {
225 case IEMMODE_64BIT:
226 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.rip);
227 break;
228 case IEMMODE_32BIT:
229 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
230 break;
231 case IEMMODE_16BIT:
232 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
233 break;
234 }
235#endif
236}
237
238
239/**
240 * Reinitializes the decoder state 2nd+ loop of IEMExecLots.
241 *
242 * This is mostly a copy of iemInitDecoder.
243 *
244 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
245 */
246DECLINLINE(void) iemReInitDecoder(PVMCPUCC pVCpu)
247{
248 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
249 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
250 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
251 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
252 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
253 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
254 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
255 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
256 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
257
258 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu); /** @todo this should be updated during execution! */
259 IEMMODE enmMode = iemCalcCpuMode(pVCpu);
260 pVCpu->iem.s.enmCpuMode = enmMode; /** @todo this should be updated during execution! */
261 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
262 pVCpu->iem.s.enmEffAddrMode = enmMode;
263 if (enmMode != IEMMODE_64BIT)
264 {
265 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
266 pVCpu->iem.s.enmEffOpSize = enmMode;
267 }
268 else
269 {
270 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
271 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
272 }
273 pVCpu->iem.s.fPrefixes = 0;
274 pVCpu->iem.s.uRexReg = 0;
275 pVCpu->iem.s.uRexB = 0;
276 pVCpu->iem.s.uRexIndex = 0;
277 pVCpu->iem.s.idxPrefix = 0;
278 pVCpu->iem.s.uVex3rdReg = 0;
279 pVCpu->iem.s.uVexLength = 0;
280 pVCpu->iem.s.fEvexStuff = 0;
281 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
282#ifdef IEM_WITH_CODE_TLB
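 /* If the instruction buffer mapped for the previous instruction still covers the new
    RIP, keep it and just adjust the offsets; otherwise drop it so the next opcode
    fetch goes through the code TLB again. */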
283 if (pVCpu->iem.s.pbInstrBuf)
284 {
285 uint64_t off = (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pVCpu->cpum.GstCtx.rip : pVCpu->cpum.GstCtx.eip + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base)
286 - pVCpu->iem.s.uInstrBufPc;
287 if (off < pVCpu->iem.s.cbInstrBufTotal)
288 {
289 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
290 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
291 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
292 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
293 else
294 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
295 }
296 else
297 {
298 pVCpu->iem.s.pbInstrBuf = NULL;
299 pVCpu->iem.s.offInstrNextByte = 0;
300 pVCpu->iem.s.offCurInstrStart = 0;
301 pVCpu->iem.s.cbInstrBuf = 0;
302 pVCpu->iem.s.cbInstrBufTotal = 0;
303 }
304 }
305 else
306 {
307 pVCpu->iem.s.offInstrNextByte = 0;
308 pVCpu->iem.s.offCurInstrStart = 0;
309 pVCpu->iem.s.cbInstrBuf = 0;
310 pVCpu->iem.s.cbInstrBufTotal = 0;
311 }
312#else
313 pVCpu->iem.s.cbOpcode = 0;
314 pVCpu->iem.s.offOpcode = 0;
315#endif
316 pVCpu->iem.s.offModRm = 0;
317 Assert(pVCpu->iem.s.cActiveMappings == 0);
318 pVCpu->iem.s.iNextMapping = 0;
319 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
320 Assert(pVCpu->iem.s.fBypassHandlers == false);
321
322#ifdef DBGFTRACE_ENABLED
323 switch (enmMode)
324 {
325 case IEMMODE_64BIT:
326 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.rip);
327 break;
328 case IEMMODE_32BIT:
329 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
330 break;
331 case IEMMODE_16BIT:
332 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
333 break;
334 }
335#endif
336}
337
338
339
340/**
341 * Prefetches opcodes the first time execution is started.
342 *
343 * @returns Strict VBox status code.
344 * @param pVCpu The cross context virtual CPU structure of the
345 * calling thread.
346 * @param fBypassHandlers Whether to bypass access handlers.
347 * @param fDisregardLock Whether to disregard LOCK prefixes.
348 *
349 * @todo Combine fDisregardLock and fBypassHandlers into a flag parameter and
350 * store them as such.
351 */
352static VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPUCC pVCpu, bool fBypassHandlers, bool fDisregardLock) RT_NOEXCEPT
353{
354 iemInitDecoder(pVCpu, fBypassHandlers, fDisregardLock);
355
356#ifdef IEM_WITH_CODE_TLB
357 /** @todo Do ITLB lookup here. */
358
359#else /* !IEM_WITH_CODE_TLB */
360
361 /*
362 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
363 *
364 * First translate CS:rIP to a physical address.
365 */
366 uint32_t cbToTryRead;
367 RTGCPTR GCPtrPC;
368 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
369 {
370 cbToTryRead = GUEST_PAGE_SIZE;
371 GCPtrPC = pVCpu->cpum.GstCtx.rip;
372 if (IEM_IS_CANONICAL(GCPtrPC))
373 cbToTryRead = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
374 else
375 return iemRaiseGeneralProtectionFault0(pVCpu);
376 }
377 else
378 {
379 uint32_t GCPtrPC32 = pVCpu->cpum.GstCtx.eip;
380 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
381 if (GCPtrPC32 <= pVCpu->cpum.GstCtx.cs.u32Limit)
382 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrPC32 + 1;
383 else
384 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
385 if (cbToTryRead) { /* likely */ }
386 else /* overflowed */
387 {
388 Assert(GCPtrPC32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
389 cbToTryRead = UINT32_MAX;
390 }
391 GCPtrPC = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrPC32;
392 Assert(GCPtrPC <= UINT32_MAX);
393 }
394
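 /* Walk the guest page tables for the linear PC and mimic the permission checks the
    CPU itself would do for an instruction fetch: user/supervisor first, then NX. */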
395 PGMPTWALK Walk;
396 int rc = PGMGstGetPage(pVCpu, GCPtrPC, &Walk);
397 if (RT_SUCCESS(rc))
398 Assert(Walk.fSucceeded); /* probable. */
399 else
400 {
401 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
402#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
403 if (Walk.fFailed & PGM_WALKFAIL_EPT)
404 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
405#endif
406 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
407 }
408 if ((Walk.fEffective & X86_PTE_US) || pVCpu->iem.s.uCpl != 3) { /* likely */ }
409 else
410 {
411 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
412#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
413 if (Walk.fFailed & PGM_WALKFAIL_EPT)
414 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
415#endif
416 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
417 }
418 if (!(Walk.fEffective & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
419 else
420 {
421 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
422#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
423 if (Walk.fFailed & PGM_WALKFAIL_EPT)
424 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
425#endif
426 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
427 }
428 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
429 /** @todo Check reserved bits and such stuff. PGM is better at doing
430 * that, so do it when implementing the guest virtual address
431 * TLB... */
432
433 /*
434 * Read the bytes at this address.
435 */
436 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
437 if (cbToTryRead > cbLeftOnPage)
438 cbToTryRead = cbLeftOnPage;
439 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
440 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
441
442 if (!pVCpu->iem.s.fBypassHandlers)
443 {
444 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
445 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
446 { /* likely */ }
447 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
448 {
449 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
450 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
451 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
452 }
453 else
454 {
455 Log((RT_SUCCESS(rcStrict)
456 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
457 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
458 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
459 return rcStrict;
460 }
461 }
462 else
463 {
464 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
465 if (RT_SUCCESS(rc))
466 { /* likely */ }
467 else
468 {
469 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
470 GCPtrPC, GCPhys, cbToTryRead, rc));
471 return rc;
472 }
473 }
474 pVCpu->iem.s.cbOpcode = cbToTryRead;
475#endif /* !IEM_WITH_CODE_TLB */
476 return VINF_SUCCESS;
477}
478
479
480/**
481 * Invalidates the IEM TLBs.
482 *
483 * This is called internally as well as by PGM when moving GC mappings.
484 *
486 * @param pVCpu The cross context virtual CPU structure of the calling
487 * thread.
488 */
489VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPUCC pVCpu)
490{
491#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
492 Log10(("IEMTlbInvalidateAll\n"));
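 /* Invalidation is done lazily by bumping the TLB revision: entry tags include the
    revision (uTag = page | uTlbRevision), so every existing entry stops matching.
    Only when the revision counter wraps to zero do we scrub the entries by hand. */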
493# ifdef IEM_WITH_CODE_TLB
494 pVCpu->iem.s.cbInstrBufTotal = 0;
495 pVCpu->iem.s.CodeTlb.uTlbRevision += IEMTLB_REVISION_INCR;
496 if (pVCpu->iem.s.CodeTlb.uTlbRevision != 0)
497 { /* very likely */ }
498 else
499 {
500 pVCpu->iem.s.CodeTlb.uTlbRevision = IEMTLB_REVISION_INCR;
501 unsigned i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
502 while (i-- > 0)
503 pVCpu->iem.s.CodeTlb.aEntries[i].uTag = 0;
504 }
505# endif
506
507# ifdef IEM_WITH_DATA_TLB
508 pVCpu->iem.s.DataTlb.uTlbRevision += IEMTLB_REVISION_INCR;
509 if (pVCpu->iem.s.DataTlb.uTlbRevision != 0)
510 { /* very likely */ }
511 else
512 {
513 pVCpu->iem.s.DataTlb.uTlbRevision = IEMTLB_REVISION_INCR;
514 unsigned i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
515 while (i-- > 0)
516 pVCpu->iem.s.DataTlb.aEntries[i].uTag = 0;
517 }
518# endif
519#else
520 RT_NOREF(pVCpu);
521#endif
522}
523
524
525/**
526 * Invalidates a page in the TLBs.
527 *
528 * @param pVCpu The cross context virtual CPU structure of the calling
529 * thread.
530 * @param GCPtr The address of the page to invalidate
531 * @thread EMT(pVCpu)
532 */
533VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPUCC pVCpu, RTGCPTR GCPtr)
534{
535#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
536 GCPtr = IEMTLB_CALC_TAG_NO_REV(GCPtr);
537 Assert(!(GCPtr >> (48 - X86_PAGE_SHIFT)));
538 uintptr_t const idx = IEMTLB_TAG_TO_INDEX(GCPtr);
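 /* The TLBs are direct mapped: the low bits of the page tag pick the entry, and an
    entry only counts as a hit if its tag also carries the current revision. */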
539
540# ifdef IEM_WITH_CODE_TLB
541 if (pVCpu->iem.s.CodeTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.CodeTlb.uTlbRevision))
542 {
543 pVCpu->iem.s.CodeTlb.aEntries[idx].uTag = 0;
544 if (GCPtr == IEMTLB_CALC_TAG_NO_REV(pVCpu->iem.s.uInstrBufPc))
545 pVCpu->iem.s.cbInstrBufTotal = 0;
546 }
547# endif
548
549# ifdef IEM_WITH_DATA_TLB
550 if (pVCpu->iem.s.DataTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.DataTlb.uTlbRevision))
551 pVCpu->iem.s.DataTlb.aEntries[idx].uTag = 0;
552# endif
553#else
554 NOREF(pVCpu); NOREF(GCPtr);
555#endif
556}
557
558
559#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
560/**
561 * Invalidates both TLBs the slow way following a rollover.
562 *
563 * Worker for IEMTlbInvalidateAllPhysical,
564 * IEMTlbInvalidateAllPhysicalAllCpus, iemOpcodeFetchBytesJmp, iemMemMap,
565 * iemMemMapJmp and others.
566 *
567 * @thread EMT(pVCpu)
568 */
569static void IEMTlbInvalidateAllPhysicalSlow(PVMCPUCC pVCpu)
570{
571 Log10(("IEMTlbInvalidateAllPhysicalSlow\n"));
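 /* Reset the physical revisions and strip the physical-revision and mapping bits from
    every entry, so the next use of an entry is forced to redo the
    PGMPhysIemGCPhys2PtrNoLock lookup. */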
572 ASMAtomicWriteU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
573 ASMAtomicWriteU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
574
575 unsigned i;
576# ifdef IEM_WITH_CODE_TLB
577 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
578 while (i-- > 0)
579 {
580 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
581 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~( IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
582 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
583 }
584# endif
585# ifdef IEM_WITH_DATA_TLB
586 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
587 while (i-- > 0)
588 {
589 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
590 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~( IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
591 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
592 }
593# endif
594
595}
596#endif
597
598
599/**
600 * Invalidates the host physical aspects of the IEM TLBs.
601 *
602 * This is called internally as well as by PGM when moving GC mappings.
603 *
604 * @param pVCpu The cross context virtual CPU structure of the calling
605 * thread.
606 * @note Currently not used.
607 */
608VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPUCC pVCpu)
609{
610#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
611 /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
612 Log10(("IEMTlbInvalidateAllPhysical\n"));
613
614# ifdef IEM_WITH_CODE_TLB
615 pVCpu->iem.s.cbInstrBufTotal = 0;
616# endif
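 /* Bump the shared physical revision; if the counter is about to wrap, fall back to
    the slow path which scrubs every entry. */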
617 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
618 if (RT_LIKELY(uTlbPhysRev > IEMTLB_PHYS_REV_INCR * 2))
619 {
620 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
621 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
622 }
623 else
624 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
625#else
626 NOREF(pVCpu);
627#endif
628}
629
630
631/**
632 * Invalidates the host physical aspects of the IEM TLBs on all CPUs.
633 *
634 * This is called internally as well as by PGM when moving GC mappings.
635 *
636 * @param pVM The cross context VM structure.
637 * @param idCpuCaller The ID of the calling EMT if available to the caller,
638 * otherwise NIL_VMCPUID.
639 *
640 * @remarks Caller holds the PGM lock.
641 */
642VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVMCC pVM, VMCPUID idCpuCaller)
643{
644#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
645 PVMCPUCC const pVCpuCaller = idCpuCaller >= pVM->cCpus ? VMMGetCpu(pVM) : VMMGetCpuById(pVM, idCpuCaller);
646 if (pVCpuCaller)
647 VMCPU_ASSERT_EMT(pVCpuCaller);
648 Log10(("IEMTlbInvalidateAllPhysicalAllCpus\n"));
649
650 VMCC_FOR_EACH_VMCPU(pVM)
651 {
652# ifdef IEM_WITH_CODE_TLB
653 if (pVCpuCaller == pVCpu)
654 pVCpu->iem.s.cbInstrBufTotal = 0;
655# endif
656
657 uint64_t const uTlbPhysRevPrev = ASMAtomicUoReadU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev);
658 uint64_t uTlbPhysRevNew = uTlbPhysRevPrev + IEMTLB_PHYS_REV_INCR;
659 if (RT_LIKELY(uTlbPhysRevNew > IEMTLB_PHYS_REV_INCR * 2))
660 { /* likely */}
661 else if (pVCpuCaller == pVCpu)
662 uTlbPhysRevNew = IEMTLB_PHYS_REV_INCR;
663 else
664 {
665 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
666 continue;
667 }
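 /* Use compare-exchange so we don't clobber an update the target EMT may have made
    concurrently; if it raced us, its value wins and that is fine. */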
668 ASMAtomicCmpXchgU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev);
669 ASMAtomicCmpXchgU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev);
670 }
671 VMCC_FOR_EACH_VMCPU_END(pVM);
672
673#else
674 RT_NOREF(pVM, idCpuCaller);
675#endif
676}
677
678#ifdef IEM_WITH_CODE_TLB
679
680/**
681 * Tries to fetch @a cbDst opcode bytes, raises the appropriate exception on
682 * failure and jumps.
683 *
684 * We end up here for a number of reasons:
685 * - pbInstrBuf isn't yet initialized.
686 * - Advancing beyond the buffer boundary (e.g. cross page).
687 * - Advancing beyond the CS segment limit.
688 * - Fetching from non-mappable page (e.g. MMIO).
689 *
690 * @param pVCpu The cross context virtual CPU structure of the
691 * calling thread.
692 * @param pvDst Where to return the bytes.
693 * @param cbDst Number of bytes to read.
694 *
695 * @todo Make cbDst = 0 a way of initializing pbInstrBuf?
696 */
697void iemOpcodeFetchBytesJmp(PVMCPUCC pVCpu, size_t cbDst, void *pvDst) RT_NOEXCEPT
698{
699#ifdef IN_RING3
700 for (;;)
701 {
702 Assert(cbDst <= 8);
703 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
704
705 /*
706 * We might have a partial buffer match, deal with that first to make the
707 * rest simpler. This is the first part of the cross page/buffer case.
708 */
709 if (pVCpu->iem.s.pbInstrBuf != NULL)
710 {
711 if (offBuf < pVCpu->iem.s.cbInstrBuf)
712 {
713 Assert(offBuf + cbDst > pVCpu->iem.s.cbInstrBuf);
714 uint32_t const cbCopy = pVCpu->iem.s.cbInstrBuf - pVCpu->iem.s.offInstrNextByte;
715 memcpy(pvDst, &pVCpu->iem.s.pbInstrBuf[offBuf], cbCopy);
716
717 cbDst -= cbCopy;
718 pvDst = (uint8_t *)pvDst + cbCopy;
719 offBuf += cbCopy;
720 pVCpu->iem.s.offInstrNextByte = offBuf;
721 }
722 }
723
724 /*
725 * Check segment limit, figuring how much we're allowed to access at this point.
726 *
727 * We will fault immediately if RIP is past the segment limit / in non-canonical
728 * territory. If we do continue, there are one or more bytes to read before we
729 * end up in trouble and we need to do that first before faulting.
730 */
731 RTGCPTR GCPtrFirst;
732 uint32_t cbMaxRead;
733 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
734 {
735 GCPtrFirst = pVCpu->cpum.GstCtx.rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
736 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
737 { /* likely */ }
738 else
739 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
740 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
741 }
742 else
743 {
744 GCPtrFirst = pVCpu->cpum.GstCtx.eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
745 Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
746 if (RT_LIKELY((uint32_t)GCPtrFirst <= pVCpu->cpum.GstCtx.cs.u32Limit))
747 { /* likely */ }
748 else
749 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
750 cbMaxRead = pVCpu->cpum.GstCtx.cs.u32Limit - (uint32_t)GCPtrFirst + 1;
751 if (cbMaxRead != 0)
752 { /* likely */ }
753 else
754 {
755 /* Overflowed because address is 0 and limit is max. */
756 Assert(GCPtrFirst == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
757 cbMaxRead = X86_PAGE_SIZE;
758 }
759 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base;
760 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
761 if (cbMaxRead2 < cbMaxRead)
762 cbMaxRead = cbMaxRead2;
763 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
764 }
765
766 /*
767 * Get the TLB entry for this piece of code.
768 */
769 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.CodeTlb, GCPtrFirst);
770 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.CodeTlb, uTag);
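 /* The tag embeds the current code TLB revision, so entries created before the last
    IEMTlbInvalidateAll() automatically fail this compare and count as misses. */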
771 if (pTlbe->uTag == uTag)
772 {
773 /* likely when executing lots of code, otherwise unlikely */
774# ifdef VBOX_WITH_STATISTICS
775 pVCpu->iem.s.CodeTlb.cTlbHits++;
776# endif
777 }
778 else
779 {
780 pVCpu->iem.s.CodeTlb.cTlbMisses++;
781 PGMPTWALK Walk;
782 int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &Walk);
783 if (RT_FAILURE(rc))
784 {
785#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
786 /** @todo Nested VMX: Need to handle EPT violation/misconfig here? */
787 Assert(!(Walk.fFailed & PGM_WALKFAIL_EPT));
788#endif
789 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
790 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, rc);
791 }
792
793 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
794 Assert(Walk.fSucceeded);
795 pTlbe->uTag = uTag;
796 pTlbe->fFlagsAndPhysRev = (~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A))
797 | (Walk.fEffective >> X86_PTE_PAE_BIT_NX);
798 pTlbe->GCPhys = Walk.GCPhys;
799 pTlbe->pbMappingR3 = NULL;
800 }
801
802 /*
803 * Check TLB page table level access flags.
804 */
805 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
806 {
807 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && pVCpu->iem.s.uCpl == 3)
808 {
809 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
810 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
811 }
812 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
813 {
814 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrFirst));
815 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
816 }
817 }
818
819 /*
820 * Look up the physical page info if necessary.
821 */
822 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
823 { /* not necessary */ }
824 else
825 {
826 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
827 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
828 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
829 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
830 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
831 { /* likely */ }
832 else
833 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
834 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
835 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_UNASSIGNED);
836 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
837 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
838 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
839 }
840
841# if defined(IN_RING3) || defined(IN_RING0) /** @todo fixme */
842 /*
843 * Try do a direct read using the pbMappingR3 pointer.
844 */
845 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
846 == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
847 {
848 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
849 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
850 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
851 {
852 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
853 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
854 }
855 else
856 {
857 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
858 Assert(cbInstr < cbMaxRead);
859 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
860 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
861 }
862 if (cbDst <= cbMaxRead)
863 {
864 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
865 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
866 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
867 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
868 return;
869 }
870 pVCpu->iem.s.pbInstrBuf = NULL;
871
872 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
873 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
874 }
875 else
876# endif
877#if 0
878 /*
879 * If there is no special read handling, we can read a bit more and
880 * put it in the prefetch buffer.
881 */
882 if ( cbDst < cbMaxRead
883 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
884 {
885 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
886 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
887 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
888 { /* likely */ }
889 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
890 {
891 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
892 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
893 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
894 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
895 }
896 else
897 {
898 Log((RT_SUCCESS(rcStrict)
899 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
900 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
901 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
902 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
903 }
904 }
905 /*
906 * Special read handling, so only read exactly what's needed.
907 * This is a highly unlikely scenario.
908 */
909 else
910#endif
911 {
912 pVCpu->iem.s.CodeTlb.cTlbSlowReadPath++;
913 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
914 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
915 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
916 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
917 { /* likely */ }
918 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
919 {
920 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
921 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
922 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
923 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
924 }
925 else
926 {
927 Log((RT_SUCCESS(rcStrict)
928 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
929 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
930 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
931 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
932 }
933 pVCpu->iem.s.offInstrNextByte = offBuf + cbToRead;
934 if (cbToRead == cbDst)
935 return;
936 }
937
938 /*
939 * More to read, loop.
940 */
941 cbDst -= cbMaxRead;
942 pvDst = (uint8_t *)pvDst + cbMaxRead;
943 }
944#else
945 RT_NOREF(pvDst, cbDst);
946 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_INTERNAL_ERROR);
947#endif
948}
949
950#else
951
952/**
953 * Try fetch at least @a cbMin bytes more opcodes, raise the appropriate
954 * exception if it fails.
955 *
956 * @returns Strict VBox status code.
957 * @param pVCpu The cross context virtual CPU structure of the
958 * calling thread.
959 * @param cbMin The minimum number of bytes relative to offOpcode
960 * that must be read.
961 */
962VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPUCC pVCpu, size_t cbMin) RT_NOEXCEPT
963{
964 /*
965 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
966 *
967 * First translate CS:rIP to a physical address.
968 */
969 uint8_t cbLeft = pVCpu->iem.s.cbOpcode - pVCpu->iem.s.offOpcode; Assert(cbLeft < cbMin);
970 uint32_t cbToTryRead;
971 RTGCPTR GCPtrNext;
972 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
973 {
974 cbToTryRead = GUEST_PAGE_SIZE;
975 GCPtrNext = pVCpu->cpum.GstCtx.rip + pVCpu->iem.s.cbOpcode;
976 if (!IEM_IS_CANONICAL(GCPtrNext))
977 return iemRaiseGeneralProtectionFault0(pVCpu);
978 }
979 else
980 {
981 uint32_t GCPtrNext32 = pVCpu->cpum.GstCtx.eip;
982 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
983 GCPtrNext32 += pVCpu->iem.s.cbOpcode;
984 if (GCPtrNext32 > pVCpu->cpum.GstCtx.cs.u32Limit)
985 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
986 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrNext32 + 1;
987 if (!cbToTryRead) /* overflowed */
988 {
989 Assert(GCPtrNext32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
990 cbToTryRead = UINT32_MAX;
991 /** @todo check out wrapping around the code segment. */
992 }
993 if (cbToTryRead < cbMin - cbLeft)
994 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
995 GCPtrNext = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrNext32;
996 }
997
998 /* Only read up to the end of the page, and make sure we don't read more
999 than the opcode buffer can hold. */
1000 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1001 if (cbToTryRead > cbLeftOnPage)
1002 cbToTryRead = cbLeftOnPage;
1003 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode)
1004 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode;
1005/** @todo r=bird: Convert assertion into undefined opcode exception? */
1006 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
1007
1008 PGMPTWALK Walk;
1009 int rc = PGMGstGetPage(pVCpu, GCPtrNext, &Walk);
1010 if (RT_FAILURE(rc))
1011 {
1012 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
1013#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1014 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1015 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
1016#endif
1017 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
1018 }
1019 if (!(Walk.fEffective & X86_PTE_US) && pVCpu->iem.s.uCpl == 3)
1020 {
1021 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
1022#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1023 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1024 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
1025#endif
1026 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1027 }
1028 if ((Walk.fEffective & X86_PTE_PAE_NX) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
1029 {
1030 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
1031#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1032 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1033 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
1034#endif
1035 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1036 }
1037 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1038 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pVCpu->iem.s.cbOpcode));
1039 /** @todo Check reserved bits and such stuff. PGM is better at doing
1040 * that, so do it when implementing the guest virtual address
1041 * TLB... */
1042
1043 /*
1044 * Read the bytes at this address.
1045 *
1046 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
1047 * and since PATM should only patch the start of an instruction there
1048 * should be no need to check again here.
1049 */
1050 if (!pVCpu->iem.s.fBypassHandlers)
1051 {
1052 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode],
1053 cbToTryRead, PGMACCESSORIGIN_IEM);
1054 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1055 { /* likely */ }
1056 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1057 {
1058 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1059 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1060 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1061 }
1062 else
1063 {
1064 Log((RT_SUCCESS(rcStrict)
1065 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1066 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1067 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1068 return rcStrict;
1069 }
1070 }
1071 else
1072 {
1073 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode], GCPhys, cbToTryRead);
1074 if (RT_SUCCESS(rc))
1075 { /* likely */ }
1076 else
1077 {
1078 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
1079 return rc;
1080 }
1081 }
1082 pVCpu->iem.s.cbOpcode += cbToTryRead;
1083 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
1084
1085 return VINF_SUCCESS;
1086}
1087
1088#endif /* !IEM_WITH_CODE_TLB */
1089#ifndef IEM_WITH_SETJMP
1090
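/* The *Slow() getters below are the out-of-line fall-back paths used when the
   prefetched opcode bytes in abOpcode don't cover the requested value; they top up
   abOpcode via iemOpcodeFetchMoreBytes() (directly or through iemOpcodeGetNextU8Slow())
   and then extract the value. */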
1091/**
1092 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
1093 *
1094 * @returns Strict VBox status code.
1095 * @param pVCpu The cross context virtual CPU structure of the
1096 * calling thread.
1097 * @param pb Where to return the opcode byte.
1098 */
1099VBOXSTRICTRC iemOpcodeGetNextU8Slow(PVMCPUCC pVCpu, uint8_t *pb) RT_NOEXCEPT
1100{
1101 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1102 if (rcStrict == VINF_SUCCESS)
1103 {
1104 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1105 *pb = pVCpu->iem.s.abOpcode[offOpcode];
1106 pVCpu->iem.s.offOpcode = offOpcode + 1;
1107 }
1108 else
1109 *pb = 0;
1110 return rcStrict;
1111}
1112
1113#else /* IEM_WITH_SETJMP */
1114
1115/**
1116 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
1117 *
1118 * @returns The opcode byte.
1119 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1120 */
1121uint8_t iemOpcodeGetNextU8SlowJmp(PVMCPUCC pVCpu) RT_NOEXCEPT
1122{
1123# ifdef IEM_WITH_CODE_TLB
1124 uint8_t u8;
1125 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
1126 return u8;
1127# else
1128 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1129 if (rcStrict == VINF_SUCCESS)
1130 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
1131 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1132# endif
1133}
1134
1135#endif /* IEM_WITH_SETJMP */
1136
1137#ifndef IEM_WITH_SETJMP
1138
1139/**
1140 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
1141 *
1142 * @returns Strict VBox status code.
1143 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1144 * @param pu16 Where to return the opcode word.
1145 */
1146VBOXSTRICTRC iemOpcodeGetNextS8SxU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
1147{
1148 uint8_t u8;
1149 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1150 if (rcStrict == VINF_SUCCESS)
1151 *pu16 = (int8_t)u8;
1152 return rcStrict;
1153}
1154
1155
1156/**
1157 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
1158 *
1159 * @returns Strict VBox status code.
1160 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1161 * @param pu32 Where to return the opcode dword.
1162 */
1163VBOXSTRICTRC iemOpcodeGetNextS8SxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1164{
1165 uint8_t u8;
1166 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1167 if (rcStrict == VINF_SUCCESS)
1168 *pu32 = (int8_t)u8;
1169 return rcStrict;
1170}
1171
1172
1173/**
1174 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
1175 *
1176 * @returns Strict VBox status code.
1177 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1178 * @param pu64 Where to return the opcode qword.
1179 */
1180VBOXSTRICTRC iemOpcodeGetNextS8SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1181{
1182 uint8_t u8;
1183 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1184 if (rcStrict == VINF_SUCCESS)
1185 *pu64 = (int8_t)u8;
1186 return rcStrict;
1187}
1188
1189#endif /* !IEM_WITH_SETJMP */
1190
1191
1192#ifndef IEM_WITH_SETJMP
1193
1194/**
1195 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
1196 *
1197 * @returns Strict VBox status code.
1198 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1199 * @param pu16 Where to return the opcode word.
1200 */
1201VBOXSTRICTRC iemOpcodeGetNextU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
1202{
1203 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1204 if (rcStrict == VINF_SUCCESS)
1205 {
1206 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1207# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1208 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1209# else
1210 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1211# endif
1212 pVCpu->iem.s.offOpcode = offOpcode + 2;
1213 }
1214 else
1215 *pu16 = 0;
1216 return rcStrict;
1217}
1218
1219#else /* IEM_WITH_SETJMP */
1220
1221/**
1222 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error.
1223 *
1224 * @returns The opcode word.
1225 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1226 */
1227uint16_t iemOpcodeGetNextU16SlowJmp(PVMCPUCC pVCpu) RT_NOEXCEPT
1228{
1229# ifdef IEM_WITH_CODE_TLB
1230 uint16_t u16;
1231 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
1232 return u16;
1233# else
1234 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1235 if (rcStrict == VINF_SUCCESS)
1236 {
1237 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1238 pVCpu->iem.s.offOpcode += 2;
1239# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1240 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1241# else
1242 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1243# endif
1244 }
1245 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1246# endif
1247}
1248
1249#endif /* IEM_WITH_SETJMP */
1250
1251#ifndef IEM_WITH_SETJMP
1252
1253/**
1254 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
1255 *
1256 * @returns Strict VBox status code.
1257 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1258 * @param pu32 Where to return the opcode double word.
1259 */
1260VBOXSTRICTRC iemOpcodeGetNextU16ZxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1261{
1262 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1263 if (rcStrict == VINF_SUCCESS)
1264 {
1265 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1266 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1267 pVCpu->iem.s.offOpcode = offOpcode + 2;
1268 }
1269 else
1270 *pu32 = 0;
1271 return rcStrict;
1272}
1273
1274
1275/**
1276 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
1277 *
1278 * @returns Strict VBox status code.
1279 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1280 * @param pu64 Where to return the opcode quad word.
1281 */
1282VBOXSTRICTRC iemOpcodeGetNextU16ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1283{
1284 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1285 if (rcStrict == VINF_SUCCESS)
1286 {
1287 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1288 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1289 pVCpu->iem.s.offOpcode = offOpcode + 2;
1290 }
1291 else
1292 *pu64 = 0;
1293 return rcStrict;
1294}
1295
1296#endif /* !IEM_WITH_SETJMP */
1297
1298#ifndef IEM_WITH_SETJMP
1299
1300/**
1301 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
1302 *
1303 * @returns Strict VBox status code.
1304 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1305 * @param pu32 Where to return the opcode dword.
1306 */
1307VBOXSTRICTRC iemOpcodeGetNextU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1308{
1309 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1310 if (rcStrict == VINF_SUCCESS)
1311 {
1312 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1313# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1314 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1315# else
1316 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1317 pVCpu->iem.s.abOpcode[offOpcode + 1],
1318 pVCpu->iem.s.abOpcode[offOpcode + 2],
1319 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1320# endif
1321 pVCpu->iem.s.offOpcode = offOpcode + 4;
1322 }
1323 else
1324 *pu32 = 0;
1325 return rcStrict;
1326}
1327
1328#else /* IEM_WITH_SETJMP */
1329
1330/**
1331 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
1332 *
1333 * @returns The opcode dword.
1334 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1335 */
1336uint32_t iemOpcodeGetNextU32SlowJmp(PVMCPUCC pVCpu) RT_NOEXCEPT
1337{
1338# ifdef IEM_WITH_CODE_TLB
1339 uint32_t u32;
1340 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
1341 return u32;
1342# else
1343 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1344 if (rcStrict == VINF_SUCCESS)
1345 {
1346 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1347 pVCpu->iem.s.offOpcode = offOpcode + 4;
1348# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1349 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1350# else
1351 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1352 pVCpu->iem.s.abOpcode[offOpcode + 1],
1353 pVCpu->iem.s.abOpcode[offOpcode + 2],
1354 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1355# endif
1356 }
1357 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1358# endif
1359}
1360
1361#endif /* IEM_WITH_SETJMP */
1362
1363#ifndef IEM_WITH_SETJMP
1364
1365/**
1366 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
1367 *
1368 * @returns Strict VBox status code.
1369 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1370 * @param pu64 Where to return the opcode qword.
1371 */
1372VBOXSTRICTRC iemOpcodeGetNextU32ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1373{
1374 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1375 if (rcStrict == VINF_SUCCESS)
1376 {
1377 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1378 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1379 pVCpu->iem.s.abOpcode[offOpcode + 1],
1380 pVCpu->iem.s.abOpcode[offOpcode + 2],
1381 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1382 pVCpu->iem.s.offOpcode = offOpcode + 4;
1383 }
1384 else
1385 *pu64 = 0;
1386 return rcStrict;
1387}
1388
1389
1390/**
1391 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
1392 *
1393 * @returns Strict VBox status code.
1394 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1395 * @param pu64 Where to return the opcode qword.
1396 */
1397VBOXSTRICTRC iemOpcodeGetNextS32SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1398{
1399 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1400 if (rcStrict == VINF_SUCCESS)
1401 {
1402 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1403 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1404 pVCpu->iem.s.abOpcode[offOpcode + 1],
1405 pVCpu->iem.s.abOpcode[offOpcode + 2],
1406 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1407 pVCpu->iem.s.offOpcode = offOpcode + 4;
1408 }
1409 else
1410 *pu64 = 0;
1411 return rcStrict;
1412}
1413
1414#endif /* !IEM_WITH_SETJMP */
1415
1416#ifndef IEM_WITH_SETJMP
1417
1418/**
1419 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
1420 *
1421 * @returns Strict VBox status code.
1422 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1423 * @param pu64 Where to return the opcode qword.
1424 */
1425VBOXSTRICTRC iemOpcodeGetNextU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1426{
1427 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
1428 if (rcStrict == VINF_SUCCESS)
1429 {
1430 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1431# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1432 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1433# else
1434 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1435 pVCpu->iem.s.abOpcode[offOpcode + 1],
1436 pVCpu->iem.s.abOpcode[offOpcode + 2],
1437 pVCpu->iem.s.abOpcode[offOpcode + 3],
1438 pVCpu->iem.s.abOpcode[offOpcode + 4],
1439 pVCpu->iem.s.abOpcode[offOpcode + 5],
1440 pVCpu->iem.s.abOpcode[offOpcode + 6],
1441 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1442# endif
1443 pVCpu->iem.s.offOpcode = offOpcode + 8;
1444 }
1445 else
1446 *pu64 = 0;
1447 return rcStrict;
1448}
1449
1450#else /* IEM_WITH_SETJMP */
1451
1452/**
1453 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
1454 *
1455 * @returns The opcode qword.
1456 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1457 */
1458uint64_t iemOpcodeGetNextU64SlowJmp(PVMCPUCC pVCpu) RT_NOEXCEPT
1459{
1460# ifdef IEM_WITH_CODE_TLB
1461 uint64_t u64;
1462 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
1463 return u64;
1464# else
1465 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
1466 if (rcStrict == VINF_SUCCESS)
1467 {
1468 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1469 pVCpu->iem.s.offOpcode = offOpcode + 8;
1470# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1471 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1472# else
1473 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1474 pVCpu->iem.s.abOpcode[offOpcode + 1],
1475 pVCpu->iem.s.abOpcode[offOpcode + 2],
1476 pVCpu->iem.s.abOpcode[offOpcode + 3],
1477 pVCpu->iem.s.abOpcode[offOpcode + 4],
1478 pVCpu->iem.s.abOpcode[offOpcode + 5],
1479 pVCpu->iem.s.abOpcode[offOpcode + 6],
1480 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1481# endif
1482 }
1483 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1484# endif
1485}
1486
1487#endif /* IEM_WITH_SETJMP */
1488
1489
1490
1491/** @name Misc Worker Functions.
1492 * @{
1493 */
1494
1495/**
1496 * Gets the exception class for the specified exception vector.
1497 *
1498 * @returns The class of the specified exception.
1499 * @param uVector The exception vector.
1500 */
1501static IEMXCPTCLASS iemGetXcptClass(uint8_t uVector) RT_NOEXCEPT
1502{
1503 Assert(uVector <= X86_XCPT_LAST);
1504 switch (uVector)
1505 {
1506 case X86_XCPT_DE:
1507 case X86_XCPT_TS:
1508 case X86_XCPT_NP:
1509 case X86_XCPT_SS:
1510 case X86_XCPT_GP:
1511 case X86_XCPT_SX: /* AMD only */
1512 return IEMXCPTCLASS_CONTRIBUTORY;
1513
1514 case X86_XCPT_PF:
1515 case X86_XCPT_VE: /* Intel only */
1516 return IEMXCPTCLASS_PAGE_FAULT;
1517
1518 case X86_XCPT_DF:
1519 return IEMXCPTCLASS_DOUBLE_FAULT;
1520 }
1521 return IEMXCPTCLASS_BENIGN;
1522}
1523
1524
1525/**
1526 * Evaluates how to handle an exception caused during delivery of another event
1527 * (exception / interrupt).
1528 *
1529 * @returns How to handle the recursive exception.
1530 * @param pVCpu The cross context virtual CPU structure of the
1531 * calling thread.
1532 * @param fPrevFlags The flags of the previous event.
1533 * @param uPrevVector The vector of the previous event.
1534 * @param fCurFlags The flags of the current exception.
1535 * @param uCurVector The vector of the current exception.
1536 * @param pfXcptRaiseInfo Where to store additional information about the
1537 * exception condition. Optional.
1538 */
1539VMM_INT_DECL(IEMXCPTRAISE) IEMEvaluateRecursiveXcpt(PVMCPUCC pVCpu, uint32_t fPrevFlags, uint8_t uPrevVector, uint32_t fCurFlags,
1540 uint8_t uCurVector, PIEMXCPTRAISEINFO pfXcptRaiseInfo)
1541{
1542 /*
1543 * Only CPU exceptions can be raised while delivering other events; software interrupt
1544 * (INTn/INT3/INTO/ICEBP) generated exceptions cannot occur as the current (second) exception.
1545 */
1546 AssertReturn(fCurFlags & IEM_XCPT_FLAGS_T_CPU_XCPT, IEMXCPTRAISE_INVALID);
1547 Assert(pVCpu); RT_NOREF(pVCpu);
1548 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x\n", uPrevVector, uCurVector));
1549
1550 IEMXCPTRAISE enmRaise = IEMXCPTRAISE_CURRENT_XCPT;
1551 IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
1552 if (fPrevFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
1553 {
1554 IEMXCPTCLASS enmPrevXcptClass = iemGetXcptClass(uPrevVector);
1555 if (enmPrevXcptClass != IEMXCPTCLASS_BENIGN)
1556 {
1557 IEMXCPTCLASS enmCurXcptClass = iemGetXcptClass(uCurVector);
1558 if ( enmPrevXcptClass == IEMXCPTCLASS_PAGE_FAULT
1559 && ( enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT
1560 || enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY))
1561 {
1562 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
1563 fRaiseInfo = enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT ? IEMXCPTRAISEINFO_PF_PF
1564 : IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT;
1565 Log2(("IEMEvaluateRecursiveXcpt: Vectoring page fault. uPrevVector=%#x uCurVector=%#x uCr2=%#RX64\n", uPrevVector,
1566 uCurVector, pVCpu->cpum.GstCtx.cr2));
1567 }
1568 else if ( enmPrevXcptClass == IEMXCPTCLASS_CONTRIBUTORY
1569 && enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY)
1570 {
1571 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
1572 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x -> #DF\n", uPrevVector, uCurVector));
1573 }
1574 else if ( enmPrevXcptClass == IEMXCPTCLASS_DOUBLE_FAULT
1575 && ( enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY
1576 || enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT))
1577 {
1578 enmRaise = IEMXCPTRAISE_TRIPLE_FAULT;
1579 Log2(("IEMEvaluateRecursiveXcpt: #DF handler raised a %#x exception -> triple fault\n", uCurVector));
1580 }
1581 }
1582 else
1583 {
1584 if (uPrevVector == X86_XCPT_NMI)
1585 {
1586 fRaiseInfo = IEMXCPTRAISEINFO_NMI_XCPT;
1587 if (uCurVector == X86_XCPT_PF)
1588 {
1589 fRaiseInfo |= IEMXCPTRAISEINFO_NMI_PF;
1590 Log2(("IEMEvaluateRecursiveXcpt: NMI delivery caused a page fault\n"));
1591 }
1592 }
1593 else if ( uPrevVector == X86_XCPT_AC
1594 && uCurVector == X86_XCPT_AC)
1595 {
1596 enmRaise = IEMXCPTRAISE_CPU_HANG;
1597 fRaiseInfo = IEMXCPTRAISEINFO_AC_AC;
1598 Log2(("IEMEvaluateRecursiveXcpt: Recursive #AC - Bad guest\n"));
1599 }
1600 }
1601 }
1602 else if (fPrevFlags & IEM_XCPT_FLAGS_T_EXT_INT)
1603 {
1604 fRaiseInfo = IEMXCPTRAISEINFO_EXT_INT_XCPT;
1605 if (uCurVector == X86_XCPT_PF)
1606 fRaiseInfo |= IEMXCPTRAISEINFO_EXT_INT_PF;
1607 }
1608 else
1609 {
1610 Assert(fPrevFlags & IEM_XCPT_FLAGS_T_SOFT_INT);
1611 fRaiseInfo = IEMXCPTRAISEINFO_SOFT_INT_XCPT;
1612 }
1613
1614 if (pfXcptRaiseInfo)
1615 *pfXcptRaiseInfo = fRaiseInfo;
1616 return enmRaise;
1617}
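
/* Informal sketch of a typical evaluation: a #PF raised while delivering an earlier
   #PF classifies as page-fault-on-page-fault and escalates to #DF, per the logic above:

        IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
        IEMXCPTRAISE     enmRaise   = IEMEvaluateRecursiveXcpt(pVCpu, IEM_XCPT_FLAGS_T_CPU_XCPT, X86_XCPT_PF,
                                                               IEM_XCPT_FLAGS_T_CPU_XCPT, X86_XCPT_PF, &fRaiseInfo);
        Assert(enmRaise == IEMXCPTRAISE_DOUBLE_FAULT && fRaiseInfo == IEMXCPTRAISEINFO_PF_PF);
 */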
1618
1619
1620/**
1621 * Enters the CPU shutdown state initiated by a triple fault or other
1622 * unrecoverable conditions.
1623 *
1624 * @returns Strict VBox status code.
1625 * @param pVCpu The cross context virtual CPU structure of the
1626 * calling thread.
1627 */
1628static VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPUCC pVCpu) RT_NOEXCEPT
1629{
1630 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
1631 IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(pVCpu, VMX_EXIT_TRIPLE_FAULT, 0 /* u64ExitQual */);
1632
1633 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
1634 {
1635 Log2(("shutdown: Guest intercept -> #VMEXIT\n"));
1636 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
1637 }
1638
1639 RT_NOREF(pVCpu);
1640 return VINF_EM_TRIPLE_FAULT;
1641}
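
/* Note the ordering above: a VMX non-root guest takes a triple-fault VM-exit first,
   an SVM guest with the shutdown intercept set takes SVM_EXIT_SHUTDOWN, and only
   otherwise does the condition surface to the VMM as VINF_EM_TRIPLE_FAULT. */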
1642
1643
1644/**
1645 * Validates a new SS segment.
1646 *
1647 * @returns VBox strict status code.
1648 * @param pVCpu The cross context virtual CPU structure of the
1649 * calling thread.
1650 * @param NewSS The new SS selector.
1651 * @param uCpl The CPL to load the stack for.
1652 * @param pDesc Where to return the descriptor.
1653 */
1654static VBOXSTRICTRC iemMiscValidateNewSS(PVMCPUCC pVCpu, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc) RT_NOEXCEPT
1655{
1656 /* Null selectors are not allowed (we're not called for dispatching
1657 interrupts with SS=0 in long mode). */
1658 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
1659 {
1660 Log(("iemMiscValidateNewSS: %#x - null selector -> #TS(0)\n", NewSS));
1661 return iemRaiseTaskSwitchFault0(pVCpu);
1662 }
1663
1664 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
1665 if ((NewSS & X86_SEL_RPL) != uCpl)
1666 {
1667 Log(("iemMiscValidateNewSS: %#x - RPL and CPL (%d) differ -> #TS\n", NewSS, uCpl));
1668 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1669 }
1670
1671 /*
1672 * Read the descriptor.
1673 */
1674 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
1675 if (rcStrict != VINF_SUCCESS)
1676 return rcStrict;
1677
1678 /*
1679 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
1680 */
1681 if (!pDesc->Legacy.Gen.u1DescType)
1682 {
1683 Log(("iemMiscValidateNewSS: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
1684 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1685 }
1686
1687 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
1688 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
1689 {
1690 Log(("iemMiscValidateNewSS: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
1691 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1692 }
1693 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
1694 {
1695 Log(("iemMiscValidateNewSS: %#x - DPL (%d) and CPL (%d) differ -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
1696 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1697 }
1698
1699 /* Is it there? */
1700 /** @todo testcase: Is this checked before the canonical / limit check below? */
1701 if (!pDesc->Legacy.Gen.u1Present)
1702 {
1703 Log(("iemMiscValidateNewSS: %#x - segment not present -> #NP\n", NewSS));
1704 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
1705 }
1706
1707 return VINF_SUCCESS;
1708}
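
/* Informal caller sketch (the uNewSS local is illustrative):

        IEMSELDESC   DescSS;
        VBOXSTRICTRC rcStrict2 = iemMiscValidateNewSS(pVCpu, uNewSS, pVCpu->iem.s.uCpl, &DescSS);
        if (rcStrict2 != VINF_SUCCESS)
            return rcStrict2;   // #TS or #NP already raised with the offending selector
 */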
1709
1710/** @} */
1711
1712
1713/** @name Raising Exceptions.
1714 *
1715 * @{
1716 */
1717
1718
1719/**
1720 * Loads the specified stack far pointer from the TSS.
1721 *
1722 * @returns VBox strict status code.
1723 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1724 * @param uCpl The CPL to load the stack for.
1725 * @param pSelSS Where to return the new stack segment.
1726 * @param puEsp Where to return the new stack pointer.
1727 */
1728static VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPUCC pVCpu, uint8_t uCpl, PRTSEL pSelSS, uint32_t *puEsp) RT_NOEXCEPT
1729{
1730 VBOXSTRICTRC rcStrict;
1731 Assert(uCpl < 4);
1732
1733 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
1734 switch (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type)
1735 {
1736 /*
1737 * 16-bit TSS (X86TSS16).
1738 */
1739 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
1740 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
1741 {
1742 uint32_t off = uCpl * 4 + 2;
1743 if (off + 4 <= pVCpu->cpum.GstCtx.tr.u32Limit)
1744 {
1745 /** @todo check actual access pattern here. */
1746 uint32_t u32Tmp = 0; /* gcc maybe... */
1747 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
1748 if (rcStrict == VINF_SUCCESS)
1749 {
1750 *puEsp = RT_LOWORD(u32Tmp);
1751 *pSelSS = RT_HIWORD(u32Tmp);
1752 return VINF_SUCCESS;
1753 }
1754 }
1755 else
1756 {
1757 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
1758 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
1759 }
1760 break;
1761 }
1762
1763 /*
1764 * 32-bit TSS (X86TSS32).
1765 */
1766 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
1767 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
1768 {
1769 uint32_t off = uCpl * 8 + 4;
1770 if (off + 7 <= pVCpu->cpum.GstCtx.tr.u32Limit)
1771 {
1772 /** @todo check actual access pattern here. */
1773 uint64_t u64Tmp;
1774 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
1775 if (rcStrict == VINF_SUCCESS)
1776 {
1777 *puEsp = u64Tmp & UINT32_MAX;
1778 *pSelSS = (RTSEL)(u64Tmp >> 32);
1779 return VINF_SUCCESS;
1780 }
1781 }
1782 else
1783 {
1784 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
1785 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
1786 }
1787 break;
1788 }
1789
1790 default:
1791 AssertFailed();
1792 rcStrict = VERR_IEM_IPE_4;
1793 break;
1794 }
1795
1796 *puEsp = 0; /* make gcc happy */
1797 *pSelSS = 0; /* make gcc happy */
1798 return rcStrict;
1799}
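
/* Offsets resolved by the code above, assuming the standard X86TSS32/X86TSS16 layouts:
        32-bit TSS:  uCpl=0 -> esp0 @ 0x04, ss0 @ 0x08
                     uCpl=1 -> esp1 @ 0x0c, ss1 @ 0x10
                     uCpl=2 -> esp2 @ 0x14, ss2 @ 0x18
        16-bit TSS:  uCpl=0 -> sp0  @ 0x02, ss0 @ 0x04  (and so on in 4 byte steps)
 */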
1800
1801
1802/**
1803 * Loads the specified stack pointer from the 64-bit TSS.
1804 *
1805 * @returns VBox strict status code.
1806 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1807 * @param uCpl The CPL to load the stack for.
1808 * @param uIst The interrupt stack table index, 0 if to use uCpl.
1809 * @param puRsp Where to return the new stack pointer.
1810 */
1811static VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPUCC pVCpu, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp) RT_NOEXCEPT
1812{
1813 Assert(uCpl < 4);
1814 Assert(uIst < 8);
1815 *puRsp = 0; /* make gcc happy */
1816
1817 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
1818 AssertReturn(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
1819
1820 uint32_t off;
1821 if (uIst)
1822 off = (uIst - 1) * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, ist1);
1823 else
1824 off = uCpl * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, rsp0);
1825 if (off + sizeof(uint64_t) > pVCpu->cpum.GstCtx.tr.u32Limit)
1826 {
1827 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pVCpu->cpum.GstCtx.tr.u32Limit));
1828 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
1829 }
1830
1831 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
1832}
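
/* Offsets resolved by the code above, assuming the standard X86TSS64 layout:
        uIst = 0:  rsp0 @ 0x04, rsp1 @ 0x0c, rsp2 @ 0x14  (selected by uCpl)
        uIst > 0:  ist1 @ 0x24, ist2 @ 0x2c, ..., ist7 @ 0x54
 */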
1833
1834
1835/**
1836 * Adjust the CPU state according to the exception being raised.
1837 *
1838 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1839 * @param u8Vector The exception that has been raised.
1840 */
1841DECLINLINE(void) iemRaiseXcptAdjustState(PVMCPUCC pVCpu, uint8_t u8Vector)
1842{
1843 switch (u8Vector)
1844 {
1845 case X86_XCPT_DB:
1846 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);
1847 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
1848 break;
1849 /** @todo Read the AMD and Intel exception reference... */
1850 }
1851}
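
/* Example: when a #DB is delivered, DR7.GD is cleared above so the handler can
   access the debug registers without immediately raising another general-detect #DB. */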
1852
1853
1854/**
1855 * Implements exceptions and interrupts for real mode.
1856 *
1857 * @returns VBox strict status code.
1858 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1859 * @param cbInstr The number of bytes to offset rIP by in the return
1860 * address.
1861 * @param u8Vector The interrupt / exception vector number.
1862 * @param fFlags The flags.
1863 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
1864 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
1865 */
1866static VBOXSTRICTRC
1867iemRaiseXcptOrIntInRealMode(PVMCPUCC pVCpu,
1868 uint8_t cbInstr,
1869 uint8_t u8Vector,
1870 uint32_t fFlags,
1871 uint16_t uErr,
1872 uint64_t uCr2) RT_NOEXCEPT
1873{
1874 NOREF(uErr); NOREF(uCr2);
1875 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
1876
1877 /*
1878 * Read the IDT entry.
1879 */
1880 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
1881 {
1882 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
1883 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
1884 }
1885 RTFAR16 Idte;
1886 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(4) * u8Vector);
1887 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
1888 {
1889 Log(("iemRaiseXcptOrIntInRealMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
1890 return rcStrict;
1891 }
1892
1893 /*
1894 * Push the stack frame.
1895 */
1896 uint16_t *pu16Frame;
1897 uint64_t uNewRsp;
1898 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, (void **)&pu16Frame, &uNewRsp);
1899 if (rcStrict != VINF_SUCCESS)
1900 return rcStrict;
1901
1902 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
1903#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
1904 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
1905 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
1906 fEfl |= UINT16_C(0xf000);
1907#endif
1908 pu16Frame[2] = (uint16_t)fEfl;
1909 pu16Frame[1] = (uint16_t)pVCpu->cpum.GstCtx.cs.Sel;
1910 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
1911 rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu16Frame, uNewRsp);
1912 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
1913 return rcStrict;
1914
1915 /*
1916 * Load the vector address into cs:ip and make exception specific state
1917 * adjustments.
1918 */
1919 pVCpu->cpum.GstCtx.cs.Sel = Idte.sel;
1920 pVCpu->cpum.GstCtx.cs.ValidSel = Idte.sel;
1921 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
1922 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)Idte.sel << 4;
1923 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
1924 pVCpu->cpum.GstCtx.rip = Idte.off;
1925 fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC);
1926 IEMMISC_SET_EFL(pVCpu, fEfl);
1927
1928 /** @todo do we actually do this in real mode? */
1929 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
1930 iemRaiseXcptAdjustState(pVCpu, u8Vector);
1931
1932 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
1933}
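
/* Worked example of the path above for a software INT 21h executed at CS:IP=1234h:5678h
   with EFLAGS=0202h (illustrative numbers):
     - IDT entry: read as a far16 pointer from IDTR.base + 4 * 21h (normally linear 00084h
       in real mode), giving Idte.sel:Idte.off.
     - Stack:     SP drops by 6; [SP+4]=0202h (FLAGS), [SP+2]=1234h (CS),
                  [SP+0]=IP of the instruction following the INT.
     - Execution resumes at Idte.sel:Idte.off with IF, TF and AC cleared.
 */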
1934
1935
1936/**
1937 * Loads a NULL data selector into a segment register when coming from V8086 mode.
1938 *
1939 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1940 * @param pSReg Pointer to the segment register.
1941 */
1942DECLINLINE(void) iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPUCC pVCpu, PCPUMSELREG pSReg)
1943{
1944 pSReg->Sel = 0;
1945 pSReg->ValidSel = 0;
1946 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
1947 {
1948 /* VT-x (Intel 3960x) doesn't change the base and limit; it clears and sets the following attributes. */
1949 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
1950 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
1951 }
1952 else
1953 {
1954 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
1955 /** @todo check this on AMD-V */
1956 pSReg->u64Base = 0;
1957 pSReg->u32Limit = 0;
1958 }
1959}
1960
1961
1962/**
1963 * Loads a segment selector during a task switch in V8086 mode.
1964 *
1965 * @param pSReg Pointer to the segment register.
1966 * @param uSel The selector value to load.
1967 */
1968DECLINLINE(void) iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
1969{
1970 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
1971 pSReg->Sel = uSel;
1972 pSReg->ValidSel = uSel;
1973 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
1974 pSReg->u64Base = uSel << 4;
1975 pSReg->u32Limit = 0xffff;
1976 pSReg->Attr.u = 0xf3;
1977}
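
/* Worked example of the conversion above (selector value is illustrative):

        CPUMSELREG SRegTmp;
        iemHlpLoadSelectorInV86Mode(&SRegTmp, 0x2345);
        Assert(SRegTmp.u64Base  == UINT64_C(0x23450));  // uSel << 4
        Assert(SRegTmp.u32Limit == UINT32_C(0xffff));
        Assert(SRegTmp.Attr.u   == 0xf3);               // present, DPL=3, accessed read/write data
 */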
1978
1979
1980/**
1981 * Loads a segment selector during a task switch in protected mode.
1982 *
1983 * In this task switch scenario, we would throw \#TS exceptions rather than
1984 * \#GPs.
1985 *
1986 * @returns VBox strict status code.
1987 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1988 * @param pSReg Pointer to the segment register.
1989 * @param uSel The new selector value.
1990 *
1991 * @remarks This does _not_ handle CS or SS.
1992 * @remarks This expects pVCpu->iem.s.uCpl to be up to date.
1993 */
1994static VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPUCC pVCpu, PCPUMSELREG pSReg, uint16_t uSel) RT_NOEXCEPT
1995{
1996 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
1997
1998 /* Null data selector. */
1999 if (!(uSel & X86_SEL_MASK_OFF_RPL))
2000 {
2001 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
2002 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
2003 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2004 return VINF_SUCCESS;
2005 }
2006
2007 /* Fetch the descriptor. */
2008 IEMSELDESC Desc;
2009 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
2010 if (rcStrict != VINF_SUCCESS)
2011 {
2012 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
2013 VBOXSTRICTRC_VAL(rcStrict)));
2014 return rcStrict;
2015 }
2016
2017 /* Must be a data segment or readable code segment. */
2018 if ( !Desc.Legacy.Gen.u1DescType
2019 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
2020 {
2021 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
2022 Desc.Legacy.Gen.u4Type));
2023 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2024 }
2025
2026 /* Check privileges for data segments and non-conforming code segments. */
2027 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2028 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2029 {
2030 /* The RPL and the new CPL must be less than or equal to the DPL. */
2031 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
2032 || (pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl))
2033 {
2034 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
2035 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
2036 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2037 }
2038 }
2039
2040 /* Is it there? */
2041 if (!Desc.Legacy.Gen.u1Present)
2042 {
2043 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
2044 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2045 }
2046
2047 /* The base and limit. */
2048 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
2049 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
2050
2051 /*
2052 * Ok, everything checked out fine. Now set the accessed bit before
2053 * committing the result into the registers.
2054 */
2055 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2056 {
2057 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
2058 if (rcStrict != VINF_SUCCESS)
2059 return rcStrict;
2060 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2061 }
2062
2063 /* Commit */
2064 pSReg->Sel = uSel;
2065 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
2066 pSReg->u32Limit = cbLimit;
2067 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
2068 pSReg->ValidSel = uSel;
2069 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2070 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2071 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
2072
2073 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
2074 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2075 return VINF_SUCCESS;
2076}
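
/* Privilege sketch for the non-conforming path above: with CPL=2, a descriptor with
   DPL=1 fails (CPL > DPL) and a selector with RPL=3 against DPL=2 fails (RPL > DPL),
   both raising #TS with the RPL-stripped selector as error code; DPL=3 passes for any
   RPL since neither RPL nor CPL can exceed it. */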
2077
2078
2079/**
2080 * Performs a task switch.
2081 *
2082 * If the task switch is the result of a JMP, CALL or IRET instruction, the
2083 * caller is responsible for performing the necessary checks (like DPL, TSS
2084 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
2085 * reference for JMP, CALL, IRET.
2086 *
2087 * If the task switch is due to a software interrupt or hardware exception,
2088 * the caller is responsible for validating the TSS selector and descriptor. See
2089 * Intel Instruction reference for INT n.
2090 *
2091 * @returns VBox strict status code.
2092 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2093 * @param enmTaskSwitch The cause of the task switch.
2094 * @param uNextEip The EIP effective after the task switch.
2095 * @param fFlags The flags, see IEM_XCPT_FLAGS_XXX.
2096 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2097 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2098 * @param SelTSS The TSS selector of the new task.
2099 * @param pNewDescTSS Pointer to the new TSS descriptor.
2100 */
2101VBOXSTRICTRC
2102iemTaskSwitch(PVMCPUCC pVCpu,
2103 IEMTASKSWITCH enmTaskSwitch,
2104 uint32_t uNextEip,
2105 uint32_t fFlags,
2106 uint16_t uErr,
2107 uint64_t uCr2,
2108 RTSEL SelTSS,
2109 PIEMSELDESC pNewDescTSS) RT_NOEXCEPT
2110{
2111 Assert(!IEM_IS_REAL_MODE(pVCpu));
2112 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
2113 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2114
2115 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
2116 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2117 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2118 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2119 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2120
2121 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2122 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2123
2124 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTSS,
2125 fIsNewTSS386, pVCpu->cpum.GstCtx.eip, uNextEip));
2126
2127 /* Update CR2 in case it's a page-fault. */
2128 /** @todo This should probably be done much earlier in IEM/PGM. See
2129 * @bugref{5653#c49}. */
2130 if (fFlags & IEM_XCPT_FLAGS_CR2)
2131 pVCpu->cpum.GstCtx.cr2 = uCr2;
2132
2133 /*
2134 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
2135 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
2136 */
2137 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
2138 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
2139 if (uNewTSSLimit < uNewTSSLimitMin)
2140 {
2141 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
2142 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
2143 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
2144 }
2145
2146 /*
2147 * Task switches in VMX non-root mode always cause task-switch VM-exits.
2148 * The new TSS must have been read and validated (DPL, limits etc.) before a
2149 * task-switch VM-exit commences.
2150 *
2151 * See Intel spec. 25.4.2 "Treatment of Task Switches".
2152 */
2153 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
2154 {
2155 Log(("iemTaskSwitch: Guest intercept (source=%u, sel=%#x) -> VM-exit.\n", enmTaskSwitch, SelTSS));
2156 IEM_VMX_VMEXIT_TASK_SWITCH_RET(pVCpu, enmTaskSwitch, SelTSS, uNextEip - pVCpu->cpum.GstCtx.eip);
2157 }
2158
2159 /*
2160 * The SVM nested-guest intercept for task-switch takes priority over all exceptions
2161 * after validating the incoming (new) TSS, see AMD spec. 15.14.1 "Task Switch Intercept".
2162 */
2163 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TASK_SWITCH))
2164 {
2165 uint32_t const uExitInfo1 = SelTSS;
2166 uint32_t uExitInfo2 = uErr;
2167 switch (enmTaskSwitch)
2168 {
2169 case IEMTASKSWITCH_JUMP: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_JUMP; break;
2170 case IEMTASKSWITCH_IRET: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_IRET; break;
2171 default: break;
2172 }
2173 if (fFlags & IEM_XCPT_FLAGS_ERR)
2174 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_HAS_ERROR_CODE;
2175 if (pVCpu->cpum.GstCtx.eflags.Bits.u1RF)
2176 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_EFLAGS_RF;
2177
2178 Log(("iemTaskSwitch: Guest intercept -> #VMEXIT. uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uExitInfo1, uExitInfo2));
2179 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_TASK_SWITCH, uExitInfo1, uExitInfo2);
2180 RT_NOREF2(uExitInfo1, uExitInfo2);
2181 }
2182
2183 /*
2184 * Check the current TSS limit. The last written byte to the current TSS during the
2185 * task switch will be 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
2186 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2187 *
2188 * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
2189 * end up with smaller than "legal" TSS limits.
2190 */
2191 uint32_t const uCurTSSLimit = pVCpu->cpum.GstCtx.tr.u32Limit;
2192 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
2193 if (uCurTSSLimit < uCurTSSLimitMin)
2194 {
2195 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
2196 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
2197 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
2198 }
2199
2200 /*
2201 * Verify that the new TSS can be accessed and map it. Map only the required contents
2202 * and not the entire TSS.
2203 */
2204 void *pvNewTSS;
2205 uint32_t const cbNewTSS = uNewTSSLimitMin + 1;
2206 RTGCPTR const GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
2207 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
2208 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
2209 * not perform correct translation if this happens. See Intel spec. 7.2.1
2210 * "Task-State Segment". */
2211 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW);
2212 if (rcStrict != VINF_SUCCESS)
2213 {
2214 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
2215 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
2216 return rcStrict;
2217 }
2218
2219 /*
2220 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
2221 */
2222 uint32_t u32EFlags = pVCpu->cpum.GstCtx.eflags.u32;
2223 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
2224 || enmTaskSwitch == IEMTASKSWITCH_IRET)
2225 {
2226 PX86DESC pDescCurTSS;
2227 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
2228 pVCpu->cpum.GstCtx.gdtr.pGdt + (pVCpu->cpum.GstCtx.tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
2229 if (rcStrict != VINF_SUCCESS)
2230 {
2231 Log(("iemTaskSwitch: Failed to read current TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2232 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2233 return rcStrict;
2234 }
2235
2236 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2237 rcStrict = iemMemCommitAndUnmap(pVCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
2238 if (rcStrict != VINF_SUCCESS)
2239 {
2240 Log(("iemTaskSwitch: Failed to commit current TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2241 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2242 return rcStrict;
2243 }
2244
2245 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
2246 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
2247 {
2248 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2249 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2250 u32EFlags &= ~X86_EFL_NT;
2251 }
2252 }
2253
2254 /*
2255 * Save the CPU state into the current TSS.
2256 */
2257 RTGCPTR const GCPtrCurTSS = pVCpu->cpum.GstCtx.tr.u64Base;
2258 if (GCPtrNewTSS == GCPtrCurTSS)
2259 {
2260 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
2261 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
2262 pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.u32, pVCpu->cpum.GstCtx.eax,
2263 pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel,
2264 pVCpu->cpum.GstCtx.ldtr.Sel));
2265 }
2266 if (fIsNewTSS386)
2267 {
2268 /*
2269 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
2270 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2271 */
2272 void *pvCurTSS32;
2273 uint32_t const offCurTSS = RT_UOFFSETOF(X86TSS32, eip);
2274 uint32_t const cbCurTSS = RT_UOFFSETOF(X86TSS32, selLdt) - RT_UOFFSETOF(X86TSS32, eip);
2275 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
2276 rcStrict = iemMemMap(pVCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
2277 if (rcStrict != VINF_SUCCESS)
2278 {
2279 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
2280 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
2281 return rcStrict;
2282 }
2283
2284 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS, offCurTSS + cbCurTSS). */
2285 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
2286 pCurTSS32->eip = uNextEip;
2287 pCurTSS32->eflags = u32EFlags;
2288 pCurTSS32->eax = pVCpu->cpum.GstCtx.eax;
2289 pCurTSS32->ecx = pVCpu->cpum.GstCtx.ecx;
2290 pCurTSS32->edx = pVCpu->cpum.GstCtx.edx;
2291 pCurTSS32->ebx = pVCpu->cpum.GstCtx.ebx;
2292 pCurTSS32->esp = pVCpu->cpum.GstCtx.esp;
2293 pCurTSS32->ebp = pVCpu->cpum.GstCtx.ebp;
2294 pCurTSS32->esi = pVCpu->cpum.GstCtx.esi;
2295 pCurTSS32->edi = pVCpu->cpum.GstCtx.edi;
2296 pCurTSS32->es = pVCpu->cpum.GstCtx.es.Sel;
2297 pCurTSS32->cs = pVCpu->cpum.GstCtx.cs.Sel;
2298 pCurTSS32->ss = pVCpu->cpum.GstCtx.ss.Sel;
2299 pCurTSS32->ds = pVCpu->cpum.GstCtx.ds.Sel;
2300 pCurTSS32->fs = pVCpu->cpum.GstCtx.fs.Sel;
2301 pCurTSS32->gs = pVCpu->cpum.GstCtx.gs.Sel;
2302
2303 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
2304 if (rcStrict != VINF_SUCCESS)
2305 {
2306 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2307 VBOXSTRICTRC_VAL(rcStrict)));
2308 return rcStrict;
2309 }
2310 }
2311 else
2312 {
2313 /*
2314 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
2315 */
2316 void *pvCurTSS16;
2317 uint32_t const offCurTSS = RT_UOFFSETOF(X86TSS16, ip);
2318 uint32_t const cbCurTSS = RT_UOFFSETOF(X86TSS16, selLdt) - RT_UOFFSETOF(X86TSS16, ip);
2319 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
2320 rcStrict = iemMemMap(pVCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
2321 if (rcStrict != VINF_SUCCESS)
2322 {
2323 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
2324 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
2325 return rcStrict;
2326 }
2327
2328 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS, offCurTSS + cbCurTSS). */
2329 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
2330 pCurTSS16->ip = uNextEip;
2331 pCurTSS16->flags = u32EFlags;
2332 pCurTSS16->ax = pVCpu->cpum.GstCtx.ax;
2333 pCurTSS16->cx = pVCpu->cpum.GstCtx.cx;
2334 pCurTSS16->dx = pVCpu->cpum.GstCtx.dx;
2335 pCurTSS16->bx = pVCpu->cpum.GstCtx.bx;
2336 pCurTSS16->sp = pVCpu->cpum.GstCtx.sp;
2337 pCurTSS16->bp = pVCpu->cpum.GstCtx.bp;
2338 pCurTSS16->si = pVCpu->cpum.GstCtx.si;
2339 pCurTSS16->di = pVCpu->cpum.GstCtx.di;
2340 pCurTSS16->es = pVCpu->cpum.GstCtx.es.Sel;
2341 pCurTSS16->cs = pVCpu->cpum.GstCtx.cs.Sel;
2342 pCurTSS16->ss = pVCpu->cpum.GstCtx.ss.Sel;
2343 pCurTSS16->ds = pVCpu->cpum.GstCtx.ds.Sel;
2344
2345 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
2346 if (rcStrict != VINF_SUCCESS)
2347 {
2348 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2349 VBOXSTRICTRC_VAL(rcStrict)));
2350 return rcStrict;
2351 }
2352 }
2353
2354 /*
2355 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
2356 */
2357 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2358 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2359 {
2360 /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */
2361 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
2362 pNewTSS->selPrev = pVCpu->cpum.GstCtx.tr.Sel;
2363 }
2364
2365 /*
2366 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky,
2367 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
2368 */
2369 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
2370 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
2371 bool fNewDebugTrap;
2372 if (fIsNewTSS386)
2373 {
2374 PCX86TSS32 pNewTSS32 = (PCX86TSS32)pvNewTSS;
2375 uNewCr3 = (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
2376 uNewEip = pNewTSS32->eip;
2377 uNewEflags = pNewTSS32->eflags;
2378 uNewEax = pNewTSS32->eax;
2379 uNewEcx = pNewTSS32->ecx;
2380 uNewEdx = pNewTSS32->edx;
2381 uNewEbx = pNewTSS32->ebx;
2382 uNewEsp = pNewTSS32->esp;
2383 uNewEbp = pNewTSS32->ebp;
2384 uNewEsi = pNewTSS32->esi;
2385 uNewEdi = pNewTSS32->edi;
2386 uNewES = pNewTSS32->es;
2387 uNewCS = pNewTSS32->cs;
2388 uNewSS = pNewTSS32->ss;
2389 uNewDS = pNewTSS32->ds;
2390 uNewFS = pNewTSS32->fs;
2391 uNewGS = pNewTSS32->gs;
2392 uNewLdt = pNewTSS32->selLdt;
2393 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
2394 }
2395 else
2396 {
2397 PCX86TSS16 pNewTSS16 = (PCX86TSS16)pvNewTSS;
2398 uNewCr3 = 0;
2399 uNewEip = pNewTSS16->ip;
2400 uNewEflags = pNewTSS16->flags;
2401 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax;
2402 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx;
2403 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx;
2404 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx;
2405 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp;
2406 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp;
2407 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si;
2408 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di;
2409 uNewES = pNewTSS16->es;
2410 uNewCS = pNewTSS16->cs;
2411 uNewSS = pNewTSS16->ss;
2412 uNewDS = pNewTSS16->ds;
2413 uNewFS = 0;
2414 uNewGS = 0;
2415 uNewLdt = pNewTSS16->selLdt;
2416 fNewDebugTrap = false;
2417 }
2418
2419 if (GCPtrNewTSS == GCPtrCurTSS)
2420 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
2421 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
2422
2423 /*
2424 * We're done accessing the new TSS.
2425 */
2426 rcStrict = iemMemCommitAndUnmap(pVCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
2427 if (rcStrict != VINF_SUCCESS)
2428 {
2429 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
2430 return rcStrict;
2431 }
2432
2433 /*
2434 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
2435 */
2436 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
2437 {
2438 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
2439 pVCpu->cpum.GstCtx.gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
2440 if (rcStrict != VINF_SUCCESS)
2441 {
2442 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2443 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2444 return rcStrict;
2445 }
2446
2447 /* Check that the descriptor indicates the new TSS is available (not busy). */
2448 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2449 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
2450 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
2451
2452 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2453 rcStrict = iemMemCommitAndUnmap(pVCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
2454 if (rcStrict != VINF_SUCCESS)
2455 {
2456 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2457 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2458 return rcStrict;
2459 }
2460 }
2461
2462 /*
2463 * From this point on, we're technically in the new task. We will defer exceptions
2464 * until the completion of the task switch but before executing any instructions in the new task.
2465 */
2466 pVCpu->cpum.GstCtx.tr.Sel = SelTSS;
2467 pVCpu->cpum.GstCtx.tr.ValidSel = SelTSS;
2468 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
2469 pVCpu->cpum.GstCtx.tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
2470 pVCpu->cpum.GstCtx.tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
2471 pVCpu->cpum.GstCtx.tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy);
2472 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
2473
2474 /* Set the busy bit in TR. */
2475 pVCpu->cpum.GstCtx.tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2476
2477 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
2478 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2479 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2480 {
2481 uNewEflags |= X86_EFL_NT;
2482 }
2483
2484 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
2485 pVCpu->cpum.GstCtx.cr0 |= X86_CR0_TS;
2486 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
2487
2488 pVCpu->cpum.GstCtx.eip = uNewEip;
2489 pVCpu->cpum.GstCtx.eax = uNewEax;
2490 pVCpu->cpum.GstCtx.ecx = uNewEcx;
2491 pVCpu->cpum.GstCtx.edx = uNewEdx;
2492 pVCpu->cpum.GstCtx.ebx = uNewEbx;
2493 pVCpu->cpum.GstCtx.esp = uNewEsp;
2494 pVCpu->cpum.GstCtx.ebp = uNewEbp;
2495 pVCpu->cpum.GstCtx.esi = uNewEsi;
2496 pVCpu->cpum.GstCtx.edi = uNewEdi;
2497
2498 uNewEflags &= X86_EFL_LIVE_MASK;
2499 uNewEflags |= X86_EFL_RA1_MASK;
2500 IEMMISC_SET_EFL(pVCpu, uNewEflags);
2501
2502 /*
2503 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
2504 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
2505 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
2506 */
2507 pVCpu->cpum.GstCtx.es.Sel = uNewES;
2508 pVCpu->cpum.GstCtx.es.Attr.u &= ~X86DESCATTR_P;
2509
2510 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
2511 pVCpu->cpum.GstCtx.cs.Attr.u &= ~X86DESCATTR_P;
2512
2513 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
2514 pVCpu->cpum.GstCtx.ss.Attr.u &= ~X86DESCATTR_P;
2515
2516 pVCpu->cpum.GstCtx.ds.Sel = uNewDS;
2517 pVCpu->cpum.GstCtx.ds.Attr.u &= ~X86DESCATTR_P;
2518
2519 pVCpu->cpum.GstCtx.fs.Sel = uNewFS;
2520 pVCpu->cpum.GstCtx.fs.Attr.u &= ~X86DESCATTR_P;
2521
2522 pVCpu->cpum.GstCtx.gs.Sel = uNewGS;
2523 pVCpu->cpum.GstCtx.gs.Attr.u &= ~X86DESCATTR_P;
2524 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2525
2526 pVCpu->cpum.GstCtx.ldtr.Sel = uNewLdt;
2527 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
2528 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_P;
2529 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
2530
2531 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2532 {
2533 pVCpu->cpum.GstCtx.es.Attr.u |= X86DESCATTR_UNUSABLE;
2534 pVCpu->cpum.GstCtx.cs.Attr.u |= X86DESCATTR_UNUSABLE;
2535 pVCpu->cpum.GstCtx.ss.Attr.u |= X86DESCATTR_UNUSABLE;
2536 pVCpu->cpum.GstCtx.ds.Attr.u |= X86DESCATTR_UNUSABLE;
2537 pVCpu->cpum.GstCtx.fs.Attr.u |= X86DESCATTR_UNUSABLE;
2538 pVCpu->cpum.GstCtx.gs.Attr.u |= X86DESCATTR_UNUSABLE;
2539 pVCpu->cpum.GstCtx.ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
2540 }
2541
2542 /*
2543 * Switch CR3 for the new task.
2544 */
2545 if ( fIsNewTSS386
2546 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG))
2547 {
2548 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
2549 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
2550 AssertRCSuccessReturn(rc, rc);
2551
2552 /* Inform PGM. */
2553 /** @todo Should we raise \#GP(0) here when PAE PDPEs are invalid? */
2554 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PGE));
2555 AssertRCReturn(rc, rc);
2556 /* ignore informational status codes */
2557
2558 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
2559 }
2560
2561 /*
2562 * Switch LDTR for the new task.
2563 */
2564 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
2565 iemHlpLoadNullDataSelectorProt(pVCpu, &pVCpu->cpum.GstCtx.ldtr, uNewLdt);
2566 else
2567 {
2568 Assert(!pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
2569
2570 IEMSELDESC DescNewLdt;
2571 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
2572 if (rcStrict != VINF_SUCCESS)
2573 {
2574 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
2575 uNewLdt, pVCpu->cpum.GstCtx.gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
2576 return rcStrict;
2577 }
2578 if ( !DescNewLdt.Legacy.Gen.u1Present
2579 || DescNewLdt.Legacy.Gen.u1DescType
2580 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
2581 {
2582 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
2583 uNewLdt, DescNewLdt.Legacy.u));
2584 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
2585 }
2586
2587 pVCpu->cpum.GstCtx.ldtr.ValidSel = uNewLdt;
2588 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
2589 pVCpu->cpum.GstCtx.ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
2590 pVCpu->cpum.GstCtx.ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
2591 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
2592 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2593 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
2594 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
2595 }
2596
2597 IEMSELDESC DescSS;
2598 if (IEM_IS_V86_MODE(pVCpu))
2599 {
2600 pVCpu->iem.s.uCpl = 3;
2601 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.es, uNewES);
2602 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.cs, uNewCS);
2603 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ss, uNewSS);
2604 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ds, uNewDS);
2605 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.fs, uNewFS);
2606 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.gs, uNewGS);
2607
2608 /* Quick fix: fake DescSS. */ /** @todo fix the code further down? */
2609 DescSS.Legacy.u = 0;
2610 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u32Limit;
2611 DescSS.Legacy.Gen.u4LimitHigh = pVCpu->cpum.GstCtx.ss.u32Limit >> 16;
2612 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u64Base;
2613 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 16);
2614 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 24);
2615 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
2616 DescSS.Legacy.Gen.u2Dpl = 3;
2617 }
2618 else
2619 {
2620 uint8_t const uNewCpl = (uNewCS & X86_SEL_RPL);
2621
2622 /*
2623 * Load the stack segment for the new task.
2624 */
2625 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
2626 {
2627 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
2628 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2629 }
2630
2631 /* Fetch the descriptor. */
2632 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
2633 if (rcStrict != VINF_SUCCESS)
2634 {
2635 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
2636 VBOXSTRICTRC_VAL(rcStrict)));
2637 return rcStrict;
2638 }
2639
2640 /* SS must be a data segment and writable. */
2641 if ( !DescSS.Legacy.Gen.u1DescType
2642 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
2643 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
2644 {
2645 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
2646 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
2647 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2648 }
2649
2650 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
2651 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
2652 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
2653 {
2654 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
2655 uNewCpl));
2656 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2657 }
2658
2659 /* Is it there? */
2660 if (!DescSS.Legacy.Gen.u1Present)
2661 {
2662 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
2663 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2664 }
2665
2666 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
2667 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
2668
2669 /* Set the accessed bit before committing the result into SS. */
2670 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2671 {
2672 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
2673 if (rcStrict != VINF_SUCCESS)
2674 return rcStrict;
2675 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2676 }
2677
2678 /* Commit SS. */
2679 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
2680 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSS;
2681 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
2682 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimit;
2683 pVCpu->cpum.GstCtx.ss.u64Base = u64Base;
2684 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
2685 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
2686
2687 /* CPL has changed, update IEM before loading rest of segments. */
2688 pVCpu->iem.s.uCpl = uNewCpl;
2689
2690 /*
2691 * Load the data segments for the new task.
2692 */
2693 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.es, uNewES);
2694 if (rcStrict != VINF_SUCCESS)
2695 return rcStrict;
2696 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.ds, uNewDS);
2697 if (rcStrict != VINF_SUCCESS)
2698 return rcStrict;
2699 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.fs, uNewFS);
2700 if (rcStrict != VINF_SUCCESS)
2701 return rcStrict;
2702 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.gs, uNewGS);
2703 if (rcStrict != VINF_SUCCESS)
2704 return rcStrict;
2705
2706 /*
2707 * Load the code segment for the new task.
2708 */
2709 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
2710 {
2711 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
2712 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2713 }
2714
2715 /* Fetch the descriptor. */
2716 IEMSELDESC DescCS;
2717 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
2718 if (rcStrict != VINF_SUCCESS)
2719 {
2720 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
2721 return rcStrict;
2722 }
2723
2724 /* CS must be a code segment. */
2725 if ( !DescCS.Legacy.Gen.u1DescType
2726 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
2727 {
2728 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
2729 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
2730 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2731 }
2732
2733 /* For conforming CS, DPL must be less than or equal to the RPL. */
2734 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2735 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
2736 {
2737 Log(("iemTaskSwitch: conforming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
2738 DescCS.Legacy.Gen.u2Dpl));
2739 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2740 }
2741
2742 /* For non-conforming CS, DPL must match RPL. */
2743 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2744 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
2745 {
2746 Log(("iemTaskSwitch: non-conforming CS DPL/RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
2747 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
2748 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2749 }
2750
2751 /* Is it there? */
2752 if (!DescCS.Legacy.Gen.u1Present)
2753 {
2754 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
2755 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2756 }
2757
2758 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
2759 u64Base = X86DESC_BASE(&DescCS.Legacy);
2760
2761 /* Set the accessed bit before committing the result into CS. */
2762 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2763 {
2764 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
2765 if (rcStrict != VINF_SUCCESS)
2766 return rcStrict;
2767 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2768 }
2769
2770 /* Commit CS. */
2771 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
2772 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCS;
2773 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
2774 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
2775 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
2776 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
2777 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
2778 }
2779
2780 /** @todo Debug trap. */
2781 if (fIsNewTSS386 && fNewDebugTrap)
2782 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
2783
2784 /*
2785 * Construct the error code masks based on what caused this task switch.
2786 * See Intel Instruction reference for INT.
2787 */
2788 uint16_t uExt;
2789 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
2790 && ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
2791 || (fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)))
2792 {
2793 uExt = 1;
2794 }
2795 else
2796 uExt = 0;
2797
2798 /*
2799 * Push any error code on to the new stack.
2800 */
2801 if (fFlags & IEM_XCPT_FLAGS_ERR)
2802 {
2803 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
2804 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
2805 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
2806
2807 /* Check that there is sufficient space on the stack. */
2808 /** @todo Factor out segment limit checking for normal/expand down segments
2809 * into a separate function. */
2810 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
2811 {
2812 if ( pVCpu->cpum.GstCtx.esp - 1 > cbLimitSS
2813 || pVCpu->cpum.GstCtx.esp < cbStackFrame)
2814 {
2815 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
2816 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
2817 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
2818 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
2819 }
2820 }
2821 else
2822 {
2823 if ( pVCpu->cpum.GstCtx.esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
2824 || pVCpu->cpum.GstCtx.esp - cbStackFrame < cbLimitSS + UINT32_C(1))
2825 {
2826 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
2827 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
2828 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
2829 }
2830 }
2831
2832
2833 if (fIsNewTSS386)
2834 rcStrict = iemMemStackPushU32(pVCpu, uErr);
2835 else
2836 rcStrict = iemMemStackPushU16(pVCpu, uErr);
2837 if (rcStrict != VINF_SUCCESS)
2838 {
2839 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
2840 fIsNewTSS386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
2841 return rcStrict;
2842 }
2843 }
2844
2845 /* Check the new EIP against the new CS limit. */
2846 if (pVCpu->cpum.GstCtx.eip > pVCpu->cpum.GstCtx.cs.u32Limit)
2847 {
2848 Log(("iemTaskSwitch: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
2849 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.cs.u32Limit));
2850 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
2851 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
2852 }
2853
2854 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip,
2855 pVCpu->cpum.GstCtx.ss.Sel));
2856 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
2857}
2858
2859
2860/**
2861 * Implements exceptions and interrupts for protected mode.
2862 *
2863 * @returns VBox strict status code.
2864 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2865 * @param cbInstr The number of bytes to offset rIP by in the return
2866 * address.
2867 * @param u8Vector The interrupt / exception vector number.
2868 * @param fFlags The flags.
2869 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2870 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2871 */
2872static VBOXSTRICTRC
2873iemRaiseXcptOrIntInProtMode(PVMCPUCC pVCpu,
2874 uint8_t cbInstr,
2875 uint8_t u8Vector,
2876 uint32_t fFlags,
2877 uint16_t uErr,
2878 uint64_t uCr2) RT_NOEXCEPT
2879{
2880 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2881
2882 /*
2883 * Read the IDT entry.
2884 */
2885 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
2886 {
2887 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
2888 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2889 }
2890 X86DESC Idte;
2891 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
2892 pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(8) * u8Vector);
2893 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2894 {
2895 Log(("iemRaiseXcptOrIntInProtMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
2896 return rcStrict;
2897 }
2898 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
2899 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
2900 Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
2901
2902 /*
2903 * Check the descriptor type, DPL and such.
2904 * ASSUMES this is done in the same order as described for call-gate calls.
2905 */
2906 if (Idte.Gate.u1DescType)
2907 {
2908 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
2909 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2910 }
2911 bool fTaskGate = false;
2912 uint8_t f32BitGate = true;
2913 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
2914 switch (Idte.Gate.u4Type)
2915 {
2916 case X86_SEL_TYPE_SYS_UNDEFINED:
2917 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
2918 case X86_SEL_TYPE_SYS_LDT:
2919 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
2920 case X86_SEL_TYPE_SYS_286_CALL_GATE:
2921 case X86_SEL_TYPE_SYS_UNDEFINED2:
2922 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
2923 case X86_SEL_TYPE_SYS_UNDEFINED3:
2924 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
2925 case X86_SEL_TYPE_SYS_386_CALL_GATE:
2926 case X86_SEL_TYPE_SYS_UNDEFINED4:
2927 {
2928 /** @todo check what actually happens when the type is wrong...
2929 * esp. call gates. */
2930 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
2931 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2932 }
2933
2934 case X86_SEL_TYPE_SYS_286_INT_GATE:
2935 f32BitGate = false;
2936 RT_FALL_THRU();
2937 case X86_SEL_TYPE_SYS_386_INT_GATE:
2938 fEflToClear |= X86_EFL_IF;
2939 break;
2940
2941 case X86_SEL_TYPE_SYS_TASK_GATE:
2942 fTaskGate = true;
2943#ifndef IEM_IMPLEMENTS_TASKSWITCH
2944 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
2945#endif
2946 break;
2947
2948 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
2949 f32BitGate = false;
 RT_FALL_THRU();
2950 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
2951 break;
2952
2953 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2954 }
2955
2956 /* Check DPL against CPL if applicable. */
2957 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
2958 {
2959 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
2960 {
2961 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
2962 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2963 }
2964 }
2965
2966 /* Is it there? */
2967 if (!Idte.Gate.u1Present)
2968 {
2969 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
2970 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2971 }
2972
2973 /* Is it a task-gate? */
2974 if (fTaskGate)
2975 {
2976 /*
2977 * Construct the error code masks based on what caused this task switch.
2978 * See Intel Instruction reference for INT.
2979 */
2980 uint16_t const uExt = ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
2981 && !(fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)) ? 0 : 1;
2982 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
2983 RTSEL SelTSS = Idte.Gate.u16Sel;
2984
2985 /*
2986 * Fetch the TSS descriptor in the GDT.
2987 */
2988 IEMSELDESC DescTSS;
2989 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
2990 if (rcStrict != VINF_SUCCESS)
2991 {
2992 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
2993 VBOXSTRICTRC_VAL(rcStrict)));
2994 return rcStrict;
2995 }
2996
2997 /* The TSS descriptor must be a system segment and be available (not busy). */
2998 if ( DescTSS.Legacy.Gen.u1DescType
2999 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
3000 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
3001 {
3002 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
3003 u8Vector, SelTSS, DescTSS.Legacy.au64));
3004 return iemRaiseGeneralProtectionFault(pVCpu, (SelTSS & uSelMask) | uExt);
3005 }
3006
3007 /* The TSS must be present. */
3008 if (!DescTSS.Legacy.Gen.u1Present)
3009 {
3010 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
3011 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTSS & uSelMask) | uExt);
3012 }
3013
3014 /* Do the actual task switch. */
3015 return iemTaskSwitch(pVCpu, IEMTASKSWITCH_INT_XCPT,
3016 (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip,
3017 fFlags, uErr, uCr2, SelTSS, &DescTSS);
3018 }
3019
3020 /* A null CS is bad. */
3021 RTSEL NewCS = Idte.Gate.u16Sel;
3022 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3023 {
3024 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3025 return iemRaiseGeneralProtectionFault0(pVCpu);
3026 }
3027
3028 /* Fetch the descriptor for the new CS. */
3029 IEMSELDESC DescCS;
3030 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
3031 if (rcStrict != VINF_SUCCESS)
3032 {
3033 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3034 return rcStrict;
3035 }
3036
3037 /* Must be a code segment. */
3038 if (!DescCS.Legacy.Gen.u1DescType)
3039 {
3040 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3041 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3042 }
3043 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3044 {
3045 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3046 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3047 }
3048
3049 /* Don't allow lowering the privilege level. */
3050 /** @todo Does the lowering of privileges apply to software interrupts
3051 * only? This has a bearing on the more-privileged or
3052 * same-privilege stack behavior further down. A testcase would
3053 * be nice. */
3054 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
3055 {
3056 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3057 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
3058 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3059 }
3060
3061 /* Make sure the selector is present. */
3062 if (!DescCS.Legacy.Gen.u1Present)
3063 {
3064 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3065 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
3066 }
3067
3068 /* Check the new EIP against the new CS limit. */
3069 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
3070 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
3071 ? Idte.Gate.u16OffsetLow
3072 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
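/*
 * Illustrative sketch, not part of the IEM code: for a 32-bit gate the entry
 * point is just the two 16-bit offset halves glued together. With the
 * hypothetical values u16OffsetHigh=0x8010 and u16OffsetLow=0x2040:
 *
 *     uint32_t const uEntry = 0x2040 | ((uint32_t)0x8010 << 16); // = 0x80102040
 *
 * A 286 (16-bit) gate only ever supplies the low word.
 */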
3073 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
3074 if (uNewEip > cbLimitCS)
3075 {
3076 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
3077 u8Vector, uNewEip, cbLimitCS, NewCS));
3078 return iemRaiseGeneralProtectionFault(pVCpu, 0);
3079 }
3080 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
3081
3082 /* Calc the flag image to push. */
3083 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3084 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3085 fEfl &= ~X86_EFL_RF;
3086 else
3087 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3088
3089 /* From V8086 mode only go to CPL 0. */
3090 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3091 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
3092 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
3093 {
3094 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
3095 return iemRaiseGeneralProtectionFault(pVCpu, 0);
3096 }
3097
3098 /*
3099 * If the privilege level changes, we need to get a new stack from the TSS.
3100 * This in turns means validating the new SS and ESP...
3101 */
3102 if (uNewCpl != pVCpu->iem.s.uCpl)
3103 {
3104 RTSEL NewSS;
3105 uint32_t uNewEsp;
3106 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, uNewCpl, &NewSS, &uNewEsp);
3107 if (rcStrict != VINF_SUCCESS)
3108 return rcStrict;
3109
3110 IEMSELDESC DescSS;
3111 rcStrict = iemMiscValidateNewSS(pVCpu, NewSS, uNewCpl, &DescSS);
3112 if (rcStrict != VINF_SUCCESS)
3113 return rcStrict;
3114 /* If the new SS is 16-bit, we are only going to use SP, not ESP. */
3115 if (!DescSS.Legacy.Gen.u1DefBig)
3116 {
3117 Log(("iemRaiseXcptOrIntInProtMode: Forcing ESP=%#x to 16 bits\n", uNewEsp));
3118 uNewEsp = (uint16_t)uNewEsp;
3119 }
3120
3121 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
3122
3123 /* Check that there is sufficient space for the stack frame. */
3124 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
3125 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
3126 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
3127 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
3128
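/*
 * Worked example of the cbStackFrame formula above (illustrative, not part of
 * the IEM code): the base size counts IP/EIP, CS, FLAGS/EFLAGS, SP/ESP and SS
 * (5 slots) plus an optional error code, the V8086 case appends ES, DS, FS and
 * GS (4 more slots), and f32BitGate doubles the slot size from 2 to 4 bytes:
 *
 *     gate     V86?  err?  slots  bytes
 *     16-bit   no    no      5     10
 *     16-bit   no    yes     6     12
 *     32-bit   no    yes     6     24
 *     32-bit   yes   no      9     36
 */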
3129 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3130 {
3131 if ( uNewEsp - 1 > cbLimitSS
3132 || uNewEsp < cbStackFrame)
3133 {
3134 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
3135 u8Vector, NewSS, uNewEsp, cbStackFrame));
3136 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
3137 }
3138 }
3139 else
3140 {
3141 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT16_MAX)
3142 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
3143 {
3144 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
3145 u8Vector, NewSS, uNewEsp, cbStackFrame));
3146 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
3147 }
3148 }
3149
3150 /*
3151 * Start making changes.
3152 */
3153
3154 /* Set the new CPL so that stack accesses use it. */
3155 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
3156 pVCpu->iem.s.uCpl = uNewCpl;
3157
3158 /* Create the stack frame. */
3159 RTPTRUNION uStackFrame;
3160 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
3161 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
3162 if (rcStrict != VINF_SUCCESS)
3163 return rcStrict;
3164 void * const pvStackFrame = uStackFrame.pv;
3165 if (f32BitGate)
3166 {
3167 if (fFlags & IEM_XCPT_FLAGS_ERR)
3168 *uStackFrame.pu32++ = uErr;
3169 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3170 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
3171 uStackFrame.pu32[2] = fEfl;
3172 uStackFrame.pu32[3] = pVCpu->cpum.GstCtx.esp;
3173 uStackFrame.pu32[4] = pVCpu->cpum.GstCtx.ss.Sel;
3174 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
3175 if (fEfl & X86_EFL_VM)
3176 {
3177 uStackFrame.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel;
3178 uStackFrame.pu32[5] = pVCpu->cpum.GstCtx.es.Sel;
3179 uStackFrame.pu32[6] = pVCpu->cpum.GstCtx.ds.Sel;
3180 uStackFrame.pu32[7] = pVCpu->cpum.GstCtx.fs.Sel;
3181 uStackFrame.pu32[8] = pVCpu->cpum.GstCtx.gs.Sel;
3182 }
3183 }
3184 else
3185 {
3186 if (fFlags & IEM_XCPT_FLAGS_ERR)
3187 *uStackFrame.pu16++ = uErr;
3188 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
3189 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
3190 uStackFrame.pu16[2] = fEfl;
3191 uStackFrame.pu16[3] = pVCpu->cpum.GstCtx.sp;
3192 uStackFrame.pu16[4] = pVCpu->cpum.GstCtx.ss.Sel;
3193 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.sp));
3194 if (fEfl & X86_EFL_VM)
3195 {
3196 uStackFrame.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
3197 uStackFrame.pu16[5] = pVCpu->cpum.GstCtx.es.Sel;
3198 uStackFrame.pu16[6] = pVCpu->cpum.GstCtx.ds.Sel;
3199 uStackFrame.pu16[7] = pVCpu->cpum.GstCtx.fs.Sel;
3200 uStackFrame.pu16[8] = pVCpu->cpum.GstCtx.gs.Sel;
3201 }
3202 }
3203 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
3204 if (rcStrict != VINF_SUCCESS)
3205 return rcStrict;
3206
3207 /* Mark the selectors 'accessed' (hope this is the correct time). */
3208 /** @todo testcase: exactly _when_ are the accessed bits set - before or
3209 * after pushing the stack frame? (Write protect the gdt + stack to
3210 * find out.) */
3211 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3212 {
3213 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3214 if (rcStrict != VINF_SUCCESS)
3215 return rcStrict;
3216 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3217 }
3218
3219 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3220 {
3221 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
3222 if (rcStrict != VINF_SUCCESS)
3223 return rcStrict;
3224 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3225 }
3226
3227 /*
3228 * Start committing the register changes (joins with the DPL=CPL branch).
3229 */
3230 pVCpu->cpum.GstCtx.ss.Sel = NewSS;
3231 pVCpu->cpum.GstCtx.ss.ValidSel = NewSS;
3232 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3233 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimitSS;
3234 pVCpu->cpum.GstCtx.ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
3235 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3236 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
3237 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
3238 * SP is loaded).
3239 * Need to check the other combinations too:
3240 * - 16-bit TSS, 32-bit handler
3241 * - 32-bit TSS, 16-bit handler */
3242 if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
3243 pVCpu->cpum.GstCtx.sp = (uint16_t)(uNewEsp - cbStackFrame);
3244 else
3245 pVCpu->cpum.GstCtx.rsp = uNewEsp - cbStackFrame;
3246
3247 if (fEfl & X86_EFL_VM)
3248 {
3249 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.gs);
3250 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.fs);
3251 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.es);
3252 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.ds);
3253 }
3254 }
3255 /*
3256 * Same privilege, no stack change and smaller stack frame.
3257 */
3258 else
3259 {
3260 uint64_t uNewRsp;
3261 RTPTRUNION uStackFrame;
3262 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
3263 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, &uStackFrame.pv, &uNewRsp);
3264 if (rcStrict != VINF_SUCCESS)
3265 return rcStrict;
3266 void * const pvStackFrame = uStackFrame.pv;
3267
3268 if (f32BitGate)
3269 {
3270 if (fFlags & IEM_XCPT_FLAGS_ERR)
3271 *uStackFrame.pu32++ = uErr;
3272 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3273 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
3274 uStackFrame.pu32[2] = fEfl;
3275 }
3276 else
3277 {
3278 if (fFlags & IEM_XCPT_FLAGS_ERR)
3279 *uStackFrame.pu16++ = uErr;
3280 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3281 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
3282 uStackFrame.pu16[2] = fEfl;
3283 }
3284 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
3285 if (rcStrict != VINF_SUCCESS)
3286 return rcStrict;
3287
3288 /* Mark the CS selector as 'accessed'. */
3289 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3290 {
3291 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3292 if (rcStrict != VINF_SUCCESS)
3293 return rcStrict;
3294 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3295 }
3296
3297 /*
3298 * Start committing the register changes (joins with the other branch).
3299 */
3300 pVCpu->cpum.GstCtx.rsp = uNewRsp;
3301 }
3302
3303 /* ... register committing continues. */
3304 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3305 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3306 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3307 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCS;
3308 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3309 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3310
3311 pVCpu->cpum.GstCtx.rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
3312 fEfl &= ~fEflToClear;
3313 IEMMISC_SET_EFL(pVCpu, fEfl);
3314
3315 if (fFlags & IEM_XCPT_FLAGS_CR2)
3316 pVCpu->cpum.GstCtx.cr2 = uCr2;
3317
3318 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3319 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3320
3321 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3322}
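/*
 * Illustrative sketch, not part of the IEM code: the inner-stack frame built
 * by the privilege-change path above, for a 32-bit gate without V8086
 * involvement, from the lowest address (the new ESP) upwards:
 *
 *     uint32_t aFrame[6];
 *     unsigned i = 0;
 *     if (fFlags & IEM_XCPT_FLAGS_ERR)
 *         aFrame[i++] = uErr;                              // optional error code
 *     aFrame[i++] = uReturnEip;                            // EIP (advanced past soft-int instructions)
 *     aFrame[i++] = (uOldCs & ~X86_SEL_RPL) | uOldCpl;     // CS with the old CPL in the RPL bits
 *     aFrame[i++] = fEfl;                                  // EFLAGS image
 *     aFrame[i++] = uOldEsp;                               // outer ESP
 *     aFrame[i++] = uOldSs;                                // outer SS
 *
 * The 16-bit gate variant uses word-sized slots, and the V8086 case appends
 * ES, DS, FS and GS after SS (uReturnEip, uOldCs etc. are placeholder names).
 */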
3323
3324
3325/**
3326 * Implements exceptions and interrupts for long mode.
3327 *
3328 * @returns VBox strict status code.
3329 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3330 * @param cbInstr The number of bytes to offset rIP by in the return
3331 * address.
3332 * @param u8Vector The interrupt / exception vector number.
3333 * @param fFlags The flags.
3334 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3335 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3336 */
3337static VBOXSTRICTRC
3338iemRaiseXcptOrIntInLongMode(PVMCPUCC pVCpu,
3339 uint8_t cbInstr,
3340 uint8_t u8Vector,
3341 uint32_t fFlags,
3342 uint16_t uErr,
3343 uint64_t uCr2) RT_NOEXCEPT
3344{
3345 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3346
3347 /*
3348 * Read the IDT entry.
3349 */
3350 uint16_t offIdt = (uint16_t)u8Vector << 4;
3351 if (pVCpu->cpum.GstCtx.idtr.cbIdt < offIdt + 7)
3352 {
3353 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3354 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3355 }
3356 X86DESC64 Idte;
3357#ifdef _MSC_VER /* Shut up silly compiler warning. */
3358 Idte.au64[0] = 0;
3359 Idte.au64[1] = 0;
3360#endif
3361 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt);
3362 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3363 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt + 8);
3364 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3365 {
3366 Log(("iemRaiseXcptOrIntInLongMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3367 return rcStrict;
3368 }
3369 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
3370 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3371 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
3372
3373 /*
3374 * Check the descriptor type, DPL and such.
3375 * ASSUMES this is done in the same order as described for call-gate calls.
3376 */
3377 if (Idte.Gate.u1DescType)
3378 {
3379 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3380 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3381 }
3382 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
3383 switch (Idte.Gate.u4Type)
3384 {
3385 case AMD64_SEL_TYPE_SYS_INT_GATE:
3386 fEflToClear |= X86_EFL_IF;
3387 break;
3388 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
3389 break;
3390
3391 default:
3392 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3393 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3394 }
3395
3396 /* Check DPL against CPL if applicable. */
3397 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
3398 {
3399 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
3400 {
3401 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
3402 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3403 }
3404 }
3405
3406 /* Is it there? */
3407 if (!Idte.Gate.u1Present)
3408 {
3409 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
3410 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3411 }
3412
3413 /* A null CS is bad. */
3414 RTSEL NewCS = Idte.Gate.u16Sel;
3415 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3416 {
3417 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3418 return iemRaiseGeneralProtectionFault0(pVCpu);
3419 }
3420
3421 /* Fetch the descriptor for the new CS. */
3422 IEMSELDESC DescCS;
3423 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
3424 if (rcStrict != VINF_SUCCESS)
3425 {
3426 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3427 return rcStrict;
3428 }
3429
3430 /* Must be a 64-bit code segment. */
3431 if (!DescCS.Long.Gen.u1DescType)
3432 {
3433 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3434 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3435 }
3436 if ( !DescCS.Long.Gen.u1Long
3437 || DescCS.Long.Gen.u1DefBig
3438 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
3439 {
3440 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
3441 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
3442 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3443 }
3444
3445 /* Don't allow lowering the privilege level. For non-conforming CS
3446 selectors, the CS.DPL sets the privilege level the trap/interrupt
3447 handler runs at. For conforming CS selectors, the CPL remains
3448 unchanged, but the CS.DPL must be <= CPL. */
3449 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
3450 * when CPU in Ring-0. Result \#GP? */
3451 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
3452 {
3453 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3454 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
3455 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3456 }
3457
3458
3459 /* Make sure the selector is present. */
3460 if (!DescCS.Legacy.Gen.u1Present)
3461 {
3462 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3463 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
3464 }
3465
3466 /* Check that the new RIP is canonical. */
3467 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
3468 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
3469 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
3470 if (!IEM_IS_CANONICAL(uNewRip))
3471 {
3472 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
3473 return iemRaiseGeneralProtectionFault0(pVCpu);
3474 }
3475
3476 /*
3477 * If the privilege level changes or if the IST isn't zero, we need to get
3478 * a new stack from the TSS.
3479 */
3480 uint64_t uNewRsp;
3481 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3482 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
3483 if ( uNewCpl != pVCpu->iem.s.uCpl
3484 || Idte.Gate.u3IST != 0)
3485 {
3486 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
3487 if (rcStrict != VINF_SUCCESS)
3488 return rcStrict;
3489 }
3490 else
3491 uNewRsp = pVCpu->cpum.GstCtx.rsp;
3492 uNewRsp &= ~(uint64_t)0xf;
3493
3494 /*
3495 * Calc the flag image to push.
3496 */
3497 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3498 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3499 fEfl &= ~X86_EFL_RF;
3500 else
3501 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3502
3503 /*
3504 * Start making changes.
3505 */
3506 /* Set the new CPL so that stack accesses use it. */
3507 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
3508 pVCpu->iem.s.uCpl = uNewCpl;
3509
3510 /* Create the stack frame. */
3511 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
3512 RTPTRUNION uStackFrame;
3513 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
3514 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
3515 if (rcStrict != VINF_SUCCESS)
3516 return rcStrict;
3517 void * const pvStackFrame = uStackFrame.pv;
3518
3519 if (fFlags & IEM_XCPT_FLAGS_ERR)
3520 *uStackFrame.pu64++ = uErr;
3521 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.rip + cbInstr : pVCpu->cpum.GstCtx.rip;
3522 uStackFrame.pu64[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
3523 uStackFrame.pu64[2] = fEfl;
3524 uStackFrame.pu64[3] = pVCpu->cpum.GstCtx.rsp;
3525 uStackFrame.pu64[4] = pVCpu->cpum.GstCtx.ss.Sel;
3526 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
3527 if (rcStrict != VINF_SUCCESS)
3528 return rcStrict;
3529
3530 /* Mark the CS selector 'accessed' (hope this is the correct time). */
3531 /** @todo testcase: exactly _when_ are the accessed bits set - before or
3532 * after pushing the stack frame? (Write protect the gdt + stack to
3533 * find out.) */
3534 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3535 {
3536 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3537 if (rcStrict != VINF_SUCCESS)
3538 return rcStrict;
3539 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3540 }
3541
3542 /*
3543 * Start committing the register changes.
3544 */
3545 /** @todo research/testcase: Figure out what VT-x and AMD-V load into the
3546 * hidden registers when interrupting 32-bit or 16-bit code! */
3547 if (uNewCpl != uOldCpl)
3548 {
3549 pVCpu->cpum.GstCtx.ss.Sel = 0 | uNewCpl;
3550 pVCpu->cpum.GstCtx.ss.ValidSel = 0 | uNewCpl;
3551 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3552 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
3553 pVCpu->cpum.GstCtx.ss.u64Base = 0;
3554 pVCpu->cpum.GstCtx.ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
3555 }
3556 pVCpu->cpum.GstCtx.rsp = uNewRsp - cbStackFrame;
3557 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3558 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3559 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3560 pVCpu->cpum.GstCtx.cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
3561 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3562 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3563 pVCpu->cpum.GstCtx.rip = uNewRip;
3564
3565 fEfl &= ~fEflToClear;
3566 IEMMISC_SET_EFL(pVCpu, fEfl);
3567
3568 if (fFlags & IEM_XCPT_FLAGS_CR2)
3569 pVCpu->cpum.GstCtx.cr2 = uCr2;
3570
3571 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3572 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3573
3574 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3575}
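/*
 * Illustrative sketch, not part of the IEM code: the 64-bit frame built above
 * is always RIP, CS, RFLAGS, RSP and SS plus an optional error code, written
 * below an RSP that was first aligned down to 16 bytes:
 *
 *     uint64_t const uFrameRsp = (uNewRsp & ~(uint64_t)0xf) - cbStackFrame;
 *     // [uFrameRsp + 0]        error code (only with IEM_XCPT_FLAGS_ERR)
 *     // [uFrameRsp + 0 or 8]   return RIP
 *     // [uFrameRsp + 8 or 16]  old CS with the old CPL in the RPL bits
 *     // [uFrameRsp + 16 or 24] RFLAGS image
 *     // [uFrameRsp + 24 or 32] old RSP
 *     // [uFrameRsp + 32 or 40] old SS
 */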
3576
3577
3578/**
3579 * Implements exceptions and interrupts.
3580 *
3581 * All exceptions and interrupts go thru this function!
3582 *
3583 * @returns VBox strict status code.
3584 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3585 * @param cbInstr The number of bytes to offset rIP by in the return
3586 * address.
3587 * @param u8Vector The interrupt / exception vector number.
3588 * @param fFlags The flags.
3589 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3590 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3591 */
3592VBOXSTRICTRC
3593iemRaiseXcptOrInt(PVMCPUCC pVCpu,
3594 uint8_t cbInstr,
3595 uint8_t u8Vector,
3596 uint32_t fFlags,
3597 uint16_t uErr,
3598 uint64_t uCr2) RT_NOEXCEPT
3599{
3600 /*
3601 * Get all the state that we might need here.
3602 */
3603 IEM_CTX_IMPORT_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3604 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3605
3606#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
3607 /*
3608 * Flush prefetch buffer
3609 */
3610 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
3611#endif
3612
3613 /*
3614 * Perform the V8086 IOPL check and upgrade the fault without nesting.
3615 */
3616 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1VM
3617 && pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL != 3
3618 && (fFlags & ( IEM_XCPT_FLAGS_T_SOFT_INT
3619 | IEM_XCPT_FLAGS_BP_INSTR
3620 | IEM_XCPT_FLAGS_ICEBP_INSTR
3621 | IEM_XCPT_FLAGS_OF_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
3622 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) )
3623 {
3624 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
3625 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
3626 u8Vector = X86_XCPT_GP;
3627 uErr = 0;
3628 }
3629#ifdef DBGFTRACE_ENABLED
3630 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
3631 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
3632 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp);
3633#endif
3634
3635 /*
3636 * Evaluate whether NMI blocking should be in effect.
3637 * Normally, NMI blocking is in effect whenever we inject an NMI.
3638 */
3639 bool fBlockNmi;
3640 if ( u8Vector == X86_XCPT_NMI
3641 && (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT))
3642 fBlockNmi = true;
3643 else
3644 fBlockNmi = false;
3645
3646#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3647 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3648 {
3649 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEvent(pVCpu, u8Vector, fFlags, uErr, uCr2, cbInstr);
3650 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
3651 return rcStrict0;
3652
3653 /* If virtual-NMI blocking is in effect for the nested-guest, guest NMIs are not blocked. */
3654 if (pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking)
3655 {
3656 Assert(CPUMIsGuestVmxPinCtlsSet(&pVCpu->cpum.GstCtx, VMX_PIN_CTLS_VIRT_NMI));
3657 fBlockNmi = false;
3658 }
3659 }
3660#endif
3661
3662#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
3663 if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
3664 {
3665 /*
3666 * If the event is being injected as part of VMRUN, it isn't subject to event
3667 * intercepts in the nested-guest. However, secondary exceptions that occur
3668 * during injection of any event -are- subject to exception intercepts.
3669 *
3670 * See AMD spec. 15.20 "Event Injection".
3671 */
3672 if (!pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents)
3673 pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents = true;
3674 else
3675 {
3676 /*
3677 * Check and handle if the event being raised is intercepted.
3678 */
3679 VBOXSTRICTRC rcStrict0 = iemHandleSvmEventIntercept(pVCpu, u8Vector, fFlags, uErr, uCr2);
3680 if (rcStrict0 != VINF_SVM_INTERCEPT_NOT_ACTIVE)
3681 return rcStrict0;
3682 }
3683 }
3684#endif
3685
3686 /*
3687 * Set NMI blocking if necessary.
3688 */
3689 if ( fBlockNmi
3690 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
3691 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
3692
3693 /*
3694 * Do recursion accounting.
3695 */
3696 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
3697 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
3698 if (pVCpu->iem.s.cXcptRecursions == 0)
3699 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
3700 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2));
3701 else
3702 {
3703 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
3704 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt,
3705 pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
3706
3707 if (pVCpu->iem.s.cXcptRecursions >= 4)
3708 {
3709#ifdef DEBUG_bird
3710 AssertFailed();
3711#endif
3712 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
3713 }
3714
3715 /*
3716 * Evaluate the sequence of recurring events.
3717 */
3718 IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevXcpt, uPrevXcpt, fFlags, u8Vector,
3719 NULL /* pXcptRaiseInfo */);
3720 if (enmRaise == IEMXCPTRAISE_CURRENT_XCPT)
3721 { /* likely */ }
3722 else if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
3723 {
3724 Log2(("iemRaiseXcptOrInt: Raising double fault. uPrevXcpt=%#x\n", uPrevXcpt));
3725 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
3726 u8Vector = X86_XCPT_DF;
3727 uErr = 0;
3728#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3729 /* VMX nested-guest #DF intercept needs to be checked here. */
3730 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3731 {
3732 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEventDoubleFault(pVCpu);
3733 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
3734 return rcStrict0;
3735 }
3736#endif
3737 /* SVM nested-guest #DF intercepts need to be checked now. See AMD spec. 15.12 "Exception Intercepts". */
3738 if (IEM_SVM_IS_XCPT_INTERCEPT_SET(pVCpu, X86_XCPT_DF))
3739 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
3740 }
3741 else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)
3742 {
3743 Log2(("iemRaiseXcptOrInt: Raising triple fault. uPrevXcpt=%#x\n", uPrevXcpt));
3744 return iemInitiateCpuShutdown(pVCpu);
3745 }
3746 else if (enmRaise == IEMXCPTRAISE_CPU_HANG)
3747 {
3748 /* If a nested-guest enters an endless CPU loop condition, we'll emulate it; otherwise guru. */
3749 Log2(("iemRaiseXcptOrInt: CPU hang condition detected\n"));
3750 if ( !CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu))
3751 && !CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
3752 return VERR_EM_GUEST_CPU_HANG;
3753 }
3754 else
3755 {
3756 AssertMsgFailed(("Unexpected condition! enmRaise=%#x uPrevXcpt=%#x fPrevXcpt=%#x, u8Vector=%#x fFlags=%#x\n",
3757 enmRaise, uPrevXcpt, fPrevXcpt, u8Vector, fFlags));
3758 return VERR_IEM_IPE_9;
3759 }
3760
3761 /*
3762 * The 'EXT' bit is set when an exception occurs during delivery of an external
3763 * event (such as an interrupt or an earlier exception)[1]. A privileged software
3764 * exception (INT1) also sets the EXT bit[2]. For exceptions generated by software
3765 * interrupts and the INTO and INT3 instructions, the 'EXT' bit is not set[3].
3766 *
3767 * [1] - Intel spec. 6.13 "Error Code"
3768 * [2] - Intel spec. 26.5.1.1 "Details of Vectored-Event Injection".
3769 * [3] - Intel Instruction reference for INT n.
3770 */
3771 if ( (fPrevXcpt & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR))
3772 && (fFlags & IEM_XCPT_FLAGS_ERR)
3773 && u8Vector != X86_XCPT_PF
3774 && u8Vector != X86_XCPT_DF)
3775 {
3776 uErr |= X86_TRAP_ERR_EXTERNAL;
3777 }
3778 }
3779
3780 pVCpu->iem.s.cXcptRecursions++;
3781 pVCpu->iem.s.uCurXcpt = u8Vector;
3782 pVCpu->iem.s.fCurXcpt = fFlags;
3783 pVCpu->iem.s.uCurXcptErr = uErr;
3784 pVCpu->iem.s.uCurXcptCr2 = uCr2;
3785
3786 /*
3787 * Extensive logging.
3788 */
3789#if defined(LOG_ENABLED) && defined(IN_RING3)
3790 if (LogIs3Enabled())
3791 {
3792 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR_MASK);
3793 PVM pVM = pVCpu->CTX_SUFF(pVM);
3794 char szRegs[4096];
3795 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
3796 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
3797 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
3798 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
3799 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
3800 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
3801 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
3802 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
3803 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
3804 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
3805 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
3806 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
3807 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
3808 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
3809 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
3810 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
3811 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
3812 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
3813 " efer=%016VR{efer}\n"
3814 " pat=%016VR{pat}\n"
3815 " sf_mask=%016VR{sf_mask}\n"
3816 "krnl_gs_base=%016VR{krnl_gs_base}\n"
3817 " lstar=%016VR{lstar}\n"
3818 " star=%016VR{star} cstar=%016VR{cstar}\n"
3819 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
3820 );
3821
3822 char szInstr[256];
3823 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
3824 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
3825 szInstr, sizeof(szInstr), NULL);
3826 Log3(("%s%s\n", szRegs, szInstr));
3827 }
3828#endif /* LOG_ENABLED */
3829
3830 /*
3831 * Call the mode specific worker function.
3832 */
3833 VBOXSTRICTRC rcStrict;
3834 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
3835 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
3836 else if (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LMA)
3837 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
3838 else
3839 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
3840
3841 /* Flush the prefetch buffer. */
3842#ifdef IEM_WITH_CODE_TLB
3843 pVCpu->iem.s.pbInstrBuf = NULL;
3844#else
3845 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
3846#endif
3847
3848 /*
3849 * Unwind.
3850 */
3851 pVCpu->iem.s.cXcptRecursions--;
3852 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
3853 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
3854 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u depth=%d\n",
3855 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, pVCpu->iem.s.uCpl,
3856 pVCpu->iem.s.cXcptRecursions + 1));
3857 return rcStrict;
3858}
3859
3860#ifdef IEM_WITH_SETJMP
3861/**
3862 * See iemRaiseXcptOrInt. Will not return.
3863 */
3864DECL_NO_RETURN(void)
3865iemRaiseXcptOrIntJmp(PVMCPUCC pVCpu,
3866 uint8_t cbInstr,
3867 uint8_t u8Vector,
3868 uint32_t fFlags,
3869 uint16_t uErr,
3870 uint64_t uCr2) RT_NOEXCEPT
3871{
3872 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
3873 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
3874}
3875#endif
3876
3877
3878/** \#DE - 00. */
3879VBOXSTRICTRC iemRaiseDivideError(PVMCPUCC pVCpu) RT_NOEXCEPT
3880{
3881 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
3882}
3883
3884
3885/** \#DB - 01.
3886 * @note This automatically clears DR7.GD. */
3887VBOXSTRICTRC iemRaiseDebugException(PVMCPUCC pVCpu) RT_NOEXCEPT
3888{
3889 /** @todo set/clear RF. */
3890 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
3891 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
3892}
3893
3894
3895/** \#BR - 05. */
3896VBOXSTRICTRC iemRaiseBoundRangeExceeded(PVMCPUCC pVCpu) RT_NOEXCEPT
3897{
3898 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_BR, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
3899}
3900
3901
3902/** \#UD - 06. */
3903VBOXSTRICTRC iemRaiseUndefinedOpcode(PVMCPUCC pVCpu) RT_NOEXCEPT
3904{
3905 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
3906}
3907
3908
3909/** \#NM - 07. */
3910VBOXSTRICTRC iemRaiseDeviceNotAvailable(PVMCPUCC pVCpu) RT_NOEXCEPT
3911{
3912 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
3913}
3914
3915
3916/** \#TS(err) - 0a. */
3917VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
3918{
3919 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
3920}
3921
3922
3923/** \#TS(tr) - 0a. */
3924VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPUCC pVCpu) RT_NOEXCEPT
3925{
3926 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
3927 pVCpu->cpum.GstCtx.tr.Sel, 0);
3928}
3929
3930
3931/** \#TS(0) - 0a. */
3932VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPUCC pVCpu) RT_NOEXCEPT
3933{
3934 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
3935 0, 0);
3936}
3937
3938
3939/** \#TS(err) - 0a. */
3940VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
3941{
3942 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
3943 uSel & X86_SEL_MASK_OFF_RPL, 0);
3944}
3945
3946
3947/** \#NP(err) - 0b. */
3948VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
3949{
3950 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
3951}
3952
3953
3954/** \#NP(sel) - 0b. */
3955VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
3956{
3957 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
3958 uSel & ~X86_SEL_RPL, 0);
3959}
3960
3961
3962/** \#SS(seg) - 0c. */
3963VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
3964{
3965 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
3966 uSel & ~X86_SEL_RPL, 0);
3967}
3968
3969
3970/** \#SS(err) - 0c. */
3971VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
3972{
3973 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
3974}
3975
3976
3977/** \#GP(n) - 0d. */
3978VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
3979{
3980 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
3981}
3982
3983
3984/** \#GP(0) - 0d. */
3985VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPUCC pVCpu) RT_NOEXCEPT
3986{
3987 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
3988}
3989
3990#ifdef IEM_WITH_SETJMP
3991/** \#GP(0) - 0d. */
3992DECL_NO_RETURN(void) iemRaiseGeneralProtectionFault0Jmp(PVMCPUCC pVCpu) RT_NOEXCEPT
3993{
3994 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
3995}
3996#endif
3997
3998
3999/** \#GP(sel) - 0d. */
4000VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
4001{
4002 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4003 Sel & ~X86_SEL_RPL, 0);
4004}
4005
4006
4007/** \#GP(0) - 0d. */
4008VBOXSTRICTRC iemRaiseNotCanonical(PVMCPUCC pVCpu) RT_NOEXCEPT
4009{
4010 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4011}
4012
4013
4014/** \#GP(sel) - 0d. */
4015VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4016{
4017 NOREF(iSegReg); NOREF(fAccess);
4018 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4019 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4020}
4021
4022#ifdef IEM_WITH_SETJMP
4023/** \#GP(sel) - 0d, longjmp. */
4024DECL_NO_RETURN(void) iemRaiseSelectorBoundsJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4025{
4026 NOREF(iSegReg); NOREF(fAccess);
4027 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4028 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4029}
4030#endif
4031
4032/** \#GP(sel) - 0d. */
4033VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
4034{
4035 NOREF(Sel);
4036 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4037}
4038
4039#ifdef IEM_WITH_SETJMP
4040/** \#GP(sel) - 0d, longjmp. */
4041DECL_NO_RETURN(void) iemRaiseSelectorBoundsBySelectorJmp(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
4042{
4043 NOREF(Sel);
4044 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4045}
4046#endif
4047
4048
4049/** \#GP(sel) - 0d. */
4050VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4051{
4052 NOREF(iSegReg); NOREF(fAccess);
4053 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4054}
4055
4056#ifdef IEM_WITH_SETJMP
4057/** \#GP(sel) - 0d, longjmp. */
4058DECL_NO_RETURN(void) iemRaiseSelectorInvalidAccessJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4059{
4060 NOREF(iSegReg); NOREF(fAccess);
4061 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4062}
4063#endif
4064
4065
4066/** \#PF(n) - 0e. */
4067VBOXSTRICTRC iemRaisePageFault(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc) RT_NOEXCEPT
4068{
4069 uint16_t uErr;
4070 switch (rc)
4071 {
4072 case VERR_PAGE_NOT_PRESENT:
4073 case VERR_PAGE_TABLE_NOT_PRESENT:
4074 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
4075 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
4076 uErr = 0;
4077 break;
4078
4079 default:
4080 AssertMsgFailed(("%Rrc\n", rc));
4081 RT_FALL_THRU();
4082 case VERR_ACCESS_DENIED:
4083 uErr = X86_TRAP_PF_P;
4084 break;
4085
4086 /** @todo reserved */
4087 }
4088
4089 if (pVCpu->iem.s.uCpl == 3)
4090 uErr |= X86_TRAP_PF_US;
4091
4092 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
4093 && ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
4094 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) ) )
4095 uErr |= X86_TRAP_PF_ID;
4096
4097#if 0 /* This is so much non-sense, really. Why was it done like that? */
4098 /* Note! RW access callers reporting a WRITE protection fault, will clear
4099 the READ flag before calling. So, read-modify-write accesses (RW)
4100 can safely be reported as READ faults. */
4101 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
4102 uErr |= X86_TRAP_PF_RW;
4103#else
4104 if (fAccess & IEM_ACCESS_TYPE_WRITE)
4105 {
4106 /// @todo r=bird: bs3-cpu-basic-2 wants X86_TRAP_PF_RW for xchg and cmpxchg
4107 /// (regardless of outcome of the comparison in the latter case).
4108 //if (!(fAccess & IEM_ACCESS_TYPE_READ))
4109 uErr |= X86_TRAP_PF_RW;
4110 }
4111#endif
4112
4113 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
4114 uErr, GCPtrWhere);
4115}
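/*
 * Illustrative sketch, not part of the IEM code: the error code assembled
 * above uses the architectural #PF layout - P in bit 0, W/R in bit 1, U/S in
 * bit 2 and I/D in bit 4. A ring-3 write to a present but write-protected
 * page would therefore typically come out as:
 *
 *     uint16_t const uErrExample = X86_TRAP_PF_P | X86_TRAP_PF_RW | X86_TRAP_PF_US; // = 0x07
 */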
4116
4117#ifdef IEM_WITH_SETJMP
4118/** \#PF(n) - 0e, longjmp. */
4119DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc) RT_NOEXCEPT
4120{
4121 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, fAccess, rc)));
4122}
4123#endif
4124
4125
4126/** \#MF(0) - 10. */
4127VBOXSTRICTRC iemRaiseMathFault(PVMCPUCC pVCpu)
4128{
4129 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4130}
4131
4132
4133/** \#AC(0) - 11. */
4134VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPUCC pVCpu)
4135{
4136 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4137}
4138
4139#ifdef IEM_WITH_SETJMP
4140/** \#AC(0) - 11, longjmp. */
4141DECL_NO_RETURN(void) iemRaiseAlignmentCheckExceptionJmp(PVMCPUCC pVCpu) RT_NOEXCEPT
4142{
4143 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(iemRaiseAlignmentCheckException(pVCpu)));
4144}
4145#endif
4146
4147
4148/** Accessed via IEMOP_RAISE_DIVIDE_ERROR. */
4149IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
4150{
4151 NOREF(cbInstr);
4152 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4153}
4154
4155
4156/** Accessed via IEMOP_RAISE_INVALID_LOCK_PREFIX. */
4157IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
4158{
4159 NOREF(cbInstr);
4160 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4161}
4162
4163
4164/** Accessed via IEMOP_RAISE_INVALID_OPCODE. */
4165IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
4166{
4167 NOREF(cbInstr);
4168 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4169}
4170
4171
4172/** @} */
4173
4174/** @name Common opcode decoders.
4175 * @{
4176 */
4177//#include <iprt/mem.h>
4178
4179/**
4180 * Used to add extra details about a stub case.
4181 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4182 */
4183void iemOpStubMsg2(PVMCPUCC pVCpu) RT_NOEXCEPT
4184{
4185#if defined(LOG_ENABLED) && defined(IN_RING3)
4186 PVM pVM = pVCpu->CTX_SUFF(pVM);
4187 char szRegs[4096];
4188 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
4189 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
4190 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
4191 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
4192 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
4193 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
4194 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
4195 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
4196 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
4197 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
4198 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
4199 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4200 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4201 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4202 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4203 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4204 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4205 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4206 " efer=%016VR{efer}\n"
4207 " pat=%016VR{pat}\n"
4208 " sf_mask=%016VR{sf_mask}\n"
4209 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4210 " lstar=%016VR{lstar}\n"
4211 " star=%016VR{star} cstar=%016VR{cstar}\n"
4212 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4213 );
4214
4215 char szInstr[256];
4216 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4217 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4218 szInstr, sizeof(szInstr), NULL);
4219
4220 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
4221#else
4222 RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs, pVCpu->cpum.GstCtx.rip);
4223#endif
4224}
4225
4226/** @} */
4227
4228
4229
4230/** @name Register Access.
4231 * @{
4232 */
4233
4234/**
4235 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
4236 *
4237 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4238 * segment limit.
4239 *
4240 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4241 * @param offNextInstr The offset of the next instruction.
4242 */
4243VBOXSTRICTRC iemRegRipRelativeJumpS8(PVMCPUCC pVCpu, int8_t offNextInstr) RT_NOEXCEPT
4244{
4245 switch (pVCpu->iem.s.enmEffOpSize)
4246 {
4247 case IEMMODE_16BIT:
4248 {
4249 uint16_t uNewIp = pVCpu->cpum.GstCtx.ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
4250 if ( uNewIp > pVCpu->cpum.GstCtx.cs.u32Limit
4251 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
4252 return iemRaiseGeneralProtectionFault0(pVCpu);
4253 pVCpu->cpum.GstCtx.rip = uNewIp;
4254 break;
4255 }
4256
4257 case IEMMODE_32BIT:
4258 {
4259 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
4260 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
4261
4262 uint32_t uNewEip = pVCpu->cpum.GstCtx.eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
4263 if (uNewEip > pVCpu->cpum.GstCtx.cs.u32Limit)
4264 return iemRaiseGeneralProtectionFault0(pVCpu);
4265 pVCpu->cpum.GstCtx.rip = uNewEip;
4266 break;
4267 }
4268
4269 case IEMMODE_64BIT:
4270 {
4271 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
4272
4273 uint64_t uNewRip = pVCpu->cpum.GstCtx.rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
4274 if (!IEM_IS_CANONICAL(uNewRip))
4275 return iemRaiseGeneralProtectionFault0(pVCpu);
4276 pVCpu->cpum.GstCtx.rip = uNewRip;
4277 break;
4278 }
4279
4280 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4281 }
4282
4283 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
4284
4285#ifndef IEM_WITH_CODE_TLB
4286 /* Flush the prefetch buffer. */
4287 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4288#endif
4289
4290 return VINF_SUCCESS;
4291}
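/*
 * Illustrative sketch, not part of the IEM code: in the 16-bit case above the
 * new IP is computed in a uint16_t and so wraps at 64 KiB before the limit
 * check. With hypothetical values:
 *
 *     uint16_t const uIp      = 0xfffe;
 *     int8_t   const offRel   = 0x10;
 *     uint8_t  const cbInstr2 = 2;                        // jmp short is two bytes
 *     uint16_t const uNewIp   = uIp + offRel + cbInstr2;  // wraps to 0x0010
 *     // 0x0010 is then checked against CS.limit; no #GP if it is within the limit.
 */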
4292
4293
4294/**
4295 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
4296 *
4297 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4298 * segment limit.
4299 *
4300 * @returns Strict VBox status code.
4301 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4302 * @param offNextInstr The offset of the next instruction.
4303 */
4304VBOXSTRICTRC iemRegRipRelativeJumpS16(PVMCPUCC pVCpu, int16_t offNextInstr) RT_NOEXCEPT
4305{
4306 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
4307
4308 uint16_t uNewIp = pVCpu->cpum.GstCtx.ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
4309 if ( uNewIp > pVCpu->cpum.GstCtx.cs.u32Limit
4310 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
4311 return iemRaiseGeneralProtectionFault0(pVCpu);
4312 /** @todo Test 16-bit jump in 64-bit mode. Possible? */
4313 pVCpu->cpum.GstCtx.rip = uNewIp;
4314 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
4315
4316#ifndef IEM_WITH_CODE_TLB
4317 /* Flush the prefetch buffer. */
4318 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4319#endif
4320
4321 return VINF_SUCCESS;
4322}
4323
4324
4325/**
4326 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
4327 *
4328 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4329 * segment limit.
4330 *
4331 * @returns Strict VBox status code.
4332 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4333 * @param offNextInstr The offset of the next instruction.
4334 */
4335VBOXSTRICTRC iemRegRipRelativeJumpS32(PVMCPUCC pVCpu, int32_t offNextInstr) RT_NOEXCEPT
4336{
4337 Assert(pVCpu->iem.s.enmEffOpSize != IEMMODE_16BIT);
4338
4339 if (pVCpu->iem.s.enmEffOpSize == IEMMODE_32BIT)
4340 {
4341 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX); Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
4342
4343 uint32_t uNewEip = pVCpu->cpum.GstCtx.eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
4344 if (uNewEip > pVCpu->cpum.GstCtx.cs.u32Limit)
4345 return iemRaiseGeneralProtectionFault0(pVCpu);
4346 pVCpu->cpum.GstCtx.rip = uNewEip;
4347 }
4348 else
4349 {
4350 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
4351
4352 uint64_t uNewRip = pVCpu->cpum.GstCtx.rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
4353 if (!IEM_IS_CANONICAL(uNewRip))
4354 return iemRaiseGeneralProtectionFault0(pVCpu);
4355 pVCpu->cpum.GstCtx.rip = uNewRip;
4356 }
4357 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
4358
4359#ifndef IEM_WITH_CODE_TLB
4360 /* Flush the prefetch buffer. */
4361 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4362#endif
4363
4364 return VINF_SUCCESS;
4365}
4366
4367
4368/**
4369 * Performs a near jump to the specified address.
4370 *
4371 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4372 * segment limit.
4373 *
4374 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4375 * @param uNewRip The new RIP value.
4376 */
4377VBOXSTRICTRC iemRegRipJump(PVMCPUCC pVCpu, uint64_t uNewRip) RT_NOEXCEPT
4378{
4379 switch (pVCpu->iem.s.enmEffOpSize)
4380 {
4381 case IEMMODE_16BIT:
4382 {
4383 Assert(uNewRip <= UINT16_MAX);
4384 if ( uNewRip > pVCpu->cpum.GstCtx.cs.u32Limit
4385 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
4386 return iemRaiseGeneralProtectionFault0(pVCpu);
4387 /** @todo Test 16-bit jump in 64-bit mode. */
4388 pVCpu->cpum.GstCtx.rip = uNewRip;
4389 break;
4390 }
4391
4392 case IEMMODE_32BIT:
4393 {
4394 Assert(uNewRip <= UINT32_MAX);
4395 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
4396 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
4397
4398 if (uNewRip > pVCpu->cpum.GstCtx.cs.u32Limit)
4399 return iemRaiseGeneralProtectionFault0(pVCpu);
4400 pVCpu->cpum.GstCtx.rip = uNewRip;
4401 break;
4402 }
4403
4404 case IEMMODE_64BIT:
4405 {
4406 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
4407
4408 if (!IEM_IS_CANONICAL(uNewRip))
4409 return iemRaiseGeneralProtectionFault0(pVCpu);
4410 pVCpu->cpum.GstCtx.rip = uNewRip;
4411 break;
4412 }
4413
4414 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4415 }
4416
4417 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
4418
4419#ifndef IEM_WITH_CODE_TLB
4420 /* Flush the prefetch buffer. */
4421 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4422#endif
4423
4424 return VINF_SUCCESS;
4425}
4426
4427/** @} */
4428
4429
4430/** @name FPU access and helpers.
4431 *
4432 * @{
4433 */
4434
4435/**
4436 * Updates the x87.DS and FPUDP registers.
4437 *
4438 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4439 * @param pFpuCtx The FPU context.
4440 * @param iEffSeg The effective segment register.
4441 * @param GCPtrEff The effective address relative to @a iEffSeg.
4442 */
4443DECLINLINE(void) iemFpuUpdateDP(PVMCPUCC pVCpu, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
4444{
4445 RTSEL sel;
4446 switch (iEffSeg)
4447 {
4448 case X86_SREG_DS: sel = pVCpu->cpum.GstCtx.ds.Sel; break;
4449 case X86_SREG_SS: sel = pVCpu->cpum.GstCtx.ss.Sel; break;
4450 case X86_SREG_CS: sel = pVCpu->cpum.GstCtx.cs.Sel; break;
4451 case X86_SREG_ES: sel = pVCpu->cpum.GstCtx.es.Sel; break;
4452 case X86_SREG_FS: sel = pVCpu->cpum.GstCtx.fs.Sel; break;
4453 case X86_SREG_GS: sel = pVCpu->cpum.GstCtx.gs.Sel; break;
4454 default:
4455 AssertMsgFailed(("%d\n", iEffSeg));
4456 sel = pVCpu->cpum.GstCtx.ds.Sel;
4457 }
4458 /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
4459 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
4460 {
4461 pFpuCtx->DS = 0;
4462 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
4463 }
4464 else if (!IEM_IS_LONG_MODE(pVCpu))
4465 {
4466 pFpuCtx->DS = sel;
4467 pFpuCtx->FPUDP = GCPtrEff;
4468 }
4469 else
4470 *(uint64_t *)&pFpuCtx->FPUDP = GCPtrEff;
4471}
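/*
 * Illustrative sketch, not part of the IEM code: in real and V8086 mode the
 * data pointer recorded above is a linear address, i.e. sixteen times the
 * selector plus the effective offset. With hypothetical values:
 *
 *     uint16_t const uSel   = 0x1234;
 *     uint32_t const offEff = 0x0056;
 *     uint32_t const uFpuDp = offEff + ((uint32_t)uSel << 4); // = 0x12396
 */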
4472
4473
4474/**
4475 * Rotates the stack registers in the push direction.
4476 *
4477 * @param pFpuCtx The FPU context.
4478 * @remarks This is a complete waste of time, but fxsave stores the registers in
4479 * stack order.
4480 */
4481DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
4482{
4483 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
4484 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
4485 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
4486 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
4487 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
4488 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
4489 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
4490 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
4491 pFpuCtx->aRegs[0].r80 = r80Tmp;
4492}
4493
4494
4495/**
4496 * Rotates the stack registers in the pop direction.
4497 *
4498 * @param pFpuCtx The FPU context.
4499 * @remarks This is a complete waste of time, but fxsave stores the registers in
4500 * stack order.
4501 */
4502DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
4503{
4504 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
4505 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
4506 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
4507 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
4508 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
4509 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
4510 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
4511 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
4512 pFpuCtx->aRegs[7].r80 = r80Tmp;
4513}
4514
4515
4516/**
4517 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
4518 * exception prevents it.
4519 *
4520 * @param pResult The FPU operation result to push.
4521 * @param pFpuCtx The FPU context.
4522 */
4523static void iemFpuMaybePushResult(PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx) RT_NOEXCEPT
4524{
4525 /* Update FSW and bail if there are pending exceptions afterwards. */
4526 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
4527 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
4528 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4529 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4530 {
4531 pFpuCtx->FSW = fFsw;
4532 return;
4533 }
4534
4535 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
4536 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
4537 {
4538 /* All is fine, push the actual value. */
4539 pFpuCtx->FTW |= RT_BIT(iNewTop);
4540 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
4541 }
4542 else if (pFpuCtx->FCW & X86_FCW_IM)
4543 {
4544 /* Masked stack overflow, push QNaN. */
4545 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
4546 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
4547 }
4548 else
4549 {
4550 /* Raise stack overflow, don't push anything. */
4551 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
4552 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
4553 return;
4554 }
4555
4556 fFsw &= ~X86_FSW_TOP_MASK;
4557 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
4558 pFpuCtx->FSW = fFsw;
4559
4560 iemFpuRotateStackPush(pFpuCtx);
4561}
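
/*
 * Note on the TOP arithmetic above (illustrative only): the x87 stack pointer
 * is a 3-bit field, so "TOP - 1" is computed as (TOP + 7) & X86_FSW_TOP_SMASK.
 * E.g. with TOP=0 a push wraps to the new top 7, and the value is written to
 * aRegs[7] because aRegs[] is kept in stack order (aRegs[0] is always ST(0))
 * and only rotated afterwards.
 */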
4562
4563
4564/**
4565 * Stores a result in a FPU register and updates the FSW and FTW.
4566 *
4567 * @param pFpuCtx The FPU context.
4568 * @param pResult The result to store.
4569 * @param iStReg Which FPU register to store it in.
4570 */
4571static void iemFpuStoreResultOnly(PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg) RT_NOEXCEPT
4572{
4573 Assert(iStReg < 8);
4574 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
4575 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
4576 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_TOP_MASK;
4577 pFpuCtx->FTW |= RT_BIT(iReg);
4578 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
4579}
4580
4581
4582/**
4583 * Only updates the FPU status word (FSW) with the result of the current
4584 * instruction.
4585 *
4586 * @param pFpuCtx The FPU context.
4587 * @param u16FSW The FSW output of the current instruction.
4588 */
4589static void iemFpuUpdateFSWOnly(PX86FXSTATE pFpuCtx, uint16_t u16FSW) RT_NOEXCEPT
4590{
4591 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
4592 pFpuCtx->FSW |= u16FSW & ~X86_FSW_TOP_MASK;
4593}
4594
4595
4596/**
4597 * Pops one item off the FPU stack if no pending exception prevents it.
4598 *
4599 * @param pFpuCtx The FPU context.
4600 */
4601static void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx) RT_NOEXCEPT
4602{
4603 /* Check pending exceptions. */
4604 uint16_t uFSW = pFpuCtx->FSW;
4605 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4606 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4607 return;
4608
4609 /* TOP--. */
4610 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
4611 uFSW &= ~X86_FSW_TOP_MASK;
4612 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
4613 pFpuCtx->FSW = uFSW;
4614
4615 /* Mark the previous ST0 as empty. */
4616 iOldTop >>= X86_FSW_TOP_SHIFT;
4617 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
4618
4619 /* Rotate the registers. */
4620 iemFpuRotateStackPop(pFpuCtx);
4621}
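
/*
 * Note on the pop above (illustrative only): "TOP + 1" is done inside the
 * masked FSW field by adding 9 << X86_FSW_TOP_SHIFT and masking again, which
 * works because 9 & 7 == 1.  E.g. an old TOP of 7 wraps back to 0, and the FTW
 * bit of the old top slot is cleared to mark the popped register empty before
 * the register array is rotated.
 */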
4622
4623
4624/**
4625 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
4626 *
4627 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4628 * @param pResult The FPU operation result to push.
4629 */
4630void iemFpuPushResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult) RT_NOEXCEPT
4631{
4632 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4633 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4634 iemFpuMaybePushResult(pResult, pFpuCtx);
4635}
4636
4637
4638/**
4639 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
4640 * and sets FPUDP and FPUDS.
4641 *
4642 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4643 * @param pResult The FPU operation result to push.
4644 * @param iEffSeg The effective segment register.
4645 * @param GCPtrEff The effective address relative to @a iEffSeg.
4646 */
4647void iemFpuPushResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
4648{
4649 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4650 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
4651 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4652 iemFpuMaybePushResult(pResult, pFpuCtx);
4653}
4654
4655
4656/**
4657 * Replaces ST0 with the first value and pushes the second onto the FPU stack,
4658 * unless a pending exception prevents it.
4659 *
4660 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4661 * @param pResult The FPU operation result to store and push.
4662 */
4663void iemFpuPushResultTwo(PVMCPUCC pVCpu, PIEMFPURESULTTWO pResult) RT_NOEXCEPT
4664{
4665 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4666 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4667
4668 /* Update FSW and bail if there are pending exceptions afterwards. */
4669 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
4670 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
4671 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4672 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4673 {
4674 pFpuCtx->FSW = fFsw;
4675 return;
4676 }
4677
4678 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
4679 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
4680 {
4681 /* All is fine, push the actual value. */
4682 pFpuCtx->FTW |= RT_BIT(iNewTop);
4683 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
4684 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
4685 }
4686 else if (pFpuCtx->FCW & X86_FCW_IM)
4687 {
4688 /* Masked stack overflow, push QNaN. */
4689 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
4690 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
4691 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
4692 }
4693 else
4694 {
4695 /* Raise stack overflow, don't push anything. */
4696 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
4697 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
4698 return;
4699 }
4700
4701 fFsw &= ~X86_FSW_TOP_MASK;
4702 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
4703 pFpuCtx->FSW = fFsw;
4704
4705 iemFpuRotateStackPush(pFpuCtx);
4706}
4707
4708
4709/**
4710 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
4711 * FOP.
4712 *
4713 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4714 * @param pResult The result to store.
4715 * @param iStReg Which FPU register to store it in.
4716 */
4717void iemFpuStoreResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg) RT_NOEXCEPT
4718{
4719 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4720 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4721 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
4722}
4723
4724
4725/**
4726 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
4727 * FOP, and then pops the stack.
4728 *
4729 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4730 * @param pResult The result to store.
4731 * @param iStReg Which FPU register to store it in.
4732 */
4733void iemFpuStoreResultThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg) RT_NOEXCEPT
4734{
4735 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4736 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4737 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
4738 iemFpuMaybePopOne(pFpuCtx);
4739}
4740
4741
4742/**
4743 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
4744 * FPUDP, and FPUDS.
4745 *
4746 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4747 * @param pResult The result to store.
4748 * @param iStReg Which FPU register to store it in.
4749 * @param iEffSeg The effective memory operand selector register.
4750 * @param GCPtrEff The effective memory operand offset.
4751 */
4752void iemFpuStoreResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
4753 uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
4754{
4755 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4756 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
4757 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4758 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
4759}
4760
4761
4762/**
4763 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
4764 * FPUDP, and FPUDS, and then pops the stack.
4765 *
4766 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4767 * @param pResult The result to store.
4768 * @param iStReg Which FPU register to store it in.
4769 * @param iEffSeg The effective memory operand selector register.
4770 * @param GCPtrEff The effective memory operand offset.
4771 */
4772void iemFpuStoreResultWithMemOpThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult,
4773 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
4774{
4775 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4776 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
4777 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4778 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
4779 iemFpuMaybePopOne(pFpuCtx);
4780}
4781
4782
4783/**
4784 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
4785 *
4786 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4787 */
4788void iemFpuUpdateOpcodeAndIp(PVMCPUCC pVCpu) RT_NOEXCEPT
4789{
4790 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4791 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4792}
4793
4794
4795/**
4796 * Updates the FSW, FOP, FPUIP, and FPUCS.
4797 *
4798 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4799 * @param u16FSW The FSW from the current instruction.
4800 */
4801void iemFpuUpdateFSW(PVMCPUCC pVCpu, uint16_t u16FSW) RT_NOEXCEPT
4802{
4803 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4804 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4805 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
4806}
4807
4808
4809/**
4810 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
4811 *
4812 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4813 * @param u16FSW The FSW from the current instruction.
4814 */
4815void iemFpuUpdateFSWThenPop(PVMCPUCC pVCpu, uint16_t u16FSW) RT_NOEXCEPT
4816{
4817 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4818 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4819 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
4820 iemFpuMaybePopOne(pFpuCtx);
4821}
4822
4823
4824/**
4825 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
4826 *
4827 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4828 * @param u16FSW The FSW from the current instruction.
4829 * @param iEffSeg The effective memory operand selector register.
4830 * @param GCPtrEff The effective memory operand offset.
4831 */
4832void iemFpuUpdateFSWWithMemOp(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
4833{
4834 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4835 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
4836 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4837 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
4838}
4839
4840
4841/**
4842 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
4843 *
4844 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4845 * @param u16FSW The FSW from the current instruction.
4846 */
4847void iemFpuUpdateFSWThenPopPop(PVMCPUCC pVCpu, uint16_t u16FSW) RT_NOEXCEPT
4848{
4849 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4850 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4851 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
4852 iemFpuMaybePopOne(pFpuCtx);
4853 iemFpuMaybePopOne(pFpuCtx);
4854}
4855
4856
4857/**
4858 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
4859 *
4860 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4861 * @param u16FSW The FSW from the current instruction.
4862 * @param iEffSeg The effective memory operand selector register.
4863 * @param GCPtrEff The effective memory operand offset.
4864 */
4865void iemFpuUpdateFSWWithMemOpThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
4866{
4867 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4868 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
4869 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4870 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
4871 iemFpuMaybePopOne(pFpuCtx);
4872}
4873
4874
4875/**
4876 * Worker routine for raising an FPU stack underflow exception.
4877 *
4878 * @param pFpuCtx The FPU context.
4879 * @param iStReg The stack register being accessed.
4880 */
4881static void iemFpuStackUnderflowOnly(PX86FXSTATE pFpuCtx, uint8_t iStReg)
4882{
4883 Assert(iStReg < 8 || iStReg == UINT8_MAX);
4884 if (pFpuCtx->FCW & X86_FCW_IM)
4885 {
4886 /* Masked underflow. */
4887 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
4888 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
4889 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
4890 if (iStReg != UINT8_MAX)
4891 {
4892 pFpuCtx->FTW |= RT_BIT(iReg);
4893 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
4894 }
4895 }
4896 else
4897 {
4898 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
4899 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
4900 }
4901}
4902
4903
4904/**
4905 * Raises a FPU stack underflow exception.
4906 *
4907 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4908 * @param iStReg The destination register that should be loaded
4909 * with QNaN if \#IS is not masked. Specify
4910 * UINT8_MAX if none (like for fcom).
4911 */
4912void iemFpuStackUnderflow(PVMCPUCC pVCpu, uint8_t iStReg) RT_NOEXCEPT
4913{
4914 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4915 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4916 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
4917}
4918
4919
4920void iemFpuStackUnderflowWithMemOp(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
4921{
4922 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4923 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
4924 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4925 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
4926}
4927
4928
4929void iemFpuStackUnderflowThenPop(PVMCPUCC pVCpu, uint8_t iStReg) RT_NOEXCEPT
4930{
4931 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4932 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4933 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
4934 iemFpuMaybePopOne(pFpuCtx);
4935}
4936
4937
4938void iemFpuStackUnderflowWithMemOpThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
4939{
4940 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4941 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
4942 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4943 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
4944 iemFpuMaybePopOne(pFpuCtx);
4945}
4946
4947
4948void iemFpuStackUnderflowThenPopPop(PVMCPUCC pVCpu) RT_NOEXCEPT
4949{
4950 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4951 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4952 iemFpuStackUnderflowOnly(pFpuCtx, UINT8_MAX);
4953 iemFpuMaybePopOne(pFpuCtx);
4954 iemFpuMaybePopOne(pFpuCtx);
4955}
4956
4957
4958void iemFpuStackPushUnderflow(PVMCPUCC pVCpu) RT_NOEXCEPT
4959{
4960 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4961 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4962
4963 if (pFpuCtx->FCW & X86_FCW_IM)
4964 {
4965 /* Masked underflow - Push QNaN. */
4966 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
4967 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
4968 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
4969 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
4970 pFpuCtx->FTW |= RT_BIT(iNewTop);
4971 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
4972 iemFpuRotateStackPush(pFpuCtx);
4973 }
4974 else
4975 {
4976 /* Exception pending - don't change TOP or the register stack. */
4977 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
4978 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
4979 }
4980}
4981
4982
4983void iemFpuStackPushUnderflowTwo(PVMCPUCC pVCpu) RT_NOEXCEPT
4984{
4985 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4986 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4987
4988 if (pFpuCtx->FCW & X86_FCW_IM)
4989 {
4990 /* Masked underflow - Push QNaN. */
4991 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
4992 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
4993 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
4994 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
4995 pFpuCtx->FTW |= RT_BIT(iNewTop);
4996 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
4997 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
4998 iemFpuRotateStackPush(pFpuCtx);
4999 }
5000 else
5001 {
5002 /* Exception pending - don't change TOP or the register stack. */
5003 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5004 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5005 }
5006}
5007
5008
5009/**
5010 * Worker routine for raising an FPU stack overflow exception on a push.
5011 *
5012 * @param pFpuCtx The FPU context.
5013 */
5014static void iemFpuStackPushOverflowOnly(PX86FXSTATE pFpuCtx) RT_NOEXCEPT
5015{
5016 if (pFpuCtx->FCW & X86_FCW_IM)
5017 {
5018 /* Masked overflow. */
5019 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5020 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5021 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
5022 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5023 pFpuCtx->FTW |= RT_BIT(iNewTop);
5024 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5025 iemFpuRotateStackPush(pFpuCtx);
5026 }
5027 else
5028 {
5029 /* Exception pending - don't change TOP or the register stack. */
5030 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5031 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5032 }
5033}
5034
5035
5036/**
5037 * Raises a FPU stack overflow exception on a push.
5038 *
5039 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5040 */
5041void iemFpuStackPushOverflow(PVMCPUCC pVCpu) RT_NOEXCEPT
5042{
5043 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5044 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5045 iemFpuStackPushOverflowOnly(pFpuCtx);
5046}
5047
5048
5049/**
5050 * Raises a FPU stack overflow exception on a push with a memory operand.
5051 *
5052 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5053 * @param iEffSeg The effective memory operand selector register.
5054 * @param GCPtrEff The effective memory operand offset.
5055 */
5056void iemFpuStackPushOverflowWithMemOp(PVMCPUCC pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
5057{
5058 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5059 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5060 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5061 iemFpuStackPushOverflowOnly(pFpuCtx);
5062}
5063
5064/** @} */
5065
5066
5067/** @name Memory access.
5068 *
5069 * @{
5070 */
5071
5072
5073/**
5074 * Updates the IEMCPU::cbWritten counter if applicable.
5075 *
5076 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5077 * @param fAccess The access being accounted for.
5078 * @param cbMem The access size.
5079 */
5080DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPUCC pVCpu, uint32_t fAccess, size_t cbMem)
5081{
5082 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
5083 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
5084 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
5085}
5086
5087
5088/**
5089 * Applies the segment limit, base and attributes.
5090 *
5091 * This may raise a \#GP or \#SS.
5092 *
5093 * @returns VBox strict status code.
5094 *
5095 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5096 * @param fAccess The kind of access which is being performed.
5097 * @param iSegReg The index of the segment register to apply.
5098 * This is UINT8_MAX if none (for IDT, GDT, LDT,
5099 * TSS, ++).
5100 * @param cbMem The access size.
5101 * @param pGCPtrMem Pointer to the guest memory address to apply
5102 * segmentation to. Input and output parameter.
5103 */
5104VBOXSTRICTRC iemMemApplySegment(PVMCPUCC pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem) RT_NOEXCEPT
5105{
5106 if (iSegReg == UINT8_MAX)
5107 return VINF_SUCCESS;
5108
5109 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
5110 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
5111 switch (pVCpu->iem.s.enmCpuMode)
5112 {
5113 case IEMMODE_16BIT:
5114 case IEMMODE_32BIT:
5115 {
5116 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
5117 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
5118
5119 if ( pSel->Attr.n.u1Present
5120 && !pSel->Attr.n.u1Unusable)
5121 {
5122 Assert(pSel->Attr.n.u1DescType);
5123 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
5124 {
5125 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5126 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
5127 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
5128
5129 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
5130 {
5131 /** @todo CPL check. */
5132 }
5133
5134 /*
5135 * There are two kinds of data selectors, normal and expand down.
5136 */
5137 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
5138 {
5139 if ( GCPtrFirst32 > pSel->u32Limit
5140 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
5141 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5142 }
5143 else
5144 {
5145 /*
5146 * The upper boundary is defined by the B bit, not the G bit!
5147 */
5148 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
5149 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
5150 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5151 }
5152 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
5153 }
5154 else
5155 {
5156 /*
5157 * Code selectors can usually be used to read through; writing is
5158 * only permitted in real and V8086 mode.
5159 */
5160 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5161 || ( (fAccess & IEM_ACCESS_TYPE_READ)
5162 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
5163 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
5164 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
5165
5166 if ( GCPtrFirst32 > pSel->u32Limit
5167 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
5168 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5169
5170 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
5171 {
5172 /** @todo CPL check. */
5173 }
5174
5175 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
5176 }
5177 }
5178 else
5179 return iemRaiseGeneralProtectionFault0(pVCpu);
5180 return VINF_SUCCESS;
5181 }
5182
5183 case IEMMODE_64BIT:
5184 {
5185 RTGCPTR GCPtrMem = *pGCPtrMem;
5186 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
5187 *pGCPtrMem = GCPtrMem + pSel->u64Base;
5188
5189 Assert(cbMem >= 1);
5190 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
5191 return VINF_SUCCESS;
5192 /** @todo We should probably raise \#SS(0) here if segment is SS; see AMD spec.
5193 * 4.12.2 "Data Limit Checks in 64-bit Mode". */
5194 return iemRaiseGeneralProtectionFault0(pVCpu);
5195 }
5196
5197 default:
5198 AssertFailedReturn(VERR_IEM_IPE_7);
5199 }
5200}
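
/*
 * Worked example (illustrative only) of the 16/32-bit limit checks above: for
 * a normal (expand-up) data segment with u32Limit=0x0000ffff, a 4-byte access
 * at offset 0xfffd faults because GCPtrLast32=0x10000 exceeds the limit.  For
 * an expand-down segment with the same limit and B=1, the valid offsets are
 * 0x10000..0xffffffff instead, so that same access faults while one at offset
 * 0x00010000 passes.
 */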
5201
5202
5203/**
5204 * Translates a virtual address to a physical address and checks if we
5205 * can access the page as specified.
5206 *
5207 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5208 * @param GCPtrMem The virtual address.
5209 * @param fAccess The intended access.
5210 * @param pGCPhysMem Where to return the physical address.
5211 */
5212VBOXSTRICTRC iemMemPageTranslateAndCheckAccess(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint32_t fAccess, PRTGCPHYS pGCPhysMem) RT_NOEXCEPT
5213{
5214 /** @todo Need a different PGM interface here. We're currently using
5215 * generic / REM interfaces. This won't cut it for R0. */
5216 /** @todo If/when PGM handles paged real-mode, we can remove the hack in
5217 * iemSvmWorldSwitch/iemVmxWorldSwitch to work around raising a page-fault
5218 * here. */
5219 PGMPTWALK Walk;
5220 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
5221 if (RT_FAILURE(rc))
5222 {
5223 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
5224 /** @todo Check unassigned memory in unpaged mode. */
5225 /** @todo Reserved bits in page tables. Requires new PGM interface. */
5226#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5227 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5228 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
5229#endif
5230 *pGCPhysMem = NIL_RTGCPHYS;
5231 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, rc);
5232 }
5233
5234 /* If the page is writable and does not have the no-exec bit set, all
5235 access is allowed. Otherwise we'll have to check more carefully... */
5236 if ((Walk.fEffective & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
5237 {
5238 /* Write to read only memory? */
5239 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5240 && !(Walk.fEffective & X86_PTE_RW)
5241 && ( ( pVCpu->iem.s.uCpl == 3
5242 && !(fAccess & IEM_ACCESS_WHAT_SYS))
5243 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
5244 {
5245 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
5246 *pGCPhysMem = NIL_RTGCPHYS;
5247#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5248 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5249 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5250#endif
5251 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
5252 }
5253
5254 /* Kernel memory accessed by userland? */
5255 if ( !(Walk.fEffective & X86_PTE_US)
5256 && pVCpu->iem.s.uCpl == 3
5257 && !(fAccess & IEM_ACCESS_WHAT_SYS))
5258 {
5259 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
5260 *pGCPhysMem = NIL_RTGCPHYS;
5261#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5262 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5263 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5264#endif
5265 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
5266 }
5267
5268 /* Executing non-executable memory? */
5269 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
5270 && (Walk.fEffective & X86_PTE_PAE_NX)
5271 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) )
5272 {
5273 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
5274 *pGCPhysMem = NIL_RTGCPHYS;
5275#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5276 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5277 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5278#endif
5279 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
5280 VERR_ACCESS_DENIED);
5281 }
5282 }
5283
5284 /*
5285 * Set the dirty / access flags.
5286 * ASSUMES this is set when the address is translated rather than on commit...
5287 */
5288 /** @todo testcase: check when A and D bits are actually set by the CPU. */
5289 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
5290 if ((Walk.fEffective & fAccessedDirty) != fAccessedDirty)
5291 {
5292 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
5293 AssertRC(rc2);
5294 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
5295 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
5296 }
5297
5298 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
5299 *pGCPhysMem = GCPhys;
5300 return VINF_SUCCESS;
5301}
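
/*
 * Illustrative sketch (not part of the build): the typical call pattern for
 * the translation helper above, here used to read a few bytes of guest memory
 * through the resulting physical address.  This deliberately ignores the
 * page-crossing and bounce-buffer handling that iemMemMap layers on top.
 *
 *     uint8_t      abBuf[8];
 *     RTGCPHYS     GCPhys;
 *     VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem,
 *                                                               IEM_ACCESS_DATA_R, &GCPhys);
 *     if (rcStrict == VINF_SUCCESS)
 *         rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, abBuf, sizeof(abBuf),
 *                                PGMACCESSORIGIN_IEM);
 */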
5302
5303
5304/**
5305 * Looks up a memory mapping entry.
5306 *
5307 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
5308 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5309 * @param pvMem The memory address.
5310 * @param fAccess The kind of access the mapping was made with.
5311 */
5312DECLINLINE(int) iemMapLookup(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess)
5313{
5314 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
5315 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
5316 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
5317 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5318 return 0;
5319 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
5320 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5321 return 1;
5322 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
5323 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5324 return 2;
5325 return VERR_NOT_FOUND;
5326}
5327
5328
5329/**
5330 * Finds a free memmap entry when using iNextMapping doesn't work.
5331 *
5332 * @returns Memory mapping index, 1024 on failure.
5333 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5334 */
5335static unsigned iemMemMapFindFree(PVMCPUCC pVCpu)
5336{
5337 /*
5338 * The easy case.
5339 */
5340 if (pVCpu->iem.s.cActiveMappings == 0)
5341 {
5342 pVCpu->iem.s.iNextMapping = 1;
5343 return 0;
5344 }
5345
5346 /* There should be enough mappings for all instructions. */
5347 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
5348
5349 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
5350 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
5351 return i;
5352
5353 AssertFailedReturn(1024);
5354}
5355
5356
5357/**
5358 * Commits a bounce buffer that needs writing back and unmaps it.
5359 *
5360 * @returns Strict VBox status code.
5361 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5362 * @param iMemMap The index of the buffer to commit.
5363 * @param fPostponeFail Whether we can postpone writer failures to ring-3.
5364 * Always false in ring-3, obviously.
5365 */
5366static VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPUCC pVCpu, unsigned iMemMap, bool fPostponeFail)
5367{
5368 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
5369 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
5370#ifdef IN_RING3
5371 Assert(!fPostponeFail);
5372 RT_NOREF_PV(fPostponeFail);
5373#endif
5374
5375 /*
5376 * Do the writing.
5377 */
5378 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
5379 if (!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned)
5380 {
5381 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
5382 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
5383 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
5384 if (!pVCpu->iem.s.fBypassHandlers)
5385 {
5386 /*
5387 * Carefully and efficiently dealing with access handler return
5388 * codes makes this a little bloated.
5389 */
5390 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
5391 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
5392 pbBuf,
5393 cbFirst,
5394 PGMACCESSORIGIN_IEM);
5395 if (rcStrict == VINF_SUCCESS)
5396 {
5397 if (cbSecond)
5398 {
5399 rcStrict = PGMPhysWrite(pVM,
5400 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5401 pbBuf + cbFirst,
5402 cbSecond,
5403 PGMACCESSORIGIN_IEM);
5404 if (rcStrict == VINF_SUCCESS)
5405 { /* nothing */ }
5406 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5407 {
5408 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
5409 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5410 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5411 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5412 }
5413#ifndef IN_RING3
5414 else if (fPostponeFail)
5415 {
5416 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5417 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5418 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5419 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
5420 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5421 return iemSetPassUpStatus(pVCpu, rcStrict);
5422 }
5423#endif
5424 else
5425 {
5426 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5427 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5428 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5429 return rcStrict;
5430 }
5431 }
5432 }
5433 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5434 {
5435 if (!cbSecond)
5436 {
5437 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
5438 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
5439 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5440 }
5441 else
5442 {
5443 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
5444 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5445 pbBuf + cbFirst,
5446 cbSecond,
5447 PGMACCESSORIGIN_IEM);
5448 if (rcStrict2 == VINF_SUCCESS)
5449 {
5450 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
5451 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5452 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5453 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5454 }
5455 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
5456 {
5457 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
5458 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5459 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
5460 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
5461 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5462 }
5463#ifndef IN_RING3
5464 else if (fPostponeFail)
5465 {
5466 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5467 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5468 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5469 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
5470 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5471 return iemSetPassUpStatus(pVCpu, rcStrict);
5472 }
5473#endif
5474 else
5475 {
5476 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5477 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5478 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
5479 return rcStrict2;
5480 }
5481 }
5482 }
5483#ifndef IN_RING3
5484 else if (fPostponeFail)
5485 {
5486 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5487 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5488 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5489 if (!cbSecond)
5490 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
5491 else
5492 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
5493 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5494 return iemSetPassUpStatus(pVCpu, rcStrict);
5495 }
5496#endif
5497 else
5498 {
5499 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
5500 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5501 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5502 return rcStrict;
5503 }
5504 }
5505 else
5506 {
5507 /*
5508 * No access handlers, much simpler.
5509 */
5510 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
5511 if (RT_SUCCESS(rc))
5512 {
5513 if (cbSecond)
5514 {
5515 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
5516 if (RT_SUCCESS(rc))
5517 { /* likely */ }
5518 else
5519 {
5520 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5521 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5522 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
5523 return rc;
5524 }
5525 }
5526 }
5527 else
5528 {
5529 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
5530 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
5531 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5532 return rc;
5533 }
5534 }
5535 }
5536
5537#if defined(IEM_LOG_MEMORY_WRITES)
5538 Log(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
5539 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
5540 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
5541 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5542 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
5543 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
5544
5545 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
5546 g_cbIemWrote = cbWrote;
5547 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
5548#endif
5549
5550 /*
5551 * Free the mapping entry.
5552 */
5553 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
5554 Assert(pVCpu->iem.s.cActiveMappings != 0);
5555 pVCpu->iem.s.cActiveMappings--;
5556 return VINF_SUCCESS;
5557}
5558
5559
5560/**
5561 * iemMemMap worker that deals with a request crossing pages.
5562 */
5563static VBOXSTRICTRC
5564iemMemBounceBufferMapCrossPage(PVMCPUCC pVCpu, int iMemMap, void **ppvMem, size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
5565{
5566 /*
5567 * Do the address translations.
5568 */
5569 RTGCPHYS GCPhysFirst;
5570 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, fAccess, &GCPhysFirst);
5571 if (rcStrict != VINF_SUCCESS)
5572 return rcStrict;
5573
5574 RTGCPHYS GCPhysSecond;
5575 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK,
5576 fAccess, &GCPhysSecond);
5577 if (rcStrict != VINF_SUCCESS)
5578 return rcStrict;
5579 GCPhysSecond &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
5580
5581 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
5582
5583 /*
5584 * Read in the current memory content if it's a read, execute or partial
5585 * write access.
5586 */
5587 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
5588 uint32_t const cbFirstPage = GUEST_PAGE_SIZE - (GCPhysFirst & GUEST_PAGE_OFFSET_MASK);
5589 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
5590
5591 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
5592 {
5593 if (!pVCpu->iem.s.fBypassHandlers)
5594 {
5595 /*
5596 * Must carefully deal with access handler status codes here,
5597 * which makes the code a bit bloated.
5598 */
5599 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
5600 if (rcStrict == VINF_SUCCESS)
5601 {
5602 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
5603 if (rcStrict == VINF_SUCCESS)
5604 { /*likely */ }
5605 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5606 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5607 else
5608 {
5609 Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (!!)\n",
5610 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5611 return rcStrict;
5612 }
5613 }
5614 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5615 {
5616 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
5617 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
5618 {
5619 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
5620 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5621 }
5622 else
5623 {
5624 Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
5625 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
5626 return rcStrict2;
5627 }
5628 }
5629 else
5630 {
5631 Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
5632 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
5633 return rcStrict;
5634 }
5635 }
5636 else
5637 {
5638 /*
5639 * No informational status codes here, much more straightforward.
5640 */
5641 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
5642 if (RT_SUCCESS(rc))
5643 {
5644 Assert(rc == VINF_SUCCESS);
5645 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
5646 if (RT_SUCCESS(rc))
5647 Assert(rc == VINF_SUCCESS);
5648 else
5649 {
5650 Log(("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
5651 return rc;
5652 }
5653 }
5654 else
5655 {
5656 Log(("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
5657 return rc;
5658 }
5659 }
5660 }
5661#ifdef VBOX_STRICT
5662 else
5663 memset(pbBuf, 0xcc, cbMem);
5664 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
5665 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
5666#endif
5667
5668 /*
5669 * Commit the bounce buffer entry.
5670 */
5671 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
5672 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
5673 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
5674 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
5675 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
5676 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
5677 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
5678 pVCpu->iem.s.iNextMapping = iMemMap + 1;
5679 pVCpu->iem.s.cActiveMappings++;
5680
5681 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
5682 *ppvMem = pbBuf;
5683 return VINF_SUCCESS;
5684}
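
/*
 * Worked example (illustrative only) of the split above: a 4-byte access at a
 * linear address whose page offset is 0xffe gives
 * cbFirstPage = 0x1000 - 0xffe = 2 and cbSecondPage = 4 - 2 = 2, so two bytes
 * come from/go to each guest page and are glued together in the bounce buffer
 * that the caller sees as a single contiguous mapping.
 */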
5685
5686
5687/**
5688 * iemMemMap worker that deals with iemMemPageMap failures.
5689 */
5690static VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPUCC pVCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
5691 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
5692{
5693 /*
5694 * Filter out conditions we can handle and the ones which shouldn't happen.
5695 */
5696 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
5697 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
5698 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
5699 {
5700 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
5701 return rcMap;
5702 }
5703 pVCpu->iem.s.cPotentialExits++;
5704
5705 /*
5706 * Read in the current memory content if it's a read, execute or partial
5707 * write access.
5708 */
5709 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
5710 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
5711 {
5712 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
5713 memset(pbBuf, 0xff, cbMem);
5714 else
5715 {
5716 int rc;
5717 if (!pVCpu->iem.s.fBypassHandlers)
5718 {
5719 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
5720 if (rcStrict == VINF_SUCCESS)
5721 { /* nothing */ }
5722 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5723 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5724 else
5725 {
5726 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
5727 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
5728 return rcStrict;
5729 }
5730 }
5731 else
5732 {
5733 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
5734 if (RT_SUCCESS(rc))
5735 { /* likely */ }
5736 else
5737 {
5738 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
5739 GCPhysFirst, rc));
5740 return rc;
5741 }
5742 }
5743 }
5744 }
5745#ifdef VBOX_STRICT
5746 else
5747 memset(pbBuf, 0xcc, cbMem);
5748#endif
5749#ifdef VBOX_STRICT
5750 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
5751 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
5752#endif
5753
5754 /*
5755 * Commit the bounce buffer entry.
5756 */
5757 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
5758 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
5759 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
5760 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
5761 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
5762 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
5763 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
5764 pVCpu->iem.s.iNextMapping = iMemMap + 1;
5765 pVCpu->iem.s.cActiveMappings++;
5766
5767 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
5768 *ppvMem = pbBuf;
5769 return VINF_SUCCESS;
5770}
5771
5772
5773
5774/**
5775 * Maps the specified guest memory for the given kind of access.
5776 *
5777 * This may be using bounce buffering of the memory if it's crossing a page
5778 * boundary or if there is an access handler installed for any of it. Because
5779 * of lock prefix guarantees, we're in for some extra clutter when this
5780 * happens.
5781 *
5782 * This may raise a \#GP, \#SS, \#PF or \#AC.
5783 *
5784 * @returns VBox strict status code.
5785 *
5786 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5787 * @param ppvMem Where to return the pointer to the mapped
5788 * memory.
5789 * @param cbMem The number of bytes to map. This is usually 1,
5790 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
5791 * string operations it can be up to a page.
5792 * @param iSegReg The index of the segment register to use for
5793 * this access. The base and limits are checked.
5794 * Use UINT8_MAX to indicate that no segmentation
5795 * is required (for IDT, GDT and LDT accesses).
5796 * @param GCPtrMem The address of the guest memory.
5797 * @param fAccess How the memory is being accessed. The
5798 * IEM_ACCESS_TYPE_XXX bit is used to figure out
5799 * how to map the memory, while the
5800 * IEM_ACCESS_WHAT_XXX bit is used when raising
5801 * exceptions.
5802 */
5803VBOXSTRICTRC iemMemMap(PVMCPUCC pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess) RT_NOEXCEPT
5804{
5805 /*
5806 * Check the input and figure out which mapping entry to use.
5807 */
5808 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 256 || cbMem == 108 || cbMem == 104 || cbMem == 102 || cbMem == 94); /* 512 is the max! */
5809 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
5810 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
5811
5812 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
5813 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
5814 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
5815 {
5816 iMemMap = iemMemMapFindFree(pVCpu);
5817 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
5818 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
5819 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
5820 pVCpu->iem.s.aMemMappings[2].fAccess),
5821 VERR_IEM_IPE_9);
5822 }
5823
5824 /*
5825 * Map the memory, checking that we can actually access it. If something
5826 * slightly complicated happens, fall back on bounce buffering.
5827 */
5828 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
5829 if (rcStrict == VINF_SUCCESS)
5830 { /* likely */ }
5831 else
5832 return rcStrict;
5833
5834 if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE) /* Crossing a page boundary? */
5835 { /* likely */ }
5836 else
5837 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
5838
5839#ifdef IEM_WITH_DATA_TLB
5840 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
5841
5842 /*
5843 * Get the TLB entry for this page.
5844 */
5845 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
5846 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
5847 if (pTlbe->uTag == uTag)
5848 {
5849# ifdef VBOX_WITH_STATISTICS
5850 pVCpu->iem.s.DataTlb.cTlbHits++;
5851# endif
5852 }
5853 else
5854 {
5855 pVCpu->iem.s.DataTlb.cTlbMisses++;
5856 PGMPTWALK Walk;
5857 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
5858 if (RT_FAILURE(rc))
5859 {
5860 Log(("iemMemMap: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
5861# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5862 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5863 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
5864# endif
5865 iemRaisePageFaultJmp(pVCpu, GCPtrMem, fAccess, rc);
5866 }
5867
5868 Assert(Walk.fSucceeded);
5869 pTlbe->uTag = uTag;
5870 pTlbe->fFlagsAndPhysRev = ~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A); /* skipping NX */
5871 pTlbe->GCPhys = Walk.GCPhys;
5872 pTlbe->pbMappingR3 = NULL;
5873 }
5874
5875 /*
5876 * Check TLB page table level access flags.
5877 */
5878 /* If the page is either supervisor only or non-writable, we need to do
5879 more careful access checks. */
5880 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_WRITE))
5881 {
5882 /* Write to read only memory? */
5883 if ( (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_WRITE)
5884 && (fAccess & IEM_ACCESS_TYPE_WRITE)
5885 && ( ( pVCpu->iem.s.uCpl == 3
5886 && !(fAccess & IEM_ACCESS_WHAT_SYS))
5887 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
5888 {
5889 Log(("iemMemMap: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
5890# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5891 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5892 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5893# endif
5894 iemRaisePageFaultJmp(pVCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
5895 }
5896
5897 /* Kernel memory accessed by userland? */
5898 if ( (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER)
5899 && pVCpu->iem.s.uCpl == 3
5900 && !(fAccess & IEM_ACCESS_WHAT_SYS))
5901 {
5902 Log(("iemMemMap: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
5903# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5904 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5905 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5906# endif
5907 iemRaisePageFaultJmp(pVCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
5908 }
5909 }
5910
5911 /*
5912 * Set the dirty / access flags.
5913 * ASSUMES this is set when the address is translated rather than on commit...
5914 */
5915 /** @todo testcase: check when A and D bits are actually set by the CPU. */
5916 uint64_t const fTlbAccessedDirty = (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PT_NO_DIRTY : 0) | IEMTLBE_F_PT_NO_ACCESSED;
5917 if (pTlbe->fFlagsAndPhysRev & fTlbAccessedDirty)
5918 {
5919 uint32_t const fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
5920 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
5921 AssertRC(rc2);
5922 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
5923 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
5924 pTlbe->fFlagsAndPhysRev &= ~fTlbAccessedDirty;
5925 }
5926
5927 /*
5928 * Look up the physical page info if necessary.
5929 */
5930 uint8_t *pbMem = NULL;
5931 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)
5932# ifdef IN_RING3
5933 pbMem = pTlbe->pbMappingR3;
5934# else
5935 pbMem = NULL;
5936# endif
5937 else
5938 {
5939 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
5940 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
5941 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
5942 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
5943 if (RT_LIKELY(pVCpu->iem.s.DataTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
5944 { /* likely */ }
5945 else
5946 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
5947 pTlbe->pbMappingR3 = NULL;
5948 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
5949 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_UNASSIGNED);
5950 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
5951 &pbMem, &pTlbe->fFlagsAndPhysRev);
5952 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
5953# ifdef IN_RING3
5954 pTlbe->pbMappingR3 = pbMem;
5955# endif
5956 }
5957
5958 /*
5959 * Check the physical page level access and mapping.
5960 */
5961 if ( !(pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ))
5962 || !(pTlbe->fFlagsAndPhysRev & ( (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PG_NO_WRITE : 0)
5963 | (fAccess & IEM_ACCESS_TYPE_READ ? IEMTLBE_F_PG_NO_READ : 0))) )
5964 { /* probably likely */ }
5965 else
5966 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem,
5967 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,
5968 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED
5969 : pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? VERR_PGM_PHYS_TLB_CATCH_ALL
5970 : VERR_PGM_PHYS_TLB_CATCH_WRITE);
5971 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3)); /* ASSUMPTIONS about PGMPhysIemGCPhys2PtrNoLock behaviour. */
5972
5973 if (pbMem)
5974 {
5975 Assert(!((uintptr_t)pbMem & GUEST_PAGE_OFFSET_MASK));
5976 pbMem = pbMem + (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
5977 fAccess |= IEM_ACCESS_NOT_LOCKED;
5978 }
5979 else
5980 {
5981 Assert(!(fAccess & IEM_ACCESS_NOT_LOCKED));
5982 RTGCPHYS const GCPhysFirst = pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
5983 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
5984 if (rcStrict != VINF_SUCCESS)
5985 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
5986 }
5987
5988 void * const pvMem = pbMem;
5989
5990 if (fAccess & IEM_ACCESS_TYPE_WRITE)
5991 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
5992 if (fAccess & IEM_ACCESS_TYPE_READ)
5993 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
5994
5995#else /* !IEM_WITH_DATA_TLB */
5996
5997 RTGCPHYS GCPhysFirst;
5998 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
5999 if (rcStrict != VINF_SUCCESS)
6000 return rcStrict;
6001
6002 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6003 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6004 if (fAccess & IEM_ACCESS_TYPE_READ)
6005 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6006
6007 void *pvMem;
6008 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6009 if (rcStrict != VINF_SUCCESS)
6010 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
6011
6012#endif /* !IEM_WITH_DATA_TLB */
6013
6014 /*
6015 * Fill in the mapping table entry.
6016 */
6017 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
6018 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
6019 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6020 pVCpu->iem.s.cActiveMappings += 1;
6021
6022 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6023 *ppvMem = pvMem;
6024
6025 return VINF_SUCCESS;
6026}
6027
6028
6029/**
6030 * Commits the guest memory if bounce buffered and unmaps it.
6031 *
6032 * @returns Strict VBox status code.
6033 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6034 * @param pvMem The mapping.
6035 * @param fAccess The kind of access.
6036 */
6037VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess) RT_NOEXCEPT
6038{
6039 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
6040 AssertReturn(iMemMap >= 0, iMemMap);
6041
6042 /* If it's bounce buffered, we may need to write back the buffer. */
6043 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6044 {
6045 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6046 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
6047 }
6048 /* Otherwise unlock it. */
6049 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6050 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6051
6052 /* Free the entry. */
6053 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6054 Assert(pVCpu->iem.s.cActiveMappings != 0);
6055 pVCpu->iem.s.cActiveMappings--;
6056 return VINF_SUCCESS;
6057}
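

/**
 * Illustrative sketch (not part of the IEM API): the basic map / modify /
 * commit pattern used by callers of iemMemMap and iemMemCommitAndUnmap, shown
 * here for a read-modify-write access.  The helper name iemExampleIncU32 is
 * hypothetical, and IEM_ACCESS_DATA_RW is assumed to be the usual read+write
 * data access constant (only the _R and _W forms appear in this excerpt).
 *
 * @code
 *  static VBOXSTRICTRC iemExampleIncU32(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
 *  {
 *      uint32_t    *pu32;
 *      VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu32, sizeof(*pu32), iSegReg, GCPtrMem, IEM_ACCESS_DATA_RW);
 *      if (rcStrict == VINF_SUCCESS)
 *      {
 *          *pu32 += 1; // operate directly on the mapping (or bounce buffer)
 *          rcStrict = iemMemCommitAndUnmap(pVCpu, pu32, IEM_ACCESS_DATA_RW); // commits bounce buffers, releases locks
 *      }
 *      return rcStrict;
 *  }
 * @endcode
 */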
6058
6059#ifdef IEM_WITH_SETJMP
6060
6061/**
6062 * Maps the specified guest memory for the given kind of access, longjmp on
6063 * error.
6064 *
6065 * This may use bounce buffering of the memory if it crosses a page
6066 * boundary or if an access handler is installed for any part of it. Because
6067 * of the lock prefix guarantees, we're in for some extra clutter when this
6068 * happens.
6069 *
6070 * This may raise a \#GP, \#SS, \#PF or \#AC.
6071 *
6072 * @returns Pointer to the mapped memory.
6073 *
6074 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6075 * @param cbMem The number of bytes to map. This is usually 1,
6076 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
6077 * string operations it can be up to a page.
6078 * @param iSegReg The index of the segment register to use for
6079 * this access. The base and limits are checked.
6080 * Use UINT8_MAX to indicate that no segmentation
6081 * is required (for IDT, GDT and LDT accesses).
6082 * @param GCPtrMem The address of the guest memory.
6083 * @param fAccess How the memory is being accessed. The
6084 * IEM_ACCESS_TYPE_XXX bit is used to figure out
6085 * how to map the memory, while the
6086 * IEM_ACCESS_WHAT_XXX bit is used when raising
6087 * exceptions.
6088 */
6089void *iemMemMapJmp(PVMCPUCC pVCpu, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess) RT_NOEXCEPT
6090{
6091 /*
6092 * Check the input and figure out which mapping entry to use.
6093 */
6094 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
6095 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
6096 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
6097
6098 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
6099 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6100 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
6101 {
6102 iMemMap = iemMemMapFindFree(pVCpu);
6103 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
6104 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
6105 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
6106 pVCpu->iem.s.aMemMappings[2].fAccess),
6107 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_9));
6108 }
6109
6110 /*
6111 * Map the memory, checking that we can actually access it. If something
6112 * slightly complicated happens, fall back on bounce buffering.
6113 */
6114 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
6115 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
6116 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
6117
6118 /* Crossing a page boundary? */
6119 if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE)
6120 { /* No (likely). */ }
6121 else
6122 {
6123 void *pvMem;
6124 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, cbMem, GCPtrMem, fAccess);
6125 if (rcStrict == VINF_SUCCESS)
6126 return pvMem;
6127 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
6128 }
6129
6130#ifdef IEM_WITH_DATA_TLB
6131 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
6132
6133 /*
6134 * Get the TLB entry for this page.
6135 */
6136 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
6137 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
6138 if (pTlbe->uTag == uTag)
6139 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
6140 else
6141 {
6142 pVCpu->iem.s.DataTlb.cTlbMisses++;
6143 PGMPTWALK Walk;
6144 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
6145 if (RT_FAILURE(rc))
6146 {
6147 Log(("iemMemMap: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
6148# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6149 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6150 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
6151# endif
6152 iemRaisePageFaultJmp(pVCpu, GCPtrMem, fAccess, rc);
6153 }
6154
6155 Assert(Walk.fSucceeded);
6156 pTlbe->uTag = uTag;
6157 pTlbe->fFlagsAndPhysRev = ~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A); /* skipping NX */
6158 pTlbe->GCPhys = Walk.GCPhys;
6159 pTlbe->pbMappingR3 = NULL;
6160 }
6161
6162 /*
6163 * Check the flags and physical revision.
6164 */
6165 /** @todo make the caller pass these in with fAccess. */
6166 uint64_t const fNoUser = (fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS && pVCpu->iem.s.uCpl == 3
6167 ? IEMTLBE_F_PT_NO_USER : 0;
6168 uint64_t const fNoWriteNoDirty = fAccess & IEM_ACCESS_TYPE_WRITE
6169 ? IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PT_NO_DIRTY
6170 | ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)
6171 || (pVCpu->iem.s.uCpl == 3 && (fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6172 ? IEMTLBE_F_PT_NO_WRITE : 0)
6173 : 0;
6174 uint64_t const fNoRead = fAccess & IEM_ACCESS_TYPE_READ ? IEMTLBE_F_PG_NO_READ : 0;
6175 uint8_t *pbMem = NULL;
6176 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PT_NO_ACCESSED | fNoRead | fNoWriteNoDirty | fNoUser))
6177 == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6178# ifdef IN_RING3
6179 pbMem = pTlbe->pbMappingR3;
6180# else
6181 pbMem = NULL;
6182# endif
6183 else
6184 {
6185 /*
6186 * Okay, something isn't quite right or needs refreshing.
6187 */
6188 /* Write to read only memory? */
6189 if (pTlbe->fFlagsAndPhysRev & fNoWriteNoDirty & IEMTLBE_F_PT_NO_WRITE)
6190 {
6191 Log(("iemMemMapJmp: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
6192# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6193 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6194 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6195# endif
6196 iemRaisePageFaultJmp(pVCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
6197 }
6198
6199 /* Kernel memory accessed by userland? */
6200 if (pTlbe->fFlagsAndPhysRev & fNoUser & IEMTLBE_F_PT_NO_USER)
6201 {
6202 Log(("iemMemMapJmp: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
6203# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6204 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6205 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6206# endif
6207 iemRaisePageFaultJmp(pVCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
6208 }
6209
6210 /* Set the dirty / access flags.
6211 ASSUMES this is set when the address is translated rather than on commit... */
6212 /** @todo testcase: check when A and D bits are actually set by the CPU. */
6213 if (pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY) | IEMTLBE_F_PT_NO_ACCESSED))
6214 {
6215 uint32_t const fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
6216 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
6217 AssertRC(rc2);
6218 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
6219 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
6220 pTlbe->fFlagsAndPhysRev &= ~((fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY) | IEMTLBE_F_PT_NO_ACCESSED);
6221 }
6222
6223 /*
6224 * Check if the physical page info needs updating.
6225 */
6226 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6227# ifdef IN_RING3
6228 pbMem = pTlbe->pbMappingR3;
6229# else
6230 pbMem = NULL;
6231# endif
6232 else
6233 {
6234 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
6235 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
6236 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
6237 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
6238 pTlbe->pbMappingR3 = NULL;
6239 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
6240 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_UNASSIGNED);
6241 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
6242 &pbMem, &pTlbe->fFlagsAndPhysRev);
6243 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
6244# ifdef IN_RING3
6245 pTlbe->pbMappingR3 = pbMem;
6246# endif
6247 }
6248
6249 /*
6250 * Check the physical page level access and mapping.
6251 */
6252 if (!(pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty | fNoRead) & (IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ))))
6253 { /* probably likely */ }
6254 else
6255 {
6256 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, (void **)&pbMem, cbMem,
6257 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,
6258 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED
6259 : pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? VERR_PGM_PHYS_TLB_CATCH_ALL
6260 : VERR_PGM_PHYS_TLB_CATCH_WRITE);
6261 if (rcStrict == VINF_SUCCESS)
6262 return pbMem;
6263 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
6264 }
6265 }
6266 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3)); /* ASSUMPTIONS about PGMPhysIemGCPhys2PtrNoLock behaviour. */
6267
6268 if (pbMem)
6269 {
6270 Assert(!((uintptr_t)pbMem & GUEST_PAGE_OFFSET_MASK));
6271 pbMem = pbMem + (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6272 fAccess |= IEM_ACCESS_NOT_LOCKED;
6273 }
6274 else
6275 {
6276 Assert(!(fAccess & IEM_ACCESS_NOT_LOCKED));
6277 RTGCPHYS const GCPhysFirst = pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6278 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6279 if (rcStrict == VINF_SUCCESS)
6280 return pbMem;
6281 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
6282 }
6283
6284 void * const pvMem = pbMem;
6285
6286 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6287 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6288 if (fAccess & IEM_ACCESS_TYPE_READ)
6289 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6290
6291#else /* !IEM_WITH_DATA_TLB */
6292
6293
6294 RTGCPHYS GCPhysFirst;
6295 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
6296 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
6297 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
6298
6299 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6300 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6301 if (fAccess & IEM_ACCESS_TYPE_READ)
6302 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6303
6304 void *pvMem;
6305 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6306 if (rcStrict == VINF_SUCCESS)
6307 { /* likely */ }
6308 else
6309 {
6310 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
6311 if (rcStrict == VINF_SUCCESS)
6312 return pvMem;
6313 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
6314 }
6315
6316#endif /* !IEM_WITH_DATA_TLB */
6317
6318 /*
6319 * Fill in the mapping table entry.
6320 */
6321 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
6322 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
6323 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6324 pVCpu->iem.s.cActiveMappings++;
6325
6326 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6327 return pvMem;
6328}
6329
6330
6331/**
6332 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
6333 *
6334 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6335 * @param pvMem The mapping.
6336 * @param fAccess The kind of access.
6337 */
6338void iemMemCommitAndUnmapJmp(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess) RT_NOEXCEPT
6339{
6340 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
6341 AssertStmt(iMemMap >= 0, longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), iMemMap));
6342
6343 /* If it's bounce buffered, we may need to write back the buffer. */
6344 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6345 {
6346 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6347 {
6348 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
6349 if (rcStrict == VINF_SUCCESS)
6350 return;
6351 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
6352 }
6353 }
6354 /* Otherwise unlock it. */
6355 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6356 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6357
6358 /* Free the entry. */
6359 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6360 Assert(pVCpu->iem.s.cActiveMappings != 0);
6361 pVCpu->iem.s.cActiveMappings--;
6362}
6363
6364#endif /* IEM_WITH_SETJMP */
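
/**
 * Illustrative sketch of how the IEM_WITH_SETJMP variants are meant to be
 * framed: a top level caller installs a jump buffer via
 * pVCpu->iem.s.CTX_SUFF(pJmpBuf), and the iemMem*Jmp accessors bail out with
 * longjmp instead of returning a strict status code.  This is a simplified,
 * hypothetical outline (the real setup lives in the IEM executor loops);
 * GCPtrMem and GCPtrDst stand for already calculated effective addresses.
 *
 * @code
 *  VBOXSTRICTRC    rcStrict     = VINF_SUCCESS;
 *  int             rcJmp;
 *  jmp_buf         JmpBuf;
 *  jmp_buf * const pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
 *  pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
 *  if ((rcJmp = setjmp(JmpBuf)) == 0)
 *  {
 *      // Any failing access below longjmps back to the setjmp above with the
 *      // VBOXSTRICTRC value it would otherwise have returned.
 *      uint32_t const u32 = iemMemFetchDataU32Jmp(pVCpu, X86_SREG_DS, GCPtrMem);
 *      iemMemStoreDataU32Jmp(pVCpu, X86_SREG_ES, GCPtrDst, u32);
 *  }
 *  else
 *  {
 *      // Exception / error path: undo any mappings left behind.
 *      if (pVCpu->iem.s.cActiveMappings > 0)
 *          iemMemRollback(pVCpu);
 *      rcStrict = rcJmp;
 *  }
 *  pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
 * @endcode
 */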
6365
6366#ifndef IN_RING3
6367/**
6368 * Commits the guest memory if bounce buffered and unmaps it; if any bounce
6369 * buffer part runs into trouble, the write-back is postponed to ring-3 (sets FF and stuff).
6370 *
6371 * Allows the instruction to be completed and retired, while the IEM user will
6372 * return to ring-3 immediately afterwards and do the postponed writes there.
6373 *
6374 * @returns VBox status code (no strict statuses). Caller must check
6375 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
6376 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6377 * @param pvMem The mapping.
6378 * @param fAccess The kind of access.
6379 */
6380VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess) RT_NOEXCEPT
6381{
6382 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
6383 AssertReturn(iMemMap >= 0, iMemMap);
6384
6385 /* If it's bounce buffered, we may need to write back the buffer. */
6386 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6387 {
6388 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6389 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
6390 }
6391 /* Otherwise unlock it. */
6392 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6393 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6394
6395 /* Free the entry. */
6396 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6397 Assert(pVCpu->iem.s.cActiveMappings != 0);
6398 pVCpu->iem.s.cActiveMappings--;
6399 return VINF_SUCCESS;
6400}
6401#endif
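
/**
 * Illustrative sketch (hypothetical caller, not IEM code): after a
 * postponed-to-ring-3 commit the instruction is considered retired, but the
 * caller must notice VMCPU_FF_IEM and get back to ring-3 before executing
 * more guest code, e.g. before the next iteration of a REP string instruction.
 *
 * @code
 *  bool fGoToRing3 = false; // hypothetical local in the caller
 *  VBOXSTRICTRC rcStrict = iemMemCommitAndUnmapPostponeTroubleToR3(pVCpu, pvMem, IEM_ACCESS_DATA_W);
 *  if (rcStrict == VINF_SUCCESS)
 *  {
 *      if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
 *          fGoToRing3 = true; // postponed write-back: let the outer loop return to ring-3 first
 *      // otherwise it is safe to continue, e.g. with the next string iteration
 *  }
 * @endcode
 */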
6402
6403
6404/**
6405 * Rolls back mappings, releasing page locks and such.
6406 *
6407 * The caller shall only call this after checking cActiveMappings.
6408 *
6410 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6411 */
6412void iemMemRollback(PVMCPUCC pVCpu) RT_NOEXCEPT
6413{
6414 Assert(pVCpu->iem.s.cActiveMappings > 0);
6415
6416 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
6417 while (iMemMap-- > 0)
6418 {
6419 uint32_t const fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
6420 if (fAccess != IEM_ACCESS_INVALID)
6421 {
6422 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
6423 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6424 if (!(fAccess & (IEM_ACCESS_BOUNCE_BUFFERED | IEM_ACCESS_NOT_LOCKED)))
6425 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6426 AssertMsg(pVCpu->iem.s.cActiveMappings > 0,
6427 ("iMemMap=%u fAccess=%#x pv=%p GCPhysFirst=%RGp GCPhysSecond=%RGp\n",
6428 iMemMap, fAccess, pVCpu->iem.s.aMemMappings[iMemMap].pv,
6429 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond));
6430 pVCpu->iem.s.cActiveMappings--;
6431 }
6432 }
6433}
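

/**
 * Illustrative fragment of the documented iemMemRollback contract: it is only
 * invoked after checking cActiveMappings, typically on an instruction's
 * failure path.  Hypothetical caller code, mirroring the rule stated above.
 *
 * @code
 *  if (rcStrict != VINF_SUCCESS && pVCpu->iem.s.cActiveMappings > 0)
 *      iemMemRollback(pVCpu); // releases page locks and invalidates the mapping entries
 * @endcode
 */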
6434
6435
6436/**
6437 * Fetches a data byte.
6438 *
6439 * @returns Strict VBox status code.
6440 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6441 * @param pu8Dst Where to return the byte.
6442 * @param iSegReg The index of the segment register to use for
6443 * this access. The base and limits are checked.
6444 * @param GCPtrMem The address of the guest memory.
6445 */
6446VBOXSTRICTRC iemMemFetchDataU8(PVMCPUCC pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6447{
6448 /* The lazy approach for now... */
6449 uint8_t const *pu8Src;
6450 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
6451 if (rc == VINF_SUCCESS)
6452 {
6453 *pu8Dst = *pu8Src;
6454 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
6455 }
6456 return rc;
6457}
6458
6459
6460#ifdef IEM_WITH_SETJMP
6461/**
6462 * Fetches a data byte, longjmp on error.
6463 *
6464 * @returns The byte.
6465 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6466 * @param iSegReg The index of the segment register to use for
6467 * this access. The base and limits are checked.
6468 * @param GCPtrMem The address of the guest memory.
6469 */
6470uint8_t iemMemFetchDataU8Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6471{
6472 /* The lazy approach for now... */
6473 uint8_t const *pu8Src = (uint8_t const *)iemMemMapJmp(pVCpu, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
6474 uint8_t const bRet = *pu8Src;
6475 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
6476 return bRet;
6477}
6478#endif /* IEM_WITH_SETJMP */
6479
6480
6481/**
6482 * Fetches a data word.
6483 *
6484 * @returns Strict VBox status code.
6485 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6486 * @param pu16Dst Where to return the word.
6487 * @param iSegReg The index of the segment register to use for
6488 * this access. The base and limits are checked.
6489 * @param GCPtrMem The address of the guest memory.
6490 */
6491VBOXSTRICTRC iemMemFetchDataU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6492{
6493 /* The lazy approach for now... */
6494 uint16_t const *pu16Src;
6495 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
6496 if (rc == VINF_SUCCESS)
6497 {
6498 *pu16Dst = *pu16Src;
6499 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
6500 }
6501 return rc;
6502}
6503
6504
6505#ifdef IEM_WITH_SETJMP
6506/**
6507 * Fetches a data word, longjmp on error.
6508 *
6509 * @returns The word.
6510 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6511 * @param iSegReg The index of the segment register to use for
6512 * this access. The base and limits are checked.
6513 * @param GCPtrMem The address of the guest memory.
6514 */
6515uint16_t iemMemFetchDataU16Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6516{
6517 /* The lazy approach for now... */
6518 uint16_t const *pu16Src = (uint16_t const *)iemMemMapJmp(pVCpu, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
6519 uint16_t const u16Ret = *pu16Src;
6520 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
6521 return u16Ret;
6522}
6523#endif
6524
6525
6526/**
6527 * Fetches a data dword.
6528 *
6529 * @returns Strict VBox status code.
6530 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6531 * @param pu32Dst Where to return the dword.
6532 * @param iSegReg The index of the segment register to use for
6533 * this access. The base and limits are checked.
6534 * @param GCPtrMem The address of the guest memory.
6535 */
6536VBOXSTRICTRC iemMemFetchDataU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6537{
6538 /* The lazy approach for now... */
6539 uint32_t const *pu32Src;
6540 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
6541 if (rc == VINF_SUCCESS)
6542 {
6543 *pu32Dst = *pu32Src;
6544 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
6545 }
6546 return rc;
6547}
6548
6549
6550/**
6551 * Fetches a data dword and zero extends it to a qword.
6552 *
6553 * @returns Strict VBox status code.
6554 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6555 * @param pu64Dst Where to return the qword.
6556 * @param iSegReg The index of the segment register to use for
6557 * this access. The base and limits are checked.
6558 * @param GCPtrMem The address of the guest memory.
6559 */
6560VBOXSTRICTRC iemMemFetchDataU32_ZX_U64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6561{
6562 /* The lazy approach for now... */
6563 uint32_t const *pu32Src;
6564 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
6565 if (rc == VINF_SUCCESS)
6566 {
6567 *pu64Dst = *pu32Src;
6568 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
6569 }
6570 return rc;
6571}
6572
6573
6574#ifdef IEM_WITH_SETJMP
6575
6576/**
6577 * Fetches a data dword, longjmp on error, fallback/safe version.
6578 *
6579 * @returns The dword.
6580 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6581 * @param iSegReg The index of the segment register to use for
6582 * this access. The base and limits are checked.
6583 * @param GCPtrMem The address of the guest memory.
6584 */
6585static uint32_t iemMemFetchDataU32SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6586{
6587 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
6588 uint32_t const u32Ret = *pu32Src;
6589 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
6590 return u32Ret;
6591}
6592
6593
6594/**
6595 * Fetches a data dword, longjmp on error.
6596 *
6597 * @returns The dword.
6598 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6599 * @param iSegReg The index of the segment register to use for
6600 * this access. The base and limits are checked.
6601 * @param GCPtrMem The address of the guest memory.
6602 */
6603uint32_t iemMemFetchDataU32Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6604{
6605# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
6606 /*
6607 * Convert from segmented to flat address and check that it doesn't cross a page boundary.
6608 */
6609 RTGCPTR GCPtrEff = iemMemApplySegmentToReadJmp(pVCpu, iSegReg, sizeof(uint32_t), GCPtrMem);
6610 if (RT_LIKELY((GCPtrEff & GUEST_PAGE_OFFSET_MASK) <= GUEST_PAGE_SIZE - sizeof(uint32_t)))
6611 {
6612 /*
6613 * TLB lookup.
6614 */
6615 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrEff);
6616 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
6617 if (pTlbe->uTag == uTag)
6618 {
6619 /*
6620 * Check TLB page table level access flags.
6621 */
6622 uint64_t const fNoUser = pVCpu->iem.s.uCpl == 3 ? IEMTLBE_F_PT_NO_USER : 0;
6623 if ( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_READ
6624 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_NO_MAPPINGR3 | fNoUser))
6625 == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6626 {
6627 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
6628
6629 /*
6630 * Alignment check:
6631 */
6632 /** @todo check priority \#AC vs \#PF */
6633 if ( !(GCPtrEff & (sizeof(uint32_t) - 1))
6634 || !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_AM)
6635 || !pVCpu->cpum.GstCtx.eflags.Bits.u1AC
6636 || pVCpu->iem.s.uCpl != 3)
6637 {
6638 /*
6639 * Fetch and return the dword
6640 */
6641 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
6642 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
6643 return *(uint32_t const *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK];
6644 }
6645 Log10(("iemMemFetchDataU32Jmp: Raising #AC for %RGv\n", GCPtrEff));
6646 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
6647 }
6648 }
6649 }
6650
6651 /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
6652 outdated page pointer, or other troubles. */
6653 Log10(("iemMemFetchDataU32Jmp: %u:%RGv fallback\n", iSegReg, GCPtrMem));
6654# endif
6655 return iemMemFetchDataU32SafeJmp(pVCpu, iSegReg, GCPtrMem);
6656}
6657#endif
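
/**
 * Illustrative restatement of the alignment-check predicate used by the TLB
 * fast path in iemMemFetchDataU32Jmp above: \#AC is only raised for a
 * misaligned data access when alignment checking is fully enabled, i.e. when
 * CR0.AM and EFLAGS.AC are set and we are executing at CPL 3.  The helper
 * name is hypothetical; it merely inverts the condition used above and
 * assumes cbAccess is a power of two.
 *
 * @code
 *  DECLINLINE(bool) iemExampleRaisesAc(PVMCPUCC pVCpu, RTGCPTR GCPtrEff, uint32_t cbAccess)
 *  {
 *      return (GCPtrEff & (cbAccess - 1)) != 0            // misaligned
 *          && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_AM)       // CR0.AM enabled
 *          && pVCpu->cpum.GstCtx.eflags.Bits.u1AC         // EFLAGS.AC set
 *          && pVCpu->iem.s.uCpl == 3;                     // user mode only
 *  }
 * @endcode
 */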
6658
6659
6660#ifdef SOME_UNUSED_FUNCTION
6661/**
6662 * Fetches a data dword and sign extends it to a qword.
6663 *
6664 * @returns Strict VBox status code.
6665 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6666 * @param pu64Dst Where to return the sign extended value.
6667 * @param iSegReg The index of the segment register to use for
6668 * this access. The base and limits are checked.
6669 * @param GCPtrMem The address of the guest memory.
6670 */
6671VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6672{
6673 /* The lazy approach for now... */
6674 int32_t const *pi32Src;
6675 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
6676 if (rc == VINF_SUCCESS)
6677 {
6678 *pu64Dst = *pi32Src;
6679 rc = iemMemCommitAndUnmap(pVCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
6680 }
6681#ifdef __GNUC__ /* warning: GCC may be a royal pain */
6682 else
6683 *pu64Dst = 0;
6684#endif
6685 return rc;
6686}
6687#endif
6688
6689
6690/**
6691 * Fetches a data qword.
6692 *
6693 * @returns Strict VBox status code.
6694 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6695 * @param pu64Dst Where to return the qword.
6696 * @param iSegReg The index of the segment register to use for
6697 * this access. The base and limits are checked.
6698 * @param GCPtrMem The address of the guest memory.
6699 */
6700VBOXSTRICTRC iemMemFetchDataU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6701{
6702 /* The lazy approach for now... */
6703 uint64_t const *pu64Src;
6704 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
6705 if (rc == VINF_SUCCESS)
6706 {
6707 *pu64Dst = *pu64Src;
6708 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
6709 }
6710 return rc;
6711}
6712
6713
6714#ifdef IEM_WITH_SETJMP
6715/**
6716 * Fetches a data qword, longjmp on error.
6717 *
6718 * @returns The qword.
6719 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6720 * @param iSegReg The index of the segment register to use for
6721 * this access. The base and limits are checked.
6722 * @param GCPtrMem The address of the guest memory.
6723 */
6724uint64_t iemMemFetchDataU64Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6725{
6726 /* The lazy approach for now... */
6727 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
6728 uint64_t const u64Ret = *pu64Src;
6729 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
6730 return u64Ret;
6731}
6732#endif
6733
6734
6735/**
6736 * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
6737 *
6738 * @returns Strict VBox status code.
6739 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6740 * @param pu64Dst Where to return the qword.
6741 * @param iSegReg The index of the segment register to use for
6742 * this access. The base and limits are checked.
6743 * @param GCPtrMem The address of the guest memory.
6744 */
6745VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6746{
6747 /* The lazy approach for now... */
6748 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
6749 if (RT_UNLIKELY(GCPtrMem & 15))
6750 return iemRaiseGeneralProtectionFault0(pVCpu);
6751
6752 uint64_t const *pu64Src;
6753 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
6754 if (rc == VINF_SUCCESS)
6755 {
6756 *pu64Dst = *pu64Src;
6757 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
6758 }
6759 return rc;
6760}
6761
6762
6763#ifdef IEM_WITH_SETJMP
6764/**
6765 * Fetches a data qword, aligned at a 16 byte boundary (for SSE), longjmp on error.
6766 *
6767 * @returns The qword.
6768 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6769 * @param iSegReg The index of the segment register to use for
6770 * this access. The base and limits are checked.
6771 * @param GCPtrMem The address of the guest memory.
6772 */
6773uint64_t iemMemFetchDataU64AlignedU128Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6774{
6775 /* The lazy approach for now... */
6776 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
6777 if (RT_LIKELY(!(GCPtrMem & 15)))
6778 {
6779 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
6780 uint64_t const u64Ret = *pu64Src;
6781 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
6782 return u64Ret;
6783 }
6784
6785 VBOXSTRICTRC rc = iemRaiseGeneralProtectionFault0(pVCpu);
6786 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rc));
6787}
6788#endif
6789
6790
6791/**
6792 * Fetches a data tword.
6793 *
6794 * @returns Strict VBox status code.
6795 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6796 * @param pr80Dst Where to return the tword.
6797 * @param iSegReg The index of the segment register to use for
6798 * this access. The base and limits are checked.
6799 * @param GCPtrMem The address of the guest memory.
6800 */
6801VBOXSTRICTRC iemMemFetchDataR80(PVMCPUCC pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6802{
6803 /* The lazy approach for now... */
6804 PCRTFLOAT80U pr80Src;
6805 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
6806 if (rc == VINF_SUCCESS)
6807 {
6808 *pr80Dst = *pr80Src;
6809 rc = iemMemCommitAndUnmap(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
6810 }
6811 return rc;
6812}
6813
6814
6815#ifdef IEM_WITH_SETJMP
6816/**
6817 * Fetches a data tword, longjmp on error.
6818 *
6819 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6820 * @param pr80Dst Where to return the tword.
6821 * @param iSegReg The index of the segment register to use for
6822 * this access. The base and limits are checked.
6823 * @param GCPtrMem The address of the guest memory.
6824 */
6825void iemMemFetchDataR80Jmp(PVMCPUCC pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6826{
6827 /* The lazy approach for now... */
6828 PCRTFLOAT80U pr80Src = (PCRTFLOAT80U)iemMemMapJmp(pVCpu, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
6829 *pr80Dst = *pr80Src;
6830 iemMemCommitAndUnmapJmp(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
6831}
6832#endif
6833
6834
6835/**
6836 * Fetches a data tword (packed BCD).
6837 *
6838 * @returns Strict VBox status code.
6839 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6840 * @param pd80Dst Where to return the tword.
6841 * @param iSegReg The index of the segment register to use for
6842 * this access. The base and limits are checked.
6843 * @param GCPtrMem The address of the guest memory.
6844 */
6845VBOXSTRICTRC iemMemFetchDataD80(PVMCPUCC pVCpu, PRTPBCD80U pd80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6846{
6847 /* The lazy approach for now... */
6848 PCRTPBCD80U pd80Src;
6849 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pd80Src, sizeof(*pd80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
6850 if (rc == VINF_SUCCESS)
6851 {
6852 *pd80Dst = *pd80Src;
6853 rc = iemMemCommitAndUnmap(pVCpu, (void *)pd80Src, IEM_ACCESS_DATA_R);
6854 }
6855 return rc;
6856}
6857
6858
6859#ifdef IEM_WITH_SETJMP
6860/**
6861 * Fetches a data tword (packed BCD), longjmp on error.
6862 *
6863 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6864 * @param pd80Dst Where to return the tword.
6865 * @param iSegReg The index of the segment register to use for
6866 * this access. The base and limits are checked.
6867 * @param GCPtrMem The address of the guest memory.
6868 */
6869void iemMemFetchDataD80Jmp(PVMCPUCC pVCpu, PRTPBCD80U pd80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6870{
6871 /* The lazy approach for now... */
6872 PCRTPBCD80U pd80Src = (PCRTPBCD80U)iemMemMapJmp(pVCpu, sizeof(*pd80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
6873 *pd80Dst = *pd80Src;
6874 iemMemCommitAndUnmapJmp(pVCpu, (void *)pd80Src, IEM_ACCESS_DATA_R);
6875}
6876#endif
6877
6878
6879/**
6880 * Fetches a data dqword (double qword), generally SSE related.
6881 *
6882 * @returns Strict VBox status code.
6883 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6884 * @param pu128Dst Where to return the dqword.
6885 * @param iSegReg The index of the segment register to use for
6886 * this access. The base and limits are checked.
6887 * @param GCPtrMem The address of the guest memory.
6888 */
6889VBOXSTRICTRC iemMemFetchDataU128(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6890{
6891 /* The lazy approach for now... */
6892 PCRTUINT128U pu128Src;
6893 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
6894 if (rc == VINF_SUCCESS)
6895 {
6896 pu128Dst->au64[0] = pu128Src->au64[0];
6897 pu128Dst->au64[1] = pu128Src->au64[1];
6898 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
6899 }
6900 return rc;
6901}
6902
6903
6904#ifdef IEM_WITH_SETJMP
6905/**
6906 * Fetches a data dqword (double qword), generally SSE related, longjmp on error.
6907 *
6908 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6909 * @param pu128Dst Where to return the dqword.
6910 * @param iSegReg The index of the segment register to use for
6911 * this access. The base and limits are checked.
6912 * @param GCPtrMem The address of the guest memory.
6913 */
6914void iemMemFetchDataU128Jmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6915{
6916 /* The lazy approach for now... */
6917 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
6918 pu128Dst->au64[0] = pu128Src->au64[0];
6919 pu128Dst->au64[1] = pu128Src->au64[1];
6920 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
6921}
6922#endif
6923
6924
6925/**
6926 * Fetches a data dqword (double qword) at an aligned address, generally SSE
6927 * related.
6928 *
6929 * Raises \#GP(0) if not aligned.
6930 *
6931 * @returns Strict VBox status code.
6932 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6933 * @param pu128Dst Where to return the dqword.
6934 * @param iSegReg The index of the segment register to use for
6935 * this access. The base and limits are checked.
6936 * @param GCPtrMem The address of the guest memory.
6937 */
6938VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6939{
6940 /* The lazy approach for now... */
6941 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
6942 if ( (GCPtrMem & 15)
6943 && !(pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
6944 return iemRaiseGeneralProtectionFault0(pVCpu);
6945
6946 PCRTUINT128U pu128Src;
6947 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
6948 if (rc == VINF_SUCCESS)
6949 {
6950 pu128Dst->au64[0] = pu128Src->au64[0];
6951 pu128Dst->au64[1] = pu128Src->au64[1];
6952 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
6953 }
6954 return rc;
6955}
6956
6957
6958#ifdef IEM_WITH_SETJMP
6959/**
6960 * Fetches a data dqword (double qword) at an aligned address, generally SSE
6961 * related, longjmp on error.
6962 *
6963 * Raises \#GP(0) if not aligned.
6964 *
6965 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6966 * @param pu128Dst Where to return the dqword.
6967 * @param iSegReg The index of the segment register to use for
6968 * this access. The base and limits are checked.
6969 * @param GCPtrMem The address of the guest memory.
6970 */
6971void iemMemFetchDataU128AlignedSseJmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6972{
6973 /* The lazy approach for now... */
6974 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
6975 if ( (GCPtrMem & 15) == 0
6976 || (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
6977 {
6978 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
6979 pu128Dst->au64[0] = pu128Src->au64[0];
6980 pu128Dst->au64[1] = pu128Src->au64[1];
6981 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
6982 return;
6983 }
6984
6985 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
6986 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
6987}
6988#endif
6989
6990
6991/**
6992 * Fetches a data oword (octo word), generally AVX related.
6993 *
6994 * @returns Strict VBox status code.
6995 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6996 * @param pu256Dst Where to return the oword.
6997 * @param iSegReg The index of the segment register to use for
6998 * this access. The base and limits are checked.
6999 * @param GCPtrMem The address of the guest memory.
7000 */
7001VBOXSTRICTRC iemMemFetchDataU256(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7002{
7003 /* The lazy approach for now... */
7004 PCRTUINT256U pu256Src;
7005 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7006 if (rc == VINF_SUCCESS)
7007 {
7008 pu256Dst->au64[0] = pu256Src->au64[0];
7009 pu256Dst->au64[1] = pu256Src->au64[1];
7010 pu256Dst->au64[2] = pu256Src->au64[2];
7011 pu256Dst->au64[3] = pu256Src->au64[3];
7012 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
7013 }
7014 return rc;
7015}
7016
7017
7018#ifdef IEM_WITH_SETJMP
7019/**
7020 * Fetches a data oword (octo word), generally AVX related, longjmp on error.
7021 *
7022 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7023 * @param pu256Dst Where to return the oword.
7024 * @param iSegReg The index of the segment register to use for
7025 * this access. The base and limits are checked.
7026 * @param GCPtrMem The address of the guest memory.
7027 */
7028void iemMemFetchDataU256Jmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7029{
7030 /* The lazy approach for now... */
7031 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7032 pu256Dst->au64[0] = pu256Src->au64[0];
7033 pu256Dst->au64[1] = pu256Src->au64[1];
7034 pu256Dst->au64[2] = pu256Src->au64[2];
7035 pu256Dst->au64[3] = pu256Src->au64[3];
7036 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
7037}
7038#endif
7039
7040
7041/**
7042 * Fetches a data oword (octo word) at an aligned address, generally AVX
7043 * related.
7044 *
7045 * Raises \#GP(0) if not aligned.
7046 *
7047 * @returns Strict VBox status code.
7048 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7049 * @param pu256Dst Where to return the oword.
7050 * @param iSegReg The index of the segment register to use for
7051 * this access. The base and limits are checked.
7052 * @param GCPtrMem The address of the guest memory.
7053 */
7054VBOXSTRICTRC iemMemFetchDataU256AlignedSse(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7055{
7056 /* The lazy approach for now... */
7057 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on AVX stuff. */
7058 if (GCPtrMem & 31)
7059 return iemRaiseGeneralProtectionFault0(pVCpu);
7060
7061 PCRTUINT256U pu256Src;
7062 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7063 if (rc == VINF_SUCCESS)
7064 {
7065 pu256Dst->au64[0] = pu256Src->au64[0];
7066 pu256Dst->au64[1] = pu256Src->au64[1];
7067 pu256Dst->au64[2] = pu256Src->au64[2];
7068 pu256Dst->au64[3] = pu256Src->au64[3];
7069 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
7070 }
7071 return rc;
7072}
7073
7074
7075#ifdef IEM_WITH_SETJMP
7076/**
7077 * Fetches a data oword (octo word) at an aligned address, generally AVX
7078 * related, longjmp on error.
7079 *
7080 * Raises \#GP(0) if not aligned.
7081 *
7082 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7083 * @param pu256Dst Where to return the oword.
7084 * @param iSegReg The index of the segment register to use for
7085 * this access. The base and limits are checked.
7086 * @param GCPtrMem The address of the guest memory.
7087 */
7088void iemMemFetchDataU256AlignedSseJmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7089{
7090 /* The lazy approach for now... */
7091 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on AVX stuff. */
7092 if ((GCPtrMem & 31) == 0)
7093 {
7094 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
7095 pu256Dst->au64[0] = pu256Src->au64[0];
7096 pu256Dst->au64[1] = pu256Src->au64[1];
7097 pu256Dst->au64[2] = pu256Src->au64[2];
7098 pu256Dst->au64[3] = pu256Src->au64[3];
7099 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
7100 return;
7101 }
7102
7103 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
7104 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
7105}
7106#endif
7107
7108
7109
7110/**
7111 * Fetches a descriptor register (lgdt, lidt).
7112 *
7113 * @returns Strict VBox status code.
7114 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7115 * @param pcbLimit Where to return the limit.
7116 * @param pGCPtrBase Where to return the base.
7117 * @param iSegReg The index of the segment register to use for
7118 * this access. The base and limits are checked.
7119 * @param GCPtrMem The address of the guest memory.
7120 * @param enmOpSize The effective operand size.
7121 */
7122VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPUCC pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
7123 RTGCPTR GCPtrMem, IEMMODE enmOpSize) RT_NOEXCEPT
7124{
7125 /*
7126 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
7127 * little special:
7128 * - The two reads are done separately.
7129 * - Operand size override works in 16-bit and 32-bit code, but not in 64-bit code.
7130 * - We suspect the 386 to actually commit the limit before the base in
7131 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
7132 * don't try to emulate this eccentric behavior, because it's not well
7133 * enough understood and rather hard to trigger.
7134 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
7135 */
7136 VBOXSTRICTRC rcStrict;
7137 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7138 {
7139 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7140 if (rcStrict == VINF_SUCCESS)
7141 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
7142 }
7143 else
7144 {
7145 uint32_t uTmp = 0; /* (Initialized to silence Visual C++'s possibly-used-uninitialized warning.) */
7146 if (enmOpSize == IEMMODE_32BIT)
7147 {
7148 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
7149 {
7150 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7151 if (rcStrict == VINF_SUCCESS)
7152 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7153 }
7154 else
7155 {
7156 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
7157 if (rcStrict == VINF_SUCCESS)
7158 {
7159 *pcbLimit = (uint16_t)uTmp;
7160 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7161 }
7162 }
7163 if (rcStrict == VINF_SUCCESS)
7164 *pGCPtrBase = uTmp;
7165 }
7166 else
7167 {
7168 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7169 if (rcStrict == VINF_SUCCESS)
7170 {
7171 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7172 if (rcStrict == VINF_SUCCESS)
7173 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
7174 }
7175 }
7176 }
7177 return rcStrict;
7178}
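

/**
 * Worked example for iemMemFetchDataXdtr above, using illustrative values.
 * Assume the six bytes at GCPtrMem are, in memory order: ff 03 78 56 34 12,
 * i.e. a little endian limit word 0x03ff followed by a base dword 0x12345678.
 *  - 32-bit operand size (non-486 path): *pcbLimit = 0x03ff, *pGCPtrBase = 0x12345678.
 *  - 16-bit operand size: the base is masked to 24 bits, so *pGCPtrBase = 0x345678.
 *  - 64-bit mode: the limit word is followed by a full 8-byte base, so ten
 *    bytes are read in total and the operand size override is ignored.
 *
 * @code
 *  uint16_t cbLimit;
 *  RTGCPTR  GCPtrBase;
 *  VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(pVCpu, &cbLimit, &GCPtrBase, X86_SREG_DS, GCPtrMem, IEMMODE_32BIT);
 * @endcode
 */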
7179
7180
7181
7182/**
7183 * Stores a data byte.
7184 *
7185 * @returns Strict VBox status code.
7186 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7187 * @param iSegReg The index of the segment register to use for
7188 * this access. The base and limits are checked.
7189 * @param GCPtrMem The address of the guest memory.
7190 * @param u8Value The value to store.
7191 */
7192VBOXSTRICTRC iemMemStoreDataU8(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value) RT_NOEXCEPT
7193{
7194 /* The lazy approach for now... */
7195 uint8_t *pu8Dst;
7196 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7197 if (rc == VINF_SUCCESS)
7198 {
7199 *pu8Dst = u8Value;
7200 rc = iemMemCommitAndUnmap(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
7201 }
7202 return rc;
7203}
7204
7205
7206#ifdef IEM_WITH_SETJMP
7207/**
7208 * Stores a data byte, longjmp on error.
7209 *
7210 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7211 * @param iSegReg The index of the segment register to use for
7212 * this access. The base and limits are checked.
7213 * @param GCPtrMem The address of the guest memory.
7214 * @param u8Value The value to store.
7215 */
7216void iemMemStoreDataU8Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value) RT_NOEXCEPT
7217{
7218 /* The lazy approach for now... */
7219 uint8_t *pu8Dst = (uint8_t *)iemMemMapJmp(pVCpu, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7220 *pu8Dst = u8Value;
7221 iemMemCommitAndUnmapJmp(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
7222}
7223#endif
7224
7225
7226/**
7227 * Stores a data word.
7228 *
7229 * @returns Strict VBox status code.
7230 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7231 * @param iSegReg The index of the segment register to use for
7232 * this access. The base and limits are checked.
7233 * @param GCPtrMem The address of the guest memory.
7234 * @param u16Value The value to store.
7235 */
7236VBOXSTRICTRC iemMemStoreDataU16(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value) RT_NOEXCEPT
7237{
7238 /* The lazy approach for now... */
7239 uint16_t *pu16Dst;
7240 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7241 if (rc == VINF_SUCCESS)
7242 {
7243 *pu16Dst = u16Value;
7244 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
7245 }
7246 return rc;
7247}
7248
7249
7250#ifdef IEM_WITH_SETJMP
7251/**
7252 * Stores a data word, longjmp on error.
7253 *
7254 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7255 * @param iSegReg The index of the segment register to use for
7256 * this access. The base and limits are checked.
7257 * @param GCPtrMem The address of the guest memory.
7258 * @param u16Value The value to store.
7259 */
7260void iemMemStoreDataU16Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value) RT_NOEXCEPT
7261{
7262 /* The lazy approach for now... */
7263 uint16_t *pu16Dst = (uint16_t *)iemMemMapJmp(pVCpu, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7264 *pu16Dst = u16Value;
7265 iemMemCommitAndUnmapJmp(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
7266}
7267#endif
7268
7269
7270/**
7271 * Stores a data dword.
7272 *
7273 * @returns Strict VBox status code.
7274 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7275 * @param iSegReg The index of the segment register to use for
7276 * this access. The base and limits are checked.
7277 * @param GCPtrMem The address of the guest memory.
7278 * @param u32Value The value to store.
7279 */
7280VBOXSTRICTRC iemMemStoreDataU32(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value) RT_NOEXCEPT
7281{
7282 /* The lazy approach for now... */
7283 uint32_t *pu32Dst;
7284 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7285 if (rc == VINF_SUCCESS)
7286 {
7287 *pu32Dst = u32Value;
7288 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
7289 }
7290 return rc;
7291}
7292
7293
7294#ifdef IEM_WITH_SETJMP
7295/**
7296 * Stores a data dword, longjmp on error.
7297 *
7299 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7300 * @param iSegReg The index of the segment register to use for
7301 * this access. The base and limits are checked.
7302 * @param GCPtrMem The address of the guest memory.
7303 * @param u32Value The value to store.
7304 */
7305void iemMemStoreDataU32Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value) RT_NOEXCEPT
7306{
7307 /* The lazy approach for now... */
7308 uint32_t *pu32Dst = (uint32_t *)iemMemMapJmp(pVCpu, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7309 *pu32Dst = u32Value;
7310 iemMemCommitAndUnmapJmp(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
7311}
7312#endif
7313
7314
7315/**
7316 * Stores a data qword.
7317 *
7318 * @returns Strict VBox status code.
7319 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7320 * @param iSegReg The index of the segment register to use for
7321 * this access. The base and limits are checked.
7322 * @param GCPtrMem The address of the guest memory.
7323 * @param u64Value The value to store.
7324 */
7325VBOXSTRICTRC iemMemStoreDataU64(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value) RT_NOEXCEPT
7326{
7327 /* The lazy approach for now... */
7328 uint64_t *pu64Dst;
7329 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7330 if (rc == VINF_SUCCESS)
7331 {
7332 *pu64Dst = u64Value;
7333 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
7334 }
7335 return rc;
7336}
7337
7338
7339#ifdef IEM_WITH_SETJMP
7340/**
7341 * Stores a data qword, longjmp on error.
7342 *
7343 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7344 * @param iSegReg The index of the segment register to use for
7345 * this access. The base and limits are checked.
7346 * @param GCPtrMem The address of the guest memory.
7347 * @param u64Value The value to store.
7348 */
7349void iemMemStoreDataU64Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value) RT_NOEXCEPT
7350{
7351 /* The lazy approach for now... */
7352 uint64_t *pu64Dst = (uint64_t *)iemMemMapJmp(pVCpu, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7353 *pu64Dst = u64Value;
7354 iemMemCommitAndUnmapJmp(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
7355}
7356#endif
7357
7358
7359/**
7360 * Stores a data dqword.
7361 *
7362 * @returns Strict VBox status code.
7363 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7364 * @param iSegReg The index of the segment register to use for
7365 * this access. The base and limits are checked.
7366 * @param GCPtrMem The address of the guest memory.
7367 * @param u128Value The value to store.
7368 */
7369VBOXSTRICTRC iemMemStoreDataU128(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT
7370{
7371 /* The lazy approach for now... */
7372 PRTUINT128U pu128Dst;
7373 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7374 if (rc == VINF_SUCCESS)
7375 {
7376 pu128Dst->au64[0] = u128Value.au64[0];
7377 pu128Dst->au64[1] = u128Value.au64[1];
7378 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
7379 }
7380 return rc;
7381}
7382
7383
7384#ifdef IEM_WITH_SETJMP
7385/**
7386 * Stores a data dqword, longjmp on error.
7387 *
7388 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7389 * @param iSegReg The index of the segment register to use for
7390 * this access. The base and limits are checked.
7391 * @param GCPtrMem The address of the guest memory.
7392 * @param u128Value The value to store.
7393 */
7394void iemMemStoreDataU128Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT
7395{
7396 /* The lazy approach for now... */
7397 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7398 pu128Dst->au64[0] = u128Value.au64[0];
7399 pu128Dst->au64[1] = u128Value.au64[1];
7400 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
7401}
7402#endif
7403
7404
7405/**
7406 * Stores a data dqword, SSE aligned.
7407 *
7408 * @returns Strict VBox status code.
7409 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7410 * @param iSegReg The index of the segment register to use for
7411 * this access. The base and limits are checked.
7412 * @param GCPtrMem The address of the guest memory.
7413 * @param u128Value The value to store.
7414 */
7415VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT
7416{
7417 /* The lazy approach for now... */
7418 if ( (GCPtrMem & 15)
7419 && !(pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
7420 return iemRaiseGeneralProtectionFault0(pVCpu);
7421
7422 PRTUINT128U pu128Dst;
7423 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7424 if (rc == VINF_SUCCESS)
7425 {
7426 pu128Dst->au64[0] = u128Value.au64[0];
7427 pu128Dst->au64[1] = u128Value.au64[1];
7428 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
7429 }
7430 return rc;
7431}
7432
7433
7434#ifdef IEM_WITH_SETJMP
7435/**
7436 * Stores a data dqword, SSE aligned, longjmp on error.
7437 *
7439 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7440 * @param iSegReg The index of the segment register to use for
7441 * this access. The base and limits are checked.
7442 * @param GCPtrMem The address of the guest memory.
7443 * @param u128Value The value to store.
7444 */
7445void iemMemStoreDataU128AlignedSseJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT
7446{
7447 /* The lazy approach for now... */
7448 if ( (GCPtrMem & 15) == 0
7449 || (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
7450 {
7451 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7452 pu128Dst->au64[0] = u128Value.au64[0];
7453 pu128Dst->au64[1] = u128Value.au64[1];
7454 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
7455 return;
7456 }
7457
7458 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
7459 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
7460}
7461#endif
7462
7463
7464/**
7465 * Stores a data qqword (256 bits).
7466 *
7467 * @returns Strict VBox status code.
7468 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7469 * @param iSegReg The index of the segment register to use for
7470 * this access. The base and limits are checked.
7471 * @param GCPtrMem The address of the guest memory.
7472 * @param pu256Value Pointer to the value to store.
7473 */
7474VBOXSTRICTRC iemMemStoreDataU256(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
7475{
7476 /* The lazy approach for now... */
7477 PRTUINT256U pu256Dst;
7478 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7479 if (rc == VINF_SUCCESS)
7480 {
7481 pu256Dst->au64[0] = pu256Value->au64[0];
7482 pu256Dst->au64[1] = pu256Value->au64[1];
7483 pu256Dst->au64[2] = pu256Value->au64[2];
7484 pu256Dst->au64[3] = pu256Value->au64[3];
7485 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
7486 }
7487 return rc;
7488}
7489
7490
7491#ifdef IEM_WITH_SETJMP
7492/**
7493 * Stores a data qqword (256 bits), longjmp on error.
7494 *
7495 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7496 * @param iSegReg The index of the segment register to use for
7497 * this access. The base and limits are checked.
7498 * @param GCPtrMem The address of the guest memory.
7499 * @param pu256Value Pointer to the value to store.
7500 */
7501void iemMemStoreDataU256Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
7502{
7503 /* The lazy approach for now... */
7504 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7505 pu256Dst->au64[0] = pu256Value->au64[0];
7506 pu256Dst->au64[1] = pu256Value->au64[1];
7507 pu256Dst->au64[2] = pu256Value->au64[2];
7508 pu256Dst->au64[3] = pu256Value->au64[3];
7509 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
7510}
7511#endif
7512
7513
7514/**
7515 * Stores a data qqword (256 bits), AVX aligned.
7516 *
7517 * @returns Strict VBox status code.
7518 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7519 * @param iSegReg The index of the segment register to use for
7520 * this access. The base and limits are checked.
7521 * @param GCPtrMem The address of the guest memory.
7522 * @param pu256Value Pointer to the value to store.
7523 */
7524VBOXSTRICTRC iemMemStoreDataU256AlignedAvx(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
7525{
7526 /* The lazy approach for now... */
7527 if (GCPtrMem & 31)
7528 return iemRaiseGeneralProtectionFault0(pVCpu);
7529
7530 PRTUINT256U pu256Dst;
7531 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7532 if (rc == VINF_SUCCESS)
7533 {
7534 pu256Dst->au64[0] = pu256Value->au64[0];
7535 pu256Dst->au64[1] = pu256Value->au64[1];
7536 pu256Dst->au64[2] = pu256Value->au64[2];
7537 pu256Dst->au64[3] = pu256Value->au64[3];
7538 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
7539 }
7540 return rc;
7541}
7542
7543
7544#ifdef IEM_WITH_SETJMP
7545/**
7546 * Stores a data qqword (256 bits), AVX aligned, longjmp on error.
7547 *
7549 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7550 * @param iSegReg The index of the segment register to use for
7551 * this access. The base and limits are checked.
7552 * @param GCPtrMem The address of the guest memory.
7553 * @param pu256Value Pointer to the value to store.
7554 */
7555void iemMemStoreDataU256AlignedAvxJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
7556{
7557 /* The lazy approach for now... */
7558 if ((GCPtrMem & 31) == 0)
7559 {
7560 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
7561 pu256Dst->au64[0] = pu256Value->au64[0];
7562 pu256Dst->au64[1] = pu256Value->au64[1];
7563 pu256Dst->au64[2] = pu256Value->au64[2];
7564 pu256Dst->au64[3] = pu256Value->au64[3];
7565 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
7566 return;
7567 }
7568
7569 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
7570 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
7571}
7572#endif
7573
7574
7575/**
7576 * Stores a descriptor register (sgdt, sidt).
7577 *
7578 * @returns Strict VBox status code.
7579 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7580 * @param cbLimit The limit.
7581 * @param GCPtrBase The base address.
7582 * @param iSegReg The index of the segment register to use for
7583 * this access. The base and limits are checked.
7584 * @param GCPtrMem The address of the guest memory.
7585 */
7586VBOXSTRICTRC iemMemStoreDataXdtr(PVMCPUCC pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7587{
7588 /*
7589 * The SIDT and SGDT instructions actually store the data using two
7590 * independent writes. The instructions do not respond to operand-size prefixes.
7591 */
7592 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
7593 if (rcStrict == VINF_SUCCESS)
7594 {
7595 if (pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT)
7596 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
7597 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
7598 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
7599 else if (pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT)
7600 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
7601 else
7602 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
7603 }
7604 return rcStrict;
7605}
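/*
 * A worked example of the two writes performed above; the limit and base are
 * illustrative values only. With cbLimit=0x03ff and GCPtrBase=0x00fee000 in a
 * 32-bit guest the stored bytes are (little endian):
 *
 *      GCPtrMem + 0: ff 03          (16-bit limit)
 *      GCPtrMem + 2: 00 e0 fe 00    (32-bit base)
 *
 * In 16-bit mode on a 286-class target the top byte of the stored base is
 * forced to 0xff, while in 64-bit mode the full 64-bit base is written at
 * GCPtrMem + 2 instead.
 */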
7606
7607
7608/**
7609 * Pushes a word onto the stack.
7610 *
7611 * @returns Strict VBox status code.
7612 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7613 * @param u16Value The value to push.
7614 */
7615VBOXSTRICTRC iemMemStackPushU16(PVMCPUCC pVCpu, uint16_t u16Value) RT_NOEXCEPT
7616{
7617 /* Decrement the stack pointer. */
7618 uint64_t uNewRsp;
7619 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 2, &uNewRsp);
7620
7621 /* Write the word the lazy way. */
7622 uint16_t *pu16Dst;
7623 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7624 if (rc == VINF_SUCCESS)
7625 {
7626 *pu16Dst = u16Value;
7627 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
7628 }
7629
7630 /* Commit the new RSP value unless an access handler made trouble. */
7631 if (rc == VINF_SUCCESS)
7632 pVCpu->cpum.GstCtx.rsp = uNewRsp;
7633
7634 return rc;
7635}
7636
7637
7638/**
7639 * Pushes a dword onto the stack.
7640 *
7641 * @returns Strict VBox status code.
7642 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7643 * @param u32Value The value to push.
7644 */
7645VBOXSTRICTRC iemMemStackPushU32(PVMCPUCC pVCpu, uint32_t u32Value) RT_NOEXCEPT
7646{
7647 /* Decrement the stack pointer. */
7648 uint64_t uNewRsp;
7649 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
7650
7651 /* Write the dword the lazy way. */
7652 uint32_t *pu32Dst;
7653 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7654 if (rc == VINF_SUCCESS)
7655 {
7656 *pu32Dst = u32Value;
7657 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
7658 }
7659
7660 /* Commit the new RSP value unless an access handler made trouble. */
7661 if (rc == VINF_SUCCESS)
7662 pVCpu->cpum.GstCtx.rsp = uNewRsp;
7663
7664 return rc;
7665}
7666
7667
7668/**
7669 * Pushes a dword segment register value onto the stack.
7670 *
7671 * @returns Strict VBox status code.
7672 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7673 * @param u32Value The value to push.
7674 */
7675VBOXSTRICTRC iemMemStackPushU32SReg(PVMCPUCC pVCpu, uint32_t u32Value) RT_NOEXCEPT
7676{
7677 /* Decrement the stack pointer. */
7678 uint64_t uNewRsp;
7679 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
7680
7681 /* The Intel docs talk about zero extending the selector register
7682 value. My actual Intel CPU here might be zero extending the value,
7683 but it still only writes the lower word... */
7684 /** @todo Test this on newer HW and on AMD and in 64-bit mode. Also test what
7685 * happens when crossing a page boundary: is the high word checked
7686 * for write accessibility or not? Probably it is. What about segment limits?
7687 * It appears this behavior is also shared with trap error codes.
7688 *
7689 * Docs indicate the behavior changed maybe in Pentium or Pentium Pro. Check
7690 * ancient hardware when it actually did change. */
7691 uint16_t *pu16Dst;
7692 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_RW);
7693 if (rc == VINF_SUCCESS)
7694 {
7695 *pu16Dst = (uint16_t)u32Value;
7696 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_RW);
7697 }
7698
7699 /* Commit the new RSP value unless an access handler made trouble. */
7700 if (rc == VINF_SUCCESS)
7701 pVCpu->cpum.GstCtx.rsp = uNewRsp;
7702
7703 return rc;
7704}
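/*
 * A worked example of the behaviour implemented above; the register values
 * are illustrative only. With ESP=0x1000 and a 32-bit operand size, pushing a
 * segment register moves ESP to 0x0ffc and writes the 16-bit selector to the
 * two bytes at SS:0x0ffc, while the bytes at SS:0x0ffe and SS:0x0fff keep
 * whatever was in memory before; the slot is mapped read-write precisely so
 * those two bytes are preserved rather than zeroed.
 */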
7705
7706
7707/**
7708 * Pushes a qword onto the stack.
7709 *
7710 * @returns Strict VBox status code.
7711 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7712 * @param u64Value The value to push.
7713 */
7714VBOXSTRICTRC iemMemStackPushU64(PVMCPUCC pVCpu, uint64_t u64Value) RT_NOEXCEPT
7715{
7716 /* Decrement the stack pointer. */
7717 uint64_t uNewRsp;
7718 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 8, &uNewRsp);
7719
7720 /* Write the qword the lazy way. */
7721 uint64_t *pu64Dst;
7722 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7723 if (rc == VINF_SUCCESS)
7724 {
7725 *pu64Dst = u64Value;
7726 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
7727 }
7728
7729 /* Commit the new RSP value unless an access handler made trouble. */
7730 if (rc == VINF_SUCCESS)
7731 pVCpu->cpum.GstCtx.rsp = uNewRsp;
7732
7733 return rc;
7734}
7735
7736
7737/**
7738 * Pops a word from the stack.
7739 *
7740 * @returns Strict VBox status code.
7741 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7742 * @param pu16Value Where to store the popped value.
7743 */
7744VBOXSTRICTRC iemMemStackPopU16(PVMCPUCC pVCpu, uint16_t *pu16Value) RT_NOEXCEPT
7745{
7746 /* Increment the stack pointer. */
7747 uint64_t uNewRsp;
7748 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 2, &uNewRsp);
7749
7750 /* Read the word the lazy way. */
7751 uint16_t const *pu16Src;
7752 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7753 if (rc == VINF_SUCCESS)
7754 {
7755 *pu16Value = *pu16Src;
7756 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
7757
7758 /* Commit the new RSP value. */
7759 if (rc == VINF_SUCCESS)
7760 pVCpu->cpum.GstCtx.rsp = uNewRsp;
7761 }
7762
7763 return rc;
7764}
7765
7766
7767/**
7768 * Pops a dword from the stack.
7769 *
7770 * @returns Strict VBox status code.
7771 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7772 * @param pu32Value Where to store the popped value.
7773 */
7774VBOXSTRICTRC iemMemStackPopU32(PVMCPUCC pVCpu, uint32_t *pu32Value) RT_NOEXCEPT
7775{
7776 /* Increment the stack pointer. */
7777 uint64_t uNewRsp;
7778 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 4, &uNewRsp);
7779
7780 /* Read the dword the lazy way. */
7781 uint32_t const *pu32Src;
7782 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7783 if (rc == VINF_SUCCESS)
7784 {
7785 *pu32Value = *pu32Src;
7786 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
7787
7788 /* Commit the new RSP value. */
7789 if (rc == VINF_SUCCESS)
7790 pVCpu->cpum.GstCtx.rsp = uNewRsp;
7791 }
7792
7793 return rc;
7794}
7795
7796
7797/**
7798 * Pops a qword from the stack.
7799 *
7800 * @returns Strict VBox status code.
7801 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7802 * @param pu64Value Where to store the popped value.
7803 */
7804VBOXSTRICTRC iemMemStackPopU64(PVMCPUCC pVCpu, uint64_t *pu64Value) RT_NOEXCEPT
7805{
7806 /* Increment the stack pointer. */
7807 uint64_t uNewRsp;
7808 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 8, &uNewRsp);
7809
7810 /* Read the qword the lazy way. */
7811 uint64_t const *pu64Src;
7812 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7813 if (rc == VINF_SUCCESS)
7814 {
7815 *pu64Value = *pu64Src;
7816 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
7817
7818 /* Commit the new RSP value. */
7819 if (rc == VINF_SUCCESS)
7820 pVCpu->cpum.GstCtx.rsp = uNewRsp;
7821 }
7822
7823 return rc;
7824}
7825
7826
7827/**
7828 * Pushes a word onto the stack, using a temporary stack pointer.
7829 *
7830 * @returns Strict VBox status code.
7831 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7832 * @param u16Value The value to push.
7833 * @param pTmpRsp Pointer to the temporary stack pointer.
7834 */
7835VBOXSTRICTRC iemMemStackPushU16Ex(PVMCPUCC pVCpu, uint16_t u16Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
7836{
7837 /* Decrement the stack pointer. */
7838 RTUINT64U NewRsp = *pTmpRsp;
7839 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 2);
7840
7841 /* Write the word the lazy way. */
7842 uint16_t *pu16Dst;
7843 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7844 if (rc == VINF_SUCCESS)
7845 {
7846 *pu16Dst = u16Value;
7847 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
7848 }
7849
7850 /* Commit the new RSP value unless an access handler made trouble. */
7851 if (rc == VINF_SUCCESS)
7852 *pTmpRsp = NewRsp;
7853
7854 return rc;
7855}
7856
7857
7858/**
7859 * Pushes a dword onto the stack, using a temporary stack pointer.
7860 *
7861 * @returns Strict VBox status code.
7862 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7863 * @param u32Value The value to push.
7864 * @param pTmpRsp Pointer to the temporary stack pointer.
7865 */
7866VBOXSTRICTRC iemMemStackPushU32Ex(PVMCPUCC pVCpu, uint32_t u32Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
7867{
7868 /* Decrement the stack pointer. */
7869 RTUINT64U NewRsp = *pTmpRsp;
7870 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 4);
7871
7872 /* Write the dword the lazy way. */
7873 uint32_t *pu32Dst;
7874 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7875 if (rc == VINF_SUCCESS)
7876 {
7877 *pu32Dst = u32Value;
7878 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
7879 }
7880
7881 /* Commit the new RSP value unless an access handler made trouble. */
7882 if (rc == VINF_SUCCESS)
7883 *pTmpRsp = NewRsp;
7884
7885 return rc;
7886}
7887
7888
7889/**
7890 * Pushes a qword onto the stack, using a temporary stack pointer.
7891 *
7892 * @returns Strict VBox status code.
7893 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7894 * @param u64Value The value to push.
7895 * @param pTmpRsp Pointer to the temporary stack pointer.
7896 */
7897VBOXSTRICTRC iemMemStackPushU64Ex(PVMCPUCC pVCpu, uint64_t u64Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
7898{
7899 /* Decrement the stack pointer. */
7900 RTUINT64U NewRsp = *pTmpRsp;
7901 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 8);
7902
7903 /* Write the qword the lazy way. */
7904 uint64_t *pu64Dst;
7905 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
7906 if (rc == VINF_SUCCESS)
7907 {
7908 *pu64Dst = u64Value;
7909 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
7910 }
7911
7912 /* Commit the new RSP value unless an access handler made trouble. */
7913 if (rc == VINF_SUCCESS)
7914 *pTmpRsp = NewRsp;
7915
7916 return rc;
7917}
7918
7919
7920/**
7921 * Pops a word from the stack, using a temporary stack pointer.
7922 *
7923 * @returns Strict VBox status code.
7924 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7925 * @param pu16Value Where to store the popped value.
7926 * @param pTmpRsp Pointer to the temporary stack pointer.
7927 */
7928VBOXSTRICTRC iemMemStackPopU16Ex(PVMCPUCC pVCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
7929{
7930 /* Increment the stack pointer. */
7931 RTUINT64U NewRsp = *pTmpRsp;
7932 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 2);
7933
7934 /* Read the word the lazy way. */
7935 uint16_t const *pu16Src;
7936 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7937 if (rc == VINF_SUCCESS)
7938 {
7939 *pu16Value = *pu16Src;
7940 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
7941
7942 /* Commit the new RSP value. */
7943 if (rc == VINF_SUCCESS)
7944 *pTmpRsp = NewRsp;
7945 }
7946
7947 return rc;
7948}
7949
7950
7951/**
7952 * Pops a dword from the stack, using a temporary stack pointer.
7953 *
7954 * @returns Strict VBox status code.
7955 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7956 * @param pu32Value Where to store the popped value.
7957 * @param pTmpRsp Pointer to the temporary stack pointer.
7958 */
7959VBOXSTRICTRC iemMemStackPopU32Ex(PVMCPUCC pVCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
7960{
7961 /* Increment the stack pointer. */
7962 RTUINT64U NewRsp = *pTmpRsp;
7963 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 4);
7964
7965 /* Read the dword the lazy way. */
7966 uint32_t const *pu32Src;
7967 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7968 if (rc == VINF_SUCCESS)
7969 {
7970 *pu32Value = *pu32Src;
7971 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
7972
7973 /* Commit the new RSP value. */
7974 if (rc == VINF_SUCCESS)
7975 *pTmpRsp = NewRsp;
7976 }
7977
7978 return rc;
7979}
7980
7981
7982/**
7983 * Pops a qword from the stack, using a temporary stack pointer.
7984 *
7985 * @returns Strict VBox status code.
7986 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7987 * @param pu64Value Where to store the popped value.
7988 * @param pTmpRsp Pointer to the temporary stack pointer.
7989 */
7990VBOXSTRICTRC iemMemStackPopU64Ex(PVMCPUCC pVCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
7991{
7992 /* Increment the stack pointer. */
7993 RTUINT64U NewRsp = *pTmpRsp;
7994 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 8);
7995
7996 /* Read the qword the lazy way. */
7997 uint64_t const *pu64Src;
7998 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
7999 if (rcStrict == VINF_SUCCESS)
8000 {
8001 *pu64Value = *pu64Src;
8002 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
8003
8004 /* Commit the new RSP value. */
8005 if (rcStrict == VINF_SUCCESS)
8006 *pTmpRsp = NewRsp;
8007 }
8008
8009 return rcStrict;
8010}
8011
8012
8013/**
8014 * Begin a special stack push (used by interrupt, exceptions and such).
8015 *
8016 * This will raise \#SS or \#PF if appropriate.
8017 *
8018 * @returns Strict VBox status code.
8019 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8020 * @param cbMem The number of bytes to push onto the stack.
8021 * @param ppvMem Where to return the pointer to the stack memory.
8022 * As with the other memory functions this could be
8023 * direct access or bounce buffered access, so
8024 * don't commit registers until the commit call
8025 * succeeds.
8026 * @param puNewRsp Where to return the new RSP value. This must be
8027 * passed unchanged to
8028 * iemMemStackPushCommitSpecial().
8029 */
8030VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp) RT_NOEXCEPT
8031{
8032 Assert(cbMem < UINT8_MAX);
8033 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, (uint8_t)cbMem, puNewRsp);
8034 return iemMemMap(pVCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
8035}
8036
8037
8038/**
8039 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
8040 *
8041 * This will update the rSP.
8042 *
8043 * @returns Strict VBox status code.
8044 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8045 * @param pvMem The pointer returned by
8046 * iemMemStackPushBeginSpecial().
8047 * @param uNewRsp The new RSP value returned by
8048 * iemMemStackPushBeginSpecial().
8049 */
8050VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPUCC pVCpu, void *pvMem, uint64_t uNewRsp) RT_NOEXCEPT
8051{
8052 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem, IEM_ACCESS_STACK_W);
8053 if (rcStrict == VINF_SUCCESS)
8054 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8055 return rcStrict;
8056}
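/*
 * A minimal usage sketch for the two helpers above, excluded from the build.
 * The helper name and the error-code parameter are made up for illustration.
 */
#if 0
static VBOXSTRICTRC iemExamplePushErrCode(PVMCPUCC pVCpu, uint32_t uErrCode)
{
    void     *pvMem;
    uint64_t  uNewRsp;
    /* Map the stack slot and calculate, but do not yet commit, the new RSP. */
    VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pVCpu, sizeof(uint32_t), &pvMem, &uNewRsp);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    *(uint32_t *)pvMem = uErrCode;
    /* Commit the write; only on success does RSP get updated. */
    return iemMemStackPushCommitSpecial(pVCpu, pvMem, uNewRsp);
}
#endif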
8057
8058
8059/**
8060 * Begin a special stack pop (used by iret, retf and such).
8061 *
8062 * This will raise \#SS or \#PF if appropriate.
8063 *
8064 * @returns Strict VBox status code.
8065 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8066 * @param cbMem The number of bytes to pop from the stack.
8067 * @param ppvMem Where to return the pointer to the stack memory.
8068 * @param puNewRsp Where to return the new RSP value. This must be
8069 * assigned to CPUMCTX::rsp manually some time
8070 * after iemMemStackPopDoneSpecial() has been
8071 * called.
8072 */
8073VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp) RT_NOEXCEPT
8074{
8075 Assert(cbMem < UINT8_MAX);
8076 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, (uint8_t)cbMem, puNewRsp);
8077 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
8078}
8079
8080
8081/**
8082 * Continue a special stack pop (used by iret and retf).
8083 *
8084 * This will raise \#SS or \#PF if appropriate.
8085 *
8086 * @returns Strict VBox status code.
8087 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8088 * @param cbMem The number of bytes to pop from the stack.
8089 * @param ppvMem Where to return the pointer to the stack memory.
8090 * @param puNewRsp Where to return the new RSP value. This must be
8091 * assigned to CPUMCTX::rsp manually some time
8092 * after iemMemStackPopDoneSpecial() has been
8093 * called.
8094 */
8095VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPUCC pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp) RT_NOEXCEPT
8096{
8097 Assert(cbMem < UINT8_MAX);
8098 RTUINT64U NewRsp;
8099 NewRsp.u = *puNewRsp;
8100 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 8);
8101 *puNewRsp = NewRsp.u;
8102 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
8103}
8104
8105
8106/**
8107 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
8108 * iemMemStackPopContinueSpecial).
8109 *
8110 * The caller will manually commit the rSP.
8111 *
8112 * @returns Strict VBox status code.
8113 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8114 * @param pvMem The pointer returned by
8115 * iemMemStackPopBeginSpecial() or
8116 * iemMemStackPopContinueSpecial().
8117 */
8118VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPUCC pVCpu, void const *pvMem) RT_NOEXCEPT
8119{
8120 return iemMemCommitAndUnmap(pVCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
8121}
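/*
 * A minimal usage sketch for the special stack pop helpers above, excluded
 * from the build; the helper name is made up for illustration. Note that RSP
 * is committed by the caller only after iemMemStackPopDoneSpecial succeeds.
 */
#if 0
static VBOXSTRICTRC iemExamplePopQword(PVMCPUCC pVCpu, uint64_t *puValue)
{
    void const *pvMem;
    uint64_t    uNewRsp;
    VBOXSTRICTRC rcStrict = iemMemStackPopBeginSpecial(pVCpu, sizeof(uint64_t), &pvMem, &uNewRsp);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    uint64_t const uValue = *(uint64_t const *)pvMem;
    rcStrict = iemMemStackPopDoneSpecial(pVCpu, pvMem);
    if (rcStrict == VINF_SUCCESS)
    {
        *puValue = uValue;
        pVCpu->cpum.GstCtx.rsp = uNewRsp; /* manual RSP commit, as documented above */
    }
    return rcStrict;
}
#endif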
8122
8123
8124/**
8125 * Fetches a system table byte.
8126 *
8127 * @returns Strict VBox status code.
8128 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8129 * @param pbDst Where to return the byte.
8130 * @param iSegReg The index of the segment register to use for
8131 * this access. The base and limits are checked.
8132 * @param GCPtrMem The address of the guest memory.
8133 */
8134VBOXSTRICTRC iemMemFetchSysU8(PVMCPUCC pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8135{
8136 /* The lazy approach for now... */
8137 uint8_t const *pbSrc;
8138 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
8139 if (rc == VINF_SUCCESS)
8140 {
8141 *pbDst = *pbSrc;
8142 rc = iemMemCommitAndUnmap(pVCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
8143 }
8144 return rc;
8145}
8146
8147
8148/**
8149 * Fetches a system table word.
8150 *
8151 * @returns Strict VBox status code.
8152 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8153 * @param pu16Dst Where to return the word.
8154 * @param iSegReg The index of the segment register to use for
8155 * this access. The base and limits are checked.
8156 * @param GCPtrMem The address of the guest memory.
8157 */
8158VBOXSTRICTRC iemMemFetchSysU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8159{
8160 /* The lazy approach for now... */
8161 uint16_t const *pu16Src;
8162 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
8163 if (rc == VINF_SUCCESS)
8164 {
8165 *pu16Dst = *pu16Src;
8166 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
8167 }
8168 return rc;
8169}
8170
8171
8172/**
8173 * Fetches a system table dword.
8174 *
8175 * @returns Strict VBox status code.
8176 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8177 * @param pu32Dst Where to return the dword.
8178 * @param iSegReg The index of the segment register to use for
8179 * this access. The base and limits are checked.
8180 * @param GCPtrMem The address of the guest memory.
8181 */
8182VBOXSTRICTRC iemMemFetchSysU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8183{
8184 /* The lazy approach for now... */
8185 uint32_t const *pu32Src;
8186 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
8187 if (rc == VINF_SUCCESS)
8188 {
8189 *pu32Dst = *pu32Src;
8190 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
8191 }
8192 return rc;
8193}
8194
8195
8196/**
8197 * Fetches a system table qword.
8198 *
8199 * @returns Strict VBox status code.
8200 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8201 * @param pu64Dst Where to return the qword.
8202 * @param iSegReg The index of the segment register to use for
8203 * this access. The base and limits are checked.
8204 * @param GCPtrMem The address of the guest memory.
8205 */
8206VBOXSTRICTRC iemMemFetchSysU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8207{
8208 /* The lazy approach for now... */
8209 uint64_t const *pu64Src;
8210 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
8211 if (rc == VINF_SUCCESS)
8212 {
8213 *pu64Dst = *pu64Src;
8214 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
8215 }
8216 return rc;
8217}
8218
8219
8220/**
8221 * Fetches a descriptor table entry with caller specified error code.
8222 *
8223 * @returns Strict VBox status code.
8224 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8225 * @param pDesc Where to return the descriptor table entry.
8226 * @param uSel The selector which table entry to fetch.
8227 * @param uXcpt The exception to raise on table lookup error.
8228 * @param uErrorCode The error code associated with the exception.
8229 */
8230static VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,
8231 uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT
8232{
8233 AssertPtr(pDesc);
8234 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
8235
8236 /** @todo did the 286 require all 8 bytes to be accessible? */
8237 /*
8238 * Get the selector table base and check bounds.
8239 */
8240 RTGCPTR GCPtrBase;
8241 if (uSel & X86_SEL_LDT)
8242 {
8243 if ( !pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present
8244 || (uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.ldtr.u32Limit )
8245 {
8246 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
8247 uSel, pVCpu->cpum.GstCtx.ldtr.u32Limit, pVCpu->cpum.GstCtx.ldtr.Sel));
8248 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8249 uErrorCode, 0);
8250 }
8251
8252 Assert(pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present);
8253 GCPtrBase = pVCpu->cpum.GstCtx.ldtr.u64Base;
8254 }
8255 else
8256 {
8257 if ((uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.gdtr.cbGdt)
8258 {
8259 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pVCpu->cpum.GstCtx.gdtr.cbGdt));
8260 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8261 uErrorCode, 0);
8262 }
8263 GCPtrBase = pVCpu->cpum.GstCtx.gdtr.pGdt;
8264 }
8265
8266 /*
8267 * Read the legacy descriptor and maybe the long mode extensions if
8268 * required.
8269 */
8270 VBOXSTRICTRC rcStrict;
8271 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)
8272 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
8273 else
8274 {
8275 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);
8276 if (rcStrict == VINF_SUCCESS)
8277 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);
8278 if (rcStrict == VINF_SUCCESS)
8279 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);
8280 if (rcStrict == VINF_SUCCESS)
8281 pDesc->Legacy.au16[3] = 0;
8282 else
8283 return rcStrict;
8284 }
8285
8286 if (rcStrict == VINF_SUCCESS)
8287 {
8288 if ( !IEM_IS_LONG_MODE(pVCpu)
8289 || pDesc->Legacy.Gen.u1DescType)
8290 pDesc->Long.au64[1] = 0;
8291 else if ((uint32_t)(uSel | X86_SEL_RPL_LDT) + 8 <= (uSel & X86_SEL_LDT ? pVCpu->cpum.GstCtx.ldtr.u32Limit : pVCpu->cpum.GstCtx.gdtr.cbGdt))
8292 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
8293 else
8294 {
8295 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
8296 /** @todo is this the right exception? */
8297 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
8298 }
8299 }
8300 return rcStrict;
8301}
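/*
 * A worked example of the bounds and base calculation above; the selector is
 * an illustrative value only. uSel=0x001b has TI=0 (GDT), RPL=3 and index 3,
 * so the descriptor is read from gdtr.pGdt + (0x001b & X86_SEL_MASK) =
 * gdtr.pGdt + 0x18, after checking that (0x001b | X86_SEL_RPL_LDT) = 0x001f
 * does not exceed gdtr.cbGdt, i.e. that the whole 8-byte entry lies inside
 * the table.
 */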
8302
8303
8304/**
8305 * Fetches a descriptor table entry.
8306 *
8307 * @returns Strict VBox status code.
8308 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8309 * @param pDesc Where to return the descriptor table entry.
8310 * @param uSel The selector which table entry to fetch.
8311 * @param uXcpt The exception to raise on table lookup error.
8312 */
8313VBOXSTRICTRC iemMemFetchSelDesc(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt) RT_NOEXCEPT
8314{
8315 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
8316}
8317
8318
8319/**
8320 * Marks the selector descriptor as accessed (only non-system descriptors).
8321 *
8322 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
8323 * will therefore skip the limit checks.
8324 *
8325 * @returns Strict VBox status code.
8326 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8327 * @param uSel The selector.
8328 */
8329VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
8330{
8331 /*
8332 * Get the selector table base and calculate the entry address.
8333 */
8334 RTGCPTR GCPtr = uSel & X86_SEL_LDT
8335 ? pVCpu->cpum.GstCtx.ldtr.u64Base
8336 : pVCpu->cpum.GstCtx.gdtr.pGdt;
8337 GCPtr += uSel & X86_SEL_MASK;
8338
8339 /*
8340 * ASMAtomicBitSet will assert if the address is misaligned, so do some
8341 * ugly stuff to avoid this. This will make sure it's an atomic access
8342 * as well as more or less remove any question about 8-bit or 32-bit accesses.
8343 */
8344 VBOXSTRICTRC rcStrict;
8345 uint32_t volatile *pu32;
8346 if ((GCPtr & 3) == 0)
8347 {
8348 /* The normal case, map the 32-bit bits around the accessed bit (40). */
8349 GCPtr += 2 + 2;
8350 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
8351 if (rcStrict != VINF_SUCCESS)
8352 return rcStrict;
8353 ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
8354 }
8355 else
8356 {
8357 /* The misaligned GDT/LDT case, map the whole thing. */
8358 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
8359 if (rcStrict != VINF_SUCCESS)
8360 return rcStrict;
8361 switch ((uintptr_t)pu32 & 3)
8362 {
8363 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
8364 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
8365 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
8366 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
8367 }
8368 }
8369
8370 return iemMemCommitAndUnmap(pVCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
8371}
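/*
 * The bit arithmetic above in numbers: the accessed bit is bit 40 of the
 * 8-byte descriptor (bit 0 of the type byte at offset 5). In the aligned case
 * the mapping starts 4 bytes into the entry, so the bit index becomes
 * 40 - 32 = 8, which is what the ASMAtomicBitSet(pu32, 8) call uses. In the
 * misaligned case the whole 8 bytes are mapped and the byte pointer and bit
 * index are adjusted by the low two bits of the host address so the atomic
 * operation still works on a naturally aligned unit.
 */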
8372
8373/** @} */
8374
8375/** @name Opcode Helpers.
8376 * @{
8377 */
8378
8379/**
8380 * Calculates the effective address of a ModR/M memory operand.
8381 *
8382 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8383 *
8384 * @return Strict VBox status code.
8385 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8386 * @param bRm The ModRM byte.
8387 * @param cbImm The size of any immediate following the
8388 * effective address opcode bytes. Important for
8389 * RIP relative addressing.
8390 * @param pGCPtrEff Where to return the effective address.
8391 */
8392VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPUCC pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff) RT_NOEXCEPT
8393{
8394 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
8395# define SET_SS_DEF() \
8396 do \
8397 { \
8398 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
8399 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
8400 } while (0)
8401
8402 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
8403 {
8404/** @todo Check the effective address size crap! */
8405 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
8406 {
8407 uint16_t u16EffAddr;
8408
8409 /* Handle the disp16 form with no registers first. */
8410 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
8411 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
8412 else
8413 {
8414 /* Get the displacement. */
8415 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8416 {
8417 case 0: u16EffAddr = 0; break;
8418 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
8419 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
8420 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
8421 }
8422
8423 /* Add the base and index registers to the disp. */
8424 switch (bRm & X86_MODRM_RM_MASK)
8425 {
8426 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
8427 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
8428 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
8429 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
8430 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
8431 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
8432 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
8433 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
8434 }
8435 }
8436
8437 *pGCPtrEff = u16EffAddr;
8438 }
8439 else
8440 {
8441 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8442 uint32_t u32EffAddr;
8443
8444 /* Handle the disp32 form with no registers first. */
8445 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8446 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
8447 else
8448 {
8449 /* Get the register (or SIB) value. */
8450 switch ((bRm & X86_MODRM_RM_MASK))
8451 {
8452 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8453 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8454 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8455 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8456 case 4: /* SIB */
8457 {
8458 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8459
8460 /* Get the index and scale it. */
8461 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
8462 {
8463 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8464 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8465 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8466 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8467 case 4: u32EffAddr = 0; /*none */ break;
8468 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
8469 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8470 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8471 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8472 }
8473 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8474
8475 /* add base */
8476 switch (bSib & X86_SIB_BASE_MASK)
8477 {
8478 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
8479 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
8480 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
8481 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
8482 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp; SET_SS_DEF(); break;
8483 case 5:
8484 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8485 {
8486 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
8487 SET_SS_DEF();
8488 }
8489 else
8490 {
8491 uint32_t u32Disp;
8492 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8493 u32EffAddr += u32Disp;
8494 }
8495 break;
8496 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
8497 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
8498 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8499 }
8500 break;
8501 }
8502 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
8503 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8504 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8505 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8506 }
8507
8508 /* Get and add the displacement. */
8509 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8510 {
8511 case 0:
8512 break;
8513 case 1:
8514 {
8515 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8516 u32EffAddr += i8Disp;
8517 break;
8518 }
8519 case 2:
8520 {
8521 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8522 u32EffAddr += u32Disp;
8523 break;
8524 }
8525 default:
8526 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
8527 }
8528
8529 }
8530 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
8531 *pGCPtrEff = u32EffAddr;
8532 else
8533 {
8534 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
8535 *pGCPtrEff = u32EffAddr & UINT16_MAX;
8536 }
8537 }
8538 }
8539 else
8540 {
8541 uint64_t u64EffAddr;
8542
8543 /* Handle the rip+disp32 form with no registers first. */
8544 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8545 {
8546 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
8547 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
8548 }
8549 else
8550 {
8551 /* Get the register (or SIB) value. */
8552 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
8553 {
8554 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8555 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8556 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8557 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8558 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
8559 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8560 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8561 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8562 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8563 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8564 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8565 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8566 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8567 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8568 /* SIB */
8569 case 4:
8570 case 12:
8571 {
8572 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8573
8574 /* Get the index and scale it. */
8575 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
8576 {
8577 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8578 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8579 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8580 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8581 case 4: u64EffAddr = 0; /*none */ break;
8582 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
8583 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8584 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8585 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8586 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8587 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8588 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8589 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
8590 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8591 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8592 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8593 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8594 }
8595 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8596
8597 /* add base */
8598 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
8599 {
8600 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
8601 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
8602 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
8603 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
8604 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp; SET_SS_DEF(); break;
8605 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
8606 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
8607 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
8608 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
8609 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
8610 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
8611 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
8612 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
8613 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
8614 /* complicated encodings */
8615 case 5:
8616 case 13:
8617 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8618 {
8619 if (!pVCpu->iem.s.uRexB)
8620 {
8621 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
8622 SET_SS_DEF();
8623 }
8624 else
8625 u64EffAddr += pVCpu->cpum.GstCtx.r13;
8626 }
8627 else
8628 {
8629 uint32_t u32Disp;
8630 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8631 u64EffAddr += (int32_t)u32Disp;
8632 }
8633 break;
8634 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8635 }
8636 break;
8637 }
8638 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8639 }
8640
8641 /* Get and add the displacement. */
8642 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8643 {
8644 case 0:
8645 break;
8646 case 1:
8647 {
8648 int8_t i8Disp;
8649 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8650 u64EffAddr += i8Disp;
8651 break;
8652 }
8653 case 2:
8654 {
8655 uint32_t u32Disp;
8656 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8657 u64EffAddr += (int32_t)u32Disp;
8658 break;
8659 }
8660 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
8661 }
8662
8663 }
8664
8665 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
8666 *pGCPtrEff = u64EffAddr;
8667 else
8668 {
8669 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8670 *pGCPtrEff = u64EffAddr & UINT32_MAX;
8671 }
8672 }
8673
8674 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
8675 return VINF_SUCCESS;
8676}
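/*
 * A worked decoding example for the function above; the ModRM byte is an
 * illustrative value only. In 16-bit addressing, bRm=0x52 decodes as mod=1,
 * rm=2, i.e. [bp+si+disp8]: the sign-extended disp8 is fetched, bp and si are
 * added, the result is truncated to 16 bits and SS becomes the default
 * segment via SET_SS_DEF(). In 64-bit mode, mod=0 with rm=5 instead selects
 * RIP-relative addressing, where the sign-extended disp32 is added to RIP
 * plus the full instruction length (IEM_GET_INSTR_LEN plus cbImm), i.e. it is
 * relative to the start of the next instruction.
 */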
8677
8678
8679/**
8680 * Calculates the effective address of a ModR/M memory operand.
8681 *
8682 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8683 *
8684 * @return Strict VBox status code.
8685 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8686 * @param bRm The ModRM byte.
8687 * @param cbImm The size of any immediate following the
8688 * effective address opcode bytes. Important for
8689 * RIP relative addressing.
8690 * @param pGCPtrEff Where to return the effective address.
8691 * @param offRsp RSP displacement.
8692 */
8693VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPUCC pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff, int8_t offRsp) RT_NOEXCEPT
8694{
8695 Log5(("iemOpHlpCalcRmEffAddrEx: bRm=%#x\n", bRm));
8696# define SET_SS_DEF() \
8697 do \
8698 { \
8699 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
8700 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
8701 } while (0)
8702
8703 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
8704 {
8705/** @todo Check the effective address size crap! */
8706 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
8707 {
8708 uint16_t u16EffAddr;
8709
8710 /* Handle the disp16 form with no registers first. */
8711 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
8712 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
8713 else
8714 {
8715 /* Get the displacement. */
8716 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8717 {
8718 case 0: u16EffAddr = 0; break;
8719 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
8720 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
8721 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
8722 }
8723
8724 /* Add the base and index registers to the disp. */
8725 switch (bRm & X86_MODRM_RM_MASK)
8726 {
8727 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
8728 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
8729 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
8730 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
8731 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
8732 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
8733 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
8734 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
8735 }
8736 }
8737
8738 *pGCPtrEff = u16EffAddr;
8739 }
8740 else
8741 {
8742 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8743 uint32_t u32EffAddr;
8744
8745 /* Handle the disp32 form with no registers first. */
8746 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8747 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
8748 else
8749 {
8750 /* Get the register (or SIB) value. */
8751 switch ((bRm & X86_MODRM_RM_MASK))
8752 {
8753 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8754 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8755 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8756 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8757 case 4: /* SIB */
8758 {
8759 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8760
8761 /* Get the index and scale it. */
8762 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
8763 {
8764 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8765 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8766 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8767 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8768 case 4: u32EffAddr = 0; /*none */ break;
8769 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
8770 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8771 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8772 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8773 }
8774 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8775
8776 /* add base */
8777 switch (bSib & X86_SIB_BASE_MASK)
8778 {
8779 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
8780 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
8781 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
8782 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
8783 case 4:
8784 u32EffAddr += pVCpu->cpum.GstCtx.esp + offRsp;
8785 SET_SS_DEF();
8786 break;
8787 case 5:
8788 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8789 {
8790 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
8791 SET_SS_DEF();
8792 }
8793 else
8794 {
8795 uint32_t u32Disp;
8796 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8797 u32EffAddr += u32Disp;
8798 }
8799 break;
8800 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
8801 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
8802 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8803 }
8804 break;
8805 }
8806 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
8807 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8808 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8809 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8810 }
8811
8812 /* Get and add the displacement. */
8813 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8814 {
8815 case 0:
8816 break;
8817 case 1:
8818 {
8819 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8820 u32EffAddr += i8Disp;
8821 break;
8822 }
8823 case 2:
8824 {
8825 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8826 u32EffAddr += u32Disp;
8827 break;
8828 }
8829 default:
8830 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
8831 }
8832
8833 }
8834 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
8835 *pGCPtrEff = u32EffAddr;
8836 else
8837 {
8838 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
8839 *pGCPtrEff = u32EffAddr & UINT16_MAX;
8840 }
8841 }
8842 }
8843 else
8844 {
8845 uint64_t u64EffAddr;
8846
8847 /* Handle the rip+disp32 form with no registers first. */
8848 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8849 {
8850 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
8851 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
8852 }
8853 else
8854 {
8855 /* Get the register (or SIB) value. */
8856 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
8857 {
8858 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8859 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8860 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8861 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8862 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
8863 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8864 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8865 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8866 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8867 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8868 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8869 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8870 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8871 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8872 /* SIB */
8873 case 4:
8874 case 12:
8875 {
8876 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8877
8878 /* Get the index and scale it. */
8879 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
8880 {
8881 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8882 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8883 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8884 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8885 case 4: u64EffAddr = 0; /*none */ break;
8886 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
8887 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8888 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8889 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8890 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8891 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8892 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8893 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
8894 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8895 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8896 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8897 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8898 }
8899 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8900
8901 /* add base */
8902 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
8903 {
8904 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
8905 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
8906 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
8907 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
8908 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + offRsp; SET_SS_DEF(); break;
8909 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
8910 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
8911 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
8912 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
8913 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
8914 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
8915 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
8916 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
8917 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
8918 /* complicated encodings */
8919 case 5:
8920 case 13:
8921 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8922 {
8923 if (!pVCpu->iem.s.uRexB)
8924 {
8925 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
8926 SET_SS_DEF();
8927 }
8928 else
8929 u64EffAddr += pVCpu->cpum.GstCtx.r13;
8930 }
8931 else
8932 {
8933 uint32_t u32Disp;
8934 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8935 u64EffAddr += (int32_t)u32Disp;
8936 }
8937 break;
8938 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8939 }
8940 break;
8941 }
8942 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8943 }
8944
8945 /* Get and add the displacement. */
8946 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8947 {
8948 case 0:
8949 break;
8950 case 1:
8951 {
8952 int8_t i8Disp;
8953 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8954 u64EffAddr += i8Disp;
8955 break;
8956 }
8957 case 2:
8958 {
8959 uint32_t u32Disp;
8960 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8961 u64EffAddr += (int32_t)u32Disp;
8962 break;
8963 }
8964 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
8965 }
8966
8967 }
8968
8969 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
8970 *pGCPtrEff = u64EffAddr;
8971 else
8972 {
8973 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8974 *pGCPtrEff = u64EffAddr & UINT32_MAX;
8975 }
8976 }
8977
8978 Log5(("iemOpHlpCalcRmEffAddrEx: EffAddr=%#010RGv\n", *pGCPtrEff));
8979 return VINF_SUCCESS;
8980}
8981
8982
8983#ifdef IEM_WITH_SETJMP
8984/**
8985 * Calculates the effective address of a ModR/M memory operand.
8986 *
8987 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8988 *
8989 * May longjmp on internal error.
8990 *
8991 * @return The effective address.
8992 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8993 * @param bRm The ModRM byte.
8994 * @param cbImm The size of any immediate following the
8995 * effective address opcode bytes. Important for
8996 * RIP relative addressing.
8997 */
8998RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPUCC pVCpu, uint8_t bRm, uint8_t cbImm) RT_NOEXCEPT
8999{
9000 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
9001# define SET_SS_DEF() \
9002 do \
9003 { \
9004 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
9005 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
9006 } while (0)
9007
9008 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
9009 {
9010/** @todo Check the effective address size crap! */
9011 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
9012 {
9013 uint16_t u16EffAddr;
9014
9015 /* Handle the disp16 form with no registers first. */
9016 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
9017 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
9018 else
9019 {
9020 /* Get the displacement. */
9021 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9022 {
9023 case 0: u16EffAddr = 0; break;
9024 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
9025 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
9026 default: AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_1)); /* (caller checked for these) */
9027 }
9028
9029 /* Add the base and index registers to the disp. */
9030 switch (bRm & X86_MODRM_RM_MASK)
9031 {
9032 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
9033 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
9034 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
9035 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
9036 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
9037 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
9038 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
9039 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
9040 }
9041 }
9042
9043 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
9044 return u16EffAddr;
9045 }
9046
9047 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9048 uint32_t u32EffAddr;
9049
9050 /* Handle the disp32 form with no registers first. */
9051 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9052 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
9053 else
9054 {
9055 /* Get the register (or SIB) value. */
9056 switch ((bRm & X86_MODRM_RM_MASK))
9057 {
9058 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
9059 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
9060 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
9061 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
9062 case 4: /* SIB */
9063 {
9064 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9065
9066 /* Get the index and scale it. */
9067 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
9068 {
9069 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
9070 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
9071 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
9072 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
9073 case 4: u32EffAddr = 0; /*none */ break;
9074 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
9075 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
9076 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
9077 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9078 }
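                    /* Scale the index by 1, 2, 4 or 8 as encoded in SIB.scale (a shift by 0..3). */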
9079 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9080
9081 /* add base */
9082 switch (bSib & X86_SIB_BASE_MASK)
9083 {
9084 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
9085 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
9086 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
9087 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
9088 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp; SET_SS_DEF(); break;
9089 case 5:
9090 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9091 {
9092 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
9093 SET_SS_DEF();
9094 }
9095 else
9096 {
9097 uint32_t u32Disp;
9098 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9099 u32EffAddr += u32Disp;
9100 }
9101 break;
9102 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
9103 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
9104 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9105 }
9106 break;
9107 }
9108 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
9109 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
9110 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
9111 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9112 }
9113
9114 /* Get and add the displacement. */
9115 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9116 {
9117 case 0:
9118 break;
9119 case 1:
9120 {
9121 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9122 u32EffAddr += i8Disp;
9123 break;
9124 }
9125 case 2:
9126 {
9127 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9128 u32EffAddr += u32Disp;
9129 break;
9130 }
9131 default:
9132 AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_2)); /* (caller checked for these) */
9133 }
9134 }
9135
9136 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
9137 {
9138 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
9139 return u32EffAddr;
9140 }
9141 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
9142 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX32\n", u32EffAddr & UINT16_MAX));
9143 return u32EffAddr & UINT16_MAX;
9144 }
9145
9146 uint64_t u64EffAddr;
9147
9148 /* Handle the rip+disp32 form with no registers first. */
9149 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9150 {
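        /* The disp32 is relative to the start of the next instruction, so add the
           current instruction length plus any trailing immediate bytes (cbImm). */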
9151 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
9152 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
9153 }
9154 else
9155 {
9156 /* Get the register (or SIB) value. */
9157 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
9158 {
9159 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9160 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9161 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9162 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9163 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
9164 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9165 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9166 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9167 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9168 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9169 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9170 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9171 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9172 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9173 /* SIB */
9174 case 4:
9175 case 12:
9176 {
9177 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9178
9179 /* Get the index and scale it. */
9180 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
9181 {
9182 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9183 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9184 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9185 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9186 case 4: u64EffAddr = 0; /*none */ break;
9187 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
9188 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9189 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9190 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9191 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9192 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9193 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9194 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
9195 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9196 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9197 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9198 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9199 }
9200 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9201
9202 /* add base */
9203 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
9204 {
9205 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
9206 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
9207 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
9208 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
9209 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp; SET_SS_DEF(); break;
9210 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
9211 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
9212 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
9213 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
9214 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
9215 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
9216 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
9217 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
9218 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
9219 /* complicated encodings */
9220 case 5:
9221 case 13:
9222 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9223 {
9224 if (!pVCpu->iem.s.uRexB)
9225 {
9226 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
9227 SET_SS_DEF();
9228 }
9229 else
9230 u64EffAddr += pVCpu->cpum.GstCtx.r13;
9231 }
9232 else
9233 {
9234 uint32_t u32Disp;
9235 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9236 u64EffAddr += (int32_t)u32Disp;
9237 }
9238 break;
9239 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9240 }
9241 break;
9242 }
9243 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9244 }
9245
9246 /* Get and add the displacement. */
9247 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9248 {
9249 case 0:
9250 break;
9251 case 1:
9252 {
9253 int8_t i8Disp;
9254 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9255 u64EffAddr += i8Disp;
9256 break;
9257 }
9258 case 2:
9259 {
9260 uint32_t u32Disp;
9261 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9262 u64EffAddr += (int32_t)u32Disp;
9263 break;
9264 }
9265 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
9266 }
9267
9268 }
9269
9270 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
9271 {
9272 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
9273 return u64EffAddr;
9274 }
9275 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9276 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
9277 return u64EffAddr & UINT32_MAX;
9278}
9279#endif /* IEM_WITH_SETJMP */
9280
9281/** @} */
9282
9283
9284#ifdef LOG_ENABLED
9285/**
9286 * Logs the current instruction.
9287 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9288 * @param fSameCtx Set if we have the same context information as the VMM,
9289 * clear if we may have already executed an instruction in
9290 * our debug context. When clear, we assume IEMCPU holds
9291 * valid CPU mode info.
9292 *
9293 * The @a fSameCtx parameter is now misleading and obsolete.
9294 * @param pszFunction The IEM function doing the execution.
9295 */
9296static void iemLogCurInstr(PVMCPUCC pVCpu, bool fSameCtx, const char *pszFunction) RT_NOEXCEPT
9297{
9298# ifdef IN_RING3
9299 if (LogIs2Enabled())
9300 {
9301 char szInstr[256];
9302 uint32_t cbInstr = 0;
9303 if (fSameCtx)
9304 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
9305 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
9306 szInstr, sizeof(szInstr), &cbInstr);
9307 else
9308 {
9309 uint32_t fFlags = 0;
9310 switch (pVCpu->iem.s.enmCpuMode)
9311 {
9312 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
9313 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
9314 case IEMMODE_16BIT:
9315 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) || pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
9316 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
9317 else
9318 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
9319 break;
9320 }
9321 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fFlags,
9322 szInstr, sizeof(szInstr), &cbInstr);
9323 }
9324
9325 PCX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
9326 Log2(("**** %s\n"
9327 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
9328 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
9329 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
9330 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
9331 " %s\n"
9332 , pszFunction,
9333 pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ebx, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.esi, pVCpu->cpum.GstCtx.edi,
9334 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL, pVCpu->cpum.GstCtx.tr.Sel,
9335 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ds.Sel, pVCpu->cpum.GstCtx.es.Sel,
9336 pVCpu->cpum.GstCtx.fs.Sel, pVCpu->cpum.GstCtx.gs.Sel, pVCpu->cpum.GstCtx.eflags.u,
9337 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
9338 szInstr));
9339
9340 if (LogIs3Enabled())
9341 DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
9342 }
9343 else
9344# endif
9345 LogFlow(("%s: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n", pszFunction, pVCpu->cpum.GstCtx.cs.Sel,
9346 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u));
9347 RT_NOREF_PV(pVCpu); RT_NOREF_PV(fSameCtx);
9348}
9349#endif /* LOG_ENABLED */
9350
9351
9352#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9353/**
9354 * Deals with VMCPU_FF_VMX_APIC_WRITE, VMCPU_FF_VMX_MTF, VMCPU_FF_VMX_NMI_WINDOW,
9355 * VMCPU_FF_VMX_PREEMPT_TIMER and VMCPU_FF_VMX_INT_WINDOW.
9356 *
9357 * @returns Modified rcStrict.
9358 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9359 * @param rcStrict The instruction execution status.
9360 */
9361static VBOXSTRICTRC iemHandleNestedInstructionBoundraryFFs(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict) RT_NOEXCEPT
9362{
9363 Assert(CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)));
9364 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF))
9365 {
9366 /* VMX preemption timer takes priority over NMI-window exits. */
9367 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
9368 {
9369 rcStrict = iemVmxVmexitPreemptTimer(pVCpu);
9370 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER));
9371 }
9372 /*
9373 * Check remaining intercepts.
9374 *
9375 * NMI-window and Interrupt-window VM-exits.
9376 * Interrupt shadow (block-by-STI and Mov SS) inhibits interrupts and may also block NMIs.
9377 * Event injection during VM-entry takes priority over NMI-window and interrupt-window VM-exits.
9378 *
9379 * See Intel spec. 26.7.6 "NMI-Window Exiting".
9380 * See Intel spec. 26.7.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
9381 */
9382 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW | VMCPU_FF_VMX_INT_WINDOW)
9383 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
9384 && !TRPMHasTrap(pVCpu))
9385 {
9386 Assert(CPUMIsGuestVmxInterceptEvents(&pVCpu->cpum.GstCtx));
9387 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW)
9388 && CPUMIsGuestVmxVirtNmiBlocking(&pVCpu->cpum.GstCtx))
9389 {
9390 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_NMI_WINDOW, 0 /* u64ExitQual */);
9391 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW));
9392 }
9393 else if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW)
9394 && CPUMIsGuestVmxVirtIntrEnabled(&pVCpu->cpum.GstCtx))
9395 {
9396 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_INT_WINDOW, 0 /* u64ExitQual */);
9397 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW));
9398 }
9399 }
9400 }
9401 /* TPR-below threshold/APIC write has the highest priority. */
9402 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
9403 {
9404 rcStrict = iemVmxApicWriteEmulation(pVCpu);
9405 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
9406 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE));
9407 }
9408 /* MTF takes priority over VMX-preemption timer. */
9409 else
9410 {
9411 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* u64ExitQual */);
9412 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
9413 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF));
9414 }
9415 return rcStrict;
9416}
9417#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
9418
9419
9420/**
9421 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
9422 * IEMExecOneWithPrefetchedByPC.
9423 *
9424 * Similar code is found in IEMExecLots.
9425 *
9426 * @return Strict VBox status code.
9427 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9428 * @param fExecuteInhibit If set, execute the instruction following CLI,
9429 * POP SS and MOV SS,GR.
9430 * @param pszFunction The calling function name.
9431 */
9432DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPUCC pVCpu, bool fExecuteInhibit, const char *pszFunction)
9433{
9434 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9435 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9436 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9437 RT_NOREF_PV(pszFunction);
9438
9439#ifdef IEM_WITH_SETJMP
9440 VBOXSTRICTRC rcStrict;
9441 jmp_buf JmpBuf;
9442 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
9443 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
9444 if ((rcStrict = setjmp(JmpBuf)) == 0)
9445 {
9446 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
9447 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
9448 }
9449 else
9450 pVCpu->iem.s.cLongJumps++;
9451 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
9452#else
9453 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
9454 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
9455#endif
9456 if (rcStrict == VINF_SUCCESS)
9457 pVCpu->iem.s.cInstructions++;
9458 if (pVCpu->iem.s.cActiveMappings > 0)
9459 {
9460 Assert(rcStrict != VINF_SUCCESS);
9461 iemMemRollback(pVCpu);
9462 }
9463 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9464 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9465 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9466
9467//#ifdef DEBUG
9468// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
9469//#endif
9470
9471#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9472 /*
9473 * Perform any VMX nested-guest instruction boundary actions.
9474 *
9475 * If any of these causes a VM-exit, we must skip executing the next
 9476 * instruction (we would run into stale page tables). A VM-exit also clears
 9477 * any interrupt inhibition, so that should already prevent us from trying to
 9478 * execute the next instruction. Clearing fExecuteInhibit here is
 9479 * problematic because of the setjmp/longjmp clobbering above.
9480 */
9481 if ( rcStrict == VINF_SUCCESS
9482 && VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9483 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW))
9484 rcStrict = iemHandleNestedInstructionBoundraryFFs(pVCpu, rcStrict);
9485#endif
9486
9487 /* Execute the next instruction as well if a cli, pop ss or
9488 mov ss, Gr has just completed successfully. */
9489 if ( fExecuteInhibit
9490 && rcStrict == VINF_SUCCESS
9491 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
9492 && EMIsInhibitInterruptsActive(pVCpu))
9493 {
9494 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fBypassHandlers, pVCpu->iem.s.fDisregardLock);
9495 if (rcStrict == VINF_SUCCESS)
9496 {
9497#ifdef LOG_ENABLED
9498 iemLogCurInstr(pVCpu, false, pszFunction);
9499#endif
9500#ifdef IEM_WITH_SETJMP
9501 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
9502 if ((rcStrict = setjmp(JmpBuf)) == 0)
9503 {
9504 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
9505 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
9506 }
9507 else
9508 pVCpu->iem.s.cLongJumps++;
9509 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
9510#else
9511 IEM_OPCODE_GET_NEXT_U8(&b);
9512 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
9513#endif
9514 if (rcStrict == VINF_SUCCESS)
9515 pVCpu->iem.s.cInstructions++;
9516 if (pVCpu->iem.s.cActiveMappings > 0)
9517 {
9518 Assert(rcStrict != VINF_SUCCESS);
9519 iemMemRollback(pVCpu);
9520 }
9521 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9522 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9523 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9524 }
9525 else if (pVCpu->iem.s.cActiveMappings > 0)
9526 iemMemRollback(pVCpu);
9527 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS); /* hope this is correct for all exceptional cases... */
9528 }
9529
9530 /*
9531 * Return value fiddling, statistics and sanity assertions.
9532 */
9533 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9534
9535 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
9536 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
9537 return rcStrict;
9538}
9539
9540
9541/**
9542 * Execute one instruction.
9543 *
9544 * @return Strict VBox status code.
9545 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9546 */
9547VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPUCC pVCpu)
9548{
 9549 AssertCompile(sizeof(pVCpu->iem.s) <= sizeof(pVCpu->iem.padding)); /* (tstVMStruct can't do its job w/o instruction stats) */
9550#ifdef LOG_ENABLED
9551 iemLogCurInstr(pVCpu, true, "IEMExecOne");
9552#endif
9553
9554 /*
9555 * Do the decoding and emulation.
9556 */
9557 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
9558 if (rcStrict == VINF_SUCCESS)
9559 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOne");
9560 else if (pVCpu->iem.s.cActiveMappings > 0)
9561 iemMemRollback(pVCpu);
9562
9563 if (rcStrict != VINF_SUCCESS)
9564 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9565 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9566 return rcStrict;
9567}
9568
9569
9570VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
9571{
9572 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
9573
9574 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
9575 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
9576 if (rcStrict == VINF_SUCCESS)
9577 {
9578 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneEx");
9579 if (pcbWritten)
9580 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
9581 }
9582 else if (pVCpu->iem.s.cActiveMappings > 0)
9583 iemMemRollback(pVCpu);
9584
9585 return rcStrict;
9586}
9587
9588
9589VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
9590 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
9591{
9592 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
9593
9594 VBOXSTRICTRC rcStrict;
9595 if ( cbOpcodeBytes
9596 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
9597 {
9598 iemInitDecoder(pVCpu, false, false);
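        /* Use the caller supplied opcode bytes directly: with the code TLB they back
           the instruction buffer, otherwise they are copied into the opcode array. */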
9599#ifdef IEM_WITH_CODE_TLB
9600 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
9601 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
9602 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
9603 pVCpu->iem.s.offCurInstrStart = 0;
9604 pVCpu->iem.s.offInstrNextByte = 0;
9605#else
9606 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
9607 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
9608#endif
9609 rcStrict = VINF_SUCCESS;
9610 }
9611 else
9612 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
9613 if (rcStrict == VINF_SUCCESS)
9614 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneWithPrefetchedByPC");
9615 else if (pVCpu->iem.s.cActiveMappings > 0)
9616 iemMemRollback(pVCpu);
9617
9618 return rcStrict;
9619}
9620
9621
9622VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
9623{
9624 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
9625
9626 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
9627 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true, false);
9628 if (rcStrict == VINF_SUCCESS)
9629 {
9630 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassEx");
9631 if (pcbWritten)
9632 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
9633 }
9634 else if (pVCpu->iem.s.cActiveMappings > 0)
9635 iemMemRollback(pVCpu);
9636
9637 return rcStrict;
9638}
9639
9640
9641VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
9642 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
9643{
9644 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
9645
9646 VBOXSTRICTRC rcStrict;
9647 if ( cbOpcodeBytes
9648 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
9649 {
9650 iemInitDecoder(pVCpu, true, false);
9651#ifdef IEM_WITH_CODE_TLB
9652 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
9653 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
9654 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
9655 pVCpu->iem.s.offCurInstrStart = 0;
9656 pVCpu->iem.s.offInstrNextByte = 0;
9657#else
9658 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
9659 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
9660#endif
9661 rcStrict = VINF_SUCCESS;
9662 }
9663 else
9664 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true, false);
9665 if (rcStrict == VINF_SUCCESS)
9666 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPC");
9667 else if (pVCpu->iem.s.cActiveMappings > 0)
9668 iemMemRollback(pVCpu);
9669
9670 return rcStrict;
9671}
9672
9673
9674/**
9675 * For debugging DISGetParamSize, may come in handy.
9676 *
9677 * @returns Strict VBox status code.
9678 * @param pVCpu The cross context virtual CPU structure of the
9679 * calling EMT.
9680 * @param pCtxCore The context core structure.
9681 * @param OpcodeBytesPC The PC of the opcode bytes.
 9682 * @param pvOpcodeBytes Prefetched opcode bytes.
9683 * @param cbOpcodeBytes Number of prefetched bytes.
9684 * @param pcbWritten Where to return the number of bytes written.
9685 * Optional.
9686 */
9687VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPCWritten(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
9688 const void *pvOpcodeBytes, size_t cbOpcodeBytes,
9689 uint32_t *pcbWritten)
9690{
9691 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
9692
9693 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
9694 VBOXSTRICTRC rcStrict;
9695 if ( cbOpcodeBytes
9696 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
9697 {
9698 iemInitDecoder(pVCpu, true, false);
9699#ifdef IEM_WITH_CODE_TLB
9700 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
9701 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
9702 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
9703 pVCpu->iem.s.offCurInstrStart = 0;
9704 pVCpu->iem.s.offInstrNextByte = 0;
9705#else
9706 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
9707 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
9708#endif
9709 rcStrict = VINF_SUCCESS;
9710 }
9711 else
9712 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true, false);
9713 if (rcStrict == VINF_SUCCESS)
9714 {
9715 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPCWritten");
9716 if (pcbWritten)
9717 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
9718 }
9719 else if (pVCpu->iem.s.cActiveMappings > 0)
9720 iemMemRollback(pVCpu);
9721
9722 return rcStrict;
9723}
9724
9725
9726/**
9727 * For handling split cacheline lock operations when the host has split-lock
9728 * detection enabled.
9729 *
9730 * This will cause the interpreter to disregard the lock prefix and implicit
9731 * locking (xchg).
9732 *
9733 * @returns Strict VBox status code.
9734 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9735 */
9736VMMDECL(VBOXSTRICTRC) IEMExecOneIgnoreLock(PVMCPUCC pVCpu)
9737{
9738 /*
9739 * Do the decoding and emulation.
9740 */
9741 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, true /*fDisregardLock*/);
9742 if (rcStrict == VINF_SUCCESS)
9743 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneIgnoreLock");
9744 else if (pVCpu->iem.s.cActiveMappings > 0)
9745 iemMemRollback(pVCpu);
9746
9747 if (rcStrict != VINF_SUCCESS)
9748 LogFlow(("IEMExecOneIgnoreLock: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9749 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9750 return rcStrict;
9751}
9752
9753
9754VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPUCC pVCpu, uint32_t cMaxInstructions, uint32_t cPollRate, uint32_t *pcInstructions)
9755{
9756 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
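    /* cPollRate is expected to be a power of two minus one; it is used as a mask
       further down so timers get polled roughly every cPollRate + 1 instructions. */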
9757 AssertMsg(RT_IS_POWER_OF_TWO(cPollRate + 1), ("%#x\n", cPollRate));
9758
9759 /*
9760 * See if there is an interrupt pending in TRPM, inject it if we can.
9761 */
9762 /** @todo Can we centralize this under CPUMCanInjectInterrupt()? */
9763#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9764 bool fIntrEnabled = CPUMGetGuestGif(&pVCpu->cpum.GstCtx);
9765 if (fIntrEnabled)
9766 {
9767 if (!CPUMIsGuestInNestedHwvirtMode(IEM_GET_CTX(pVCpu)))
9768 fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
9769 else if (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
9770 fIntrEnabled = CPUMIsGuestVmxPhysIntrEnabled(IEM_GET_CTX(pVCpu));
9771 else
9772 {
9773 Assert(CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)));
9774 fIntrEnabled = CPUMIsGuestSvmPhysIntrEnabled(pVCpu, IEM_GET_CTX(pVCpu));
9775 }
9776 }
9777#else
9778 bool fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
9779#endif
9780
9781 /** @todo What if we are injecting an exception and not an interrupt? Is that
9782 * possible here? For now we assert it is indeed only an interrupt. */
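    /* Only inject when the current RIP is not covered by an interrupt shadow. */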
9783 if ( fIntrEnabled
9784 && TRPMHasTrap(pVCpu)
9785 && EMGetInhibitInterruptsPC(pVCpu) != pVCpu->cpum.GstCtx.rip)
9786 {
9787 uint8_t u8TrapNo;
9788 TRPMEVENT enmType;
9789 uint32_t uErrCode;
9790 RTGCPTR uCr2;
9791 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */, NULL /* fIcebp */);
9792 AssertRC(rc2);
9793 Assert(enmType == TRPM_HARDWARE_INT);
9794 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
9795 TRPMResetTrap(pVCpu);
9796#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9797 /* Injecting an event may cause a VM-exit. */
9798 if ( rcStrict != VINF_SUCCESS
9799 && rcStrict != VINF_IEM_RAISED_XCPT)
9800 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
9801#else
9802 NOREF(rcStrict);
9803#endif
9804 }
9805
9806 /*
9807 * Initial decoder init w/ prefetch, then setup setjmp.
9808 */
9809 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
9810 if (rcStrict == VINF_SUCCESS)
9811 {
9812#ifdef IEM_WITH_SETJMP
9813 jmp_buf JmpBuf;
9814 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
9815 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
9816 pVCpu->iem.s.cActiveMappings = 0;
9817 if ((rcStrict = setjmp(JmpBuf)) == 0)
9818#endif
9819 {
9820 /*
 9821 * The run loop. We limit ourselves to the caller-specified number of instructions.
9822 */
9823 uint32_t cMaxInstructionsGccStupidity = cMaxInstructions;
9824 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
9825 for (;;)
9826 {
9827 /*
9828 * Log the state.
9829 */
9830#ifdef LOG_ENABLED
9831 iemLogCurInstr(pVCpu, true, "IEMExecLots");
9832#endif
9833
9834 /*
9835 * Do the decoding and emulation.
9836 */
9837 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
9838 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
9839 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9840 {
9841 Assert(pVCpu->iem.s.cActiveMappings == 0);
9842 pVCpu->iem.s.cInstructions++;
9843 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
9844 {
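                    /* Stay in the loop only while no relevant force-flags are pending;
                       pending APIC/PIC interrupts are ignored while IF is clear since
                       they cannot be delivered anyway. */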
9845 uint64_t fCpu = pVCpu->fLocalForcedActions
9846 & ( VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
9847 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
9848 | VMCPU_FF_TLB_FLUSH
9849 | VMCPU_FF_INHIBIT_INTERRUPTS
9850 | VMCPU_FF_BLOCK_NMIS
9851 | VMCPU_FF_UNHALT ));
9852
9853 if (RT_LIKELY( ( !fCpu
9854 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
9855 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF) )
9856 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) ))
9857 {
9858 if (cMaxInstructionsGccStupidity-- > 0)
9859 {
 9860 /* Poll timers every now and then according to the caller's specs. */
9861 if ( (cMaxInstructionsGccStupidity & cPollRate) != 0
9862 || !TMTimerPollBool(pVM, pVCpu))
9863 {
9864 Assert(pVCpu->iem.s.cActiveMappings == 0);
9865 iemReInitDecoder(pVCpu);
9866 continue;
9867 }
9868 }
9869 }
9870 }
9871 Assert(pVCpu->iem.s.cActiveMappings == 0);
9872 }
9873 else if (pVCpu->iem.s.cActiveMappings > 0)
9874 iemMemRollback(pVCpu);
9875 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9876 break;
9877 }
9878 }
9879#ifdef IEM_WITH_SETJMP
9880 else
9881 {
9882 if (pVCpu->iem.s.cActiveMappings > 0)
9883 iemMemRollback(pVCpu);
9884# if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9885 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9886# endif
9887 pVCpu->iem.s.cLongJumps++;
9888 }
9889 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
9890#endif
9891
9892 /*
9893 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
9894 */
9895 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
9896 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
9897 }
9898 else
9899 {
9900 if (pVCpu->iem.s.cActiveMappings > 0)
9901 iemMemRollback(pVCpu);
9902
9903#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9904 /*
9905 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
9906 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
9907 */
9908 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9909#endif
9910 }
9911
9912 /*
9913 * Maybe re-enter raw-mode and log.
9914 */
9915 if (rcStrict != VINF_SUCCESS)
9916 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9917 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9918 if (pcInstructions)
9919 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
9920 return rcStrict;
9921}
9922
9923
9924/**
9925 * Interface used by EMExecuteExec, does exit statistics and limits.
9926 *
9927 * @returns Strict VBox status code.
9928 * @param pVCpu The cross context virtual CPU structure.
9929 * @param fWillExit To be defined.
9930 * @param cMinInstructions Minimum number of instructions to execute before checking for FFs.
9931 * @param cMaxInstructions Maximum number of instructions to execute.
9932 * @param cMaxInstructionsWithoutExits
9933 * The max number of instructions without exits.
9934 * @param pStats Where to return statistics.
9935 */
9936VMMDECL(VBOXSTRICTRC) IEMExecForExits(PVMCPUCC pVCpu, uint32_t fWillExit, uint32_t cMinInstructions, uint32_t cMaxInstructions,
9937 uint32_t cMaxInstructionsWithoutExits, PIEMEXECFOREXITSTATS pStats)
9938{
9939 NOREF(fWillExit); /** @todo define flexible exit crits */
9940
9941 /*
9942 * Initialize return stats.
9943 */
9944 pStats->cInstructions = 0;
9945 pStats->cExits = 0;
9946 pStats->cMaxExitDistance = 0;
9947 pStats->cReserved = 0;
9948
9949 /*
9950 * Initial decoder init w/ prefetch, then setup setjmp.
9951 */
9952 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
9953 if (rcStrict == VINF_SUCCESS)
9954 {
9955#ifdef IEM_WITH_SETJMP
9956 jmp_buf JmpBuf;
9957 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
9958 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
9959 pVCpu->iem.s.cActiveMappings = 0;
9960 if ((rcStrict = setjmp(JmpBuf)) == 0)
9961#endif
9962 {
9963#ifdef IN_RING0
9964 bool const fCheckPreemptionPending = !RTThreadPreemptIsPossible() || !RTThreadPreemptIsEnabled(NIL_RTTHREAD);
9965#endif
9966 uint32_t cInstructionSinceLastExit = 0;
9967
9968 /*
 9969 * The run loop. We limit ourselves to the caller-specified instruction and exit limits.
9970 */
9971 PVM pVM = pVCpu->CTX_SUFF(pVM);
9972 for (;;)
9973 {
9974 /*
9975 * Log the state.
9976 */
9977#ifdef LOG_ENABLED
9978 iemLogCurInstr(pVCpu, true, "IEMExecForExits");
9979#endif
9980
9981 /*
9982 * Do the decoding and emulation.
9983 */
9984 uint32_t const cPotentialExits = pVCpu->iem.s.cPotentialExits;
9985
9986 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
9987 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
9988
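                /* An exit is detected by cPotentialExits having been bumped while
                   executing the instruction; track the distance between exits. */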
9989 if ( cPotentialExits != pVCpu->iem.s.cPotentialExits
9990 && cInstructionSinceLastExit > 0 /* don't count the first */ )
9991 {
9992 pStats->cExits += 1;
9993 if (cInstructionSinceLastExit > pStats->cMaxExitDistance)
9994 pStats->cMaxExitDistance = cInstructionSinceLastExit;
9995 cInstructionSinceLastExit = 0;
9996 }
9997
9998 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9999 {
10000 Assert(pVCpu->iem.s.cActiveMappings == 0);
10001 pVCpu->iem.s.cInstructions++;
10002 pStats->cInstructions++;
10003 cInstructionSinceLastExit++;
10004 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
10005 {
10006 uint64_t fCpu = pVCpu->fLocalForcedActions
10007 & ( VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
10008 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
10009 | VMCPU_FF_TLB_FLUSH
10010 | VMCPU_FF_INHIBIT_INTERRUPTS
10011 | VMCPU_FF_BLOCK_NMIS
10012 | VMCPU_FF_UNHALT ));
10013
10014 if (RT_LIKELY( ( ( !fCpu
10015 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
10016 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF))
10017 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) )
10018 || pStats->cInstructions < cMinInstructions))
10019 {
10020 if (pStats->cInstructions < cMaxInstructions)
10021 {
10022 if (cInstructionSinceLastExit <= cMaxInstructionsWithoutExits)
10023 {
10024#ifdef IN_RING0
10025 if ( !fCheckPreemptionPending
10026 || !RTThreadPreemptIsPending(NIL_RTTHREAD))
10027#endif
10028 {
10029 Assert(pVCpu->iem.s.cActiveMappings == 0);
10030 iemReInitDecoder(pVCpu);
10031 continue;
10032 }
10033#ifdef IN_RING0
10034 rcStrict = VINF_EM_RAW_INTERRUPT;
10035 break;
10036#endif
10037 }
10038 }
10039 }
10040 Assert(!(fCpu & VMCPU_FF_IEM));
10041 }
10042 Assert(pVCpu->iem.s.cActiveMappings == 0);
10043 }
10044 else if (pVCpu->iem.s.cActiveMappings > 0)
10045 iemMemRollback(pVCpu);
10046 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10047 break;
10048 }
10049 }
10050#ifdef IEM_WITH_SETJMP
10051 else
10052 {
10053 if (pVCpu->iem.s.cActiveMappings > 0)
10054 iemMemRollback(pVCpu);
10055 pVCpu->iem.s.cLongJumps++;
10056 }
10057 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
10058#endif
10059
10060 /*
10061 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
10062 */
10063 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
10064 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
10065 }
10066 else
10067 {
10068 if (pVCpu->iem.s.cActiveMappings > 0)
10069 iemMemRollback(pVCpu);
10070
10071#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
10072 /*
10073 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
10074 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
10075 */
10076 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10077#endif
10078 }
10079
10080 /*
10081 * Maybe re-enter raw-mode and log.
10082 */
10083 if (rcStrict != VINF_SUCCESS)
10084 LogFlow(("IEMExecForExits: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc; ins=%u exits=%u maxdist=%u\n",
10085 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp,
10086 pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict), pStats->cInstructions, pStats->cExits, pStats->cMaxExitDistance));
10087 return rcStrict;
10088}
10089
10090
10091/**
10092 * Injects a trap, fault, abort, software interrupt or external interrupt.
10093 *
10094 * The parameter list matches TRPMQueryTrapAll pretty closely.
10095 *
10096 * @returns Strict VBox status code.
10097 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10098 * @param u8TrapNo The trap number.
10099 * @param enmType What type is it (trap/fault/abort), software
10100 * interrupt or hardware interrupt.
10101 * @param uErrCode The error code if applicable.
10102 * @param uCr2 The CR2 value if applicable.
10103 * @param cbInstr The instruction length (only relevant for
10104 * software interrupts).
10105 */
10106VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPUCC pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
10107 uint8_t cbInstr)
10108{
10109 iemInitDecoder(pVCpu, false, false);
10110#ifdef DBGFTRACE_ENABLED
10111 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
10112 u8TrapNo, enmType, uErrCode, uCr2);
10113#endif
10114
10115 uint32_t fFlags;
10116 switch (enmType)
10117 {
10118 case TRPM_HARDWARE_INT:
10119 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
10120 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
10121 uErrCode = uCr2 = 0;
10122 break;
10123
10124 case TRPM_SOFTWARE_INT:
10125 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
10126 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
10127 uErrCode = uCr2 = 0;
10128 break;
10129
10130 case TRPM_TRAP:
10131 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
10132 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
10133 if (u8TrapNo == X86_XCPT_PF)
10134 fFlags |= IEM_XCPT_FLAGS_CR2;
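            /* These CPU exceptions push an error code, so flag that too. */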
10135 switch (u8TrapNo)
10136 {
10137 case X86_XCPT_DF:
10138 case X86_XCPT_TS:
10139 case X86_XCPT_NP:
10140 case X86_XCPT_SS:
10141 case X86_XCPT_PF:
10142 case X86_XCPT_AC:
10143 case X86_XCPT_GP:
10144 fFlags |= IEM_XCPT_FLAGS_ERR;
10145 break;
10146 }
10147 break;
10148
10149 IEM_NOT_REACHED_DEFAULT_CASE_RET();
10150 }
10151
10152 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
10153
10154 if (pVCpu->iem.s.cActiveMappings > 0)
10155 iemMemRollback(pVCpu);
10156
10157 return rcStrict;
10158}
10159
10160
10161/**
10162 * Injects the active TRPM event.
10163 *
10164 * @returns Strict VBox status code.
10165 * @param pVCpu The cross context virtual CPU structure.
10166 */
10167VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPUCC pVCpu)
10168{
10169#ifndef IEM_IMPLEMENTS_TASKSWITCH
10170 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
10171#else
10172 uint8_t u8TrapNo;
10173 TRPMEVENT enmType;
10174 uint32_t uErrCode;
10175 RTGCUINTPTR uCr2;
10176 uint8_t cbInstr;
10177 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr, NULL /* fIcebp */);
10178 if (RT_FAILURE(rc))
10179 return rc;
10180
10181 /** @todo r=ramshankar: Pass ICEBP info. to IEMInjectTrap() below and handle
10182 * ICEBP \#DB injection as a special case. */
10183 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
10184#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
10185 if (rcStrict == VINF_SVM_VMEXIT)
10186 rcStrict = VINF_SUCCESS;
10187#endif
10188#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10189 if (rcStrict == VINF_VMX_VMEXIT)
10190 rcStrict = VINF_SUCCESS;
10191#endif
10192 /** @todo Are there any other codes that imply the event was successfully
10193 * delivered to the guest? See @bugref{6607}. */
10194 if ( rcStrict == VINF_SUCCESS
10195 || rcStrict == VINF_IEM_RAISED_XCPT)
10196 TRPMResetTrap(pVCpu);
10197
10198 return rcStrict;
10199#endif
10200}
10201
10202
10203VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
10204{
10205 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
10206 return VERR_NOT_IMPLEMENTED;
10207}
10208
10209
10210VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
10211{
10212 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
10213 return VERR_NOT_IMPLEMENTED;
10214}
10215
10216
10217#if 0 /* The IRET-to-v8086 mode in PATM is very optimistic, so I don't dare do this yet. */
10218/**
 10219 * Executes an IRET instruction with default operand size.
10220 *
10221 * This is for PATM.
10222 *
10223 * @returns VBox status code.
10224 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10225 * @param pCtxCore The register frame.
10226 */
10227VMM_INT_DECL(int) IEMExecInstr_iret(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore)
10228{
10229 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10230
10231 iemCtxCoreToCtx(pCtx, pCtxCore);
10232 iemInitDecoder(pVCpu);
10233 VBOXSTRICTRC rcStrict = iemCImpl_iret(pVCpu, 1, pVCpu->iem.s.enmDefOpSize);
10234 if (rcStrict == VINF_SUCCESS)
10235 iemCtxToCtxCore(pCtxCore, pCtx);
10236 else
10237 LogFlow(("IEMExecInstr_iret: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
10238 pVCpu->cpum.GstCtx.cs, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
10239 return rcStrict;
10240}
10241#endif
10242
10243
10244/**
10245 * Interface for HM and EM for executing string I/O OUT (write) instructions.
10246 *
10247 * This API ASSUMES that the caller has already verified that the guest code is
10248 * allowed to access the I/O port. (The I/O port is in the DX register in the
10249 * guest state.)
10250 *
10251 * @returns Strict VBox status code.
10252 * @param pVCpu The cross context virtual CPU structure.
10253 * @param cbValue The size of the I/O port access (1, 2, or 4).
10254 * @param enmAddrMode The addressing mode.
10255 * @param fRepPrefix Indicates whether a repeat prefix is used
10256 * (doesn't matter which for this instruction).
10257 * @param cbInstr The instruction length in bytes.
 10258 * @param iEffSeg The effective segment register number.
10259 * @param fIoChecked Whether the access to the I/O port has been
10260 * checked or not. It's typically checked in the
10261 * HM scenario.
10262 */
10263VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
10264 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
10265{
10266 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
10267 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10268
10269 /*
10270 * State init.
10271 */
10272 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10273
10274 /*
10275 * Switch orgy for getting to the right handler.
10276 */
10277 VBOXSTRICTRC rcStrict;
10278 if (fRepPrefix)
10279 {
10280 switch (enmAddrMode)
10281 {
10282 case IEMMODE_16BIT:
10283 switch (cbValue)
10284 {
10285 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10286 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10287 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10288 default:
10289 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10290 }
10291 break;
10292
10293 case IEMMODE_32BIT:
10294 switch (cbValue)
10295 {
10296 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10297 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10298 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10299 default:
10300 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10301 }
10302 break;
10303
10304 case IEMMODE_64BIT:
10305 switch (cbValue)
10306 {
10307 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10308 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10309 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10310 default:
10311 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10312 }
10313 break;
10314
10315 default:
10316 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10317 }
10318 }
10319 else
10320 {
10321 switch (enmAddrMode)
10322 {
10323 case IEMMODE_16BIT:
10324 switch (cbValue)
10325 {
10326 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10327 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10328 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10329 default:
10330 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10331 }
10332 break;
10333
10334 case IEMMODE_32BIT:
10335 switch (cbValue)
10336 {
10337 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10338 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10339 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10340 default:
10341 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10342 }
10343 break;
10344
10345 case IEMMODE_64BIT:
10346 switch (cbValue)
10347 {
10348 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10349 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10350 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10351 default:
10352 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10353 }
10354 break;
10355
10356 default:
10357 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10358 }
10359 }
10360
10361 if (pVCpu->iem.s.cActiveMappings)
10362 iemMemRollback(pVCpu);
10363
10364 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10365}
10366
10367
10368/**
10369 * Interface for HM and EM for executing string I/O IN (read) instructions.
10370 *
10371 * This API ASSUMES that the caller has already verified that the guest code is
10372 * allowed to access the I/O port. (The I/O port is in the DX register in the
10373 * guest state.)
10374 *
10375 * @returns Strict VBox status code.
10376 * @param pVCpu The cross context virtual CPU structure.
10377 * @param cbValue The size of the I/O port access (1, 2, or 4).
10378 * @param enmAddrMode The addressing mode.
10379 * @param fRepPrefix Indicates whether a repeat prefix is used
10380 * (doesn't matter which for this instruction).
10381 * @param cbInstr The instruction length in bytes.
10382 * @param fIoChecked Whether the access to the I/O port has been
10383 * checked or not. It's typically checked in the
10384 * HM scenario.
10385 */
10386VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
10387 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
10388{
10389 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10390
10391 /*
10392 * State init.
10393 */
10394 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10395
10396 /*
10397 * Switch orgy for getting to the right handler.
10398 */
10399 VBOXSTRICTRC rcStrict;
10400 if (fRepPrefix)
10401 {
10402 switch (enmAddrMode)
10403 {
10404 case IEMMODE_16BIT:
10405 switch (cbValue)
10406 {
10407 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
10408 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
10409 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
10410 default:
10411 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10412 }
10413 break;
10414
10415 case IEMMODE_32BIT:
10416 switch (cbValue)
10417 {
10418 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
10419 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
10420 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
10421 default:
10422 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10423 }
10424 break;
10425
10426 case IEMMODE_64BIT:
10427 switch (cbValue)
10428 {
10429 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
10430 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
10431 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
10432 default:
10433 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10434 }
10435 break;
10436
10437 default:
10438 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10439 }
10440 }
10441 else
10442 {
10443 switch (enmAddrMode)
10444 {
10445 case IEMMODE_16BIT:
10446 switch (cbValue)
10447 {
10448 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
10449 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
10450 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
10451 default:
10452 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10453 }
10454 break;
10455
10456 case IEMMODE_32BIT:
10457 switch (cbValue)
10458 {
10459 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
10460 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
10461 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
10462 default:
10463 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10464 }
10465 break;
10466
10467 case IEMMODE_64BIT:
10468 switch (cbValue)
10469 {
10470 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
10471 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
10472 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
10473 default:
10474 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10475 }
10476 break;
10477
10478 default:
10479 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10480 }
10481 }
10482
10483 if ( pVCpu->iem.s.cActiveMappings == 0
10484 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
10485 { /* likely */ }
10486 else
10487 {
10488 AssertMsg(!IOM_SUCCESS(rcStrict), ("%#x\n", VBOXSTRICTRC_VAL(rcStrict)));
10489 iemMemRollback(pVCpu);
10490 }
10491 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10492}
10493
10494
10495/**
 10496 * Interface for rawmode to execute an OUT instruction.
10497 *
10498 * @returns Strict VBox status code.
10499 * @param pVCpu The cross context virtual CPU structure.
10500 * @param cbInstr The instruction length in bytes.
 10501 * @param u16Port The port to write to.
10502 * @param fImm Whether the port is specified using an immediate operand or
10503 * using the implicit DX register.
10504 * @param cbReg The register size.
10505 *
10506 * @remarks In ring-0 not all of the state needs to be synced in.
10507 */
10508VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
10509{
10510 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10511 Assert(cbReg <= 4 && cbReg != 3);
10512
10513 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10514 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_out, u16Port, fImm, cbReg);
10515 Assert(!pVCpu->iem.s.cActiveMappings);
10516 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10517}
10518
10519
10520/**
 10521 * Interface for rawmode to execute an IN instruction.
10522 *
10523 * @returns Strict VBox status code.
10524 * @param pVCpu The cross context virtual CPU structure.
10525 * @param cbInstr The instruction length in bytes.
10526 * @param u16Port The port to read.
10527 * @param fImm Whether the port is specified using an immediate operand or
10528 * using the implicit DX.
10529 * @param cbReg The register size.
10530 */
10531VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
10532{
10533 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10534 Assert(cbReg <= 4 && cbReg != 3);
10535
10536 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10537 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_in, u16Port, fImm, cbReg);
10538 Assert(!pVCpu->iem.s.cActiveMappings);
10539 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10540}
10541
10542
10543/**
10544 * Interface for HM and EM to write to a CRx register.
10545 *
10546 * @returns Strict VBox status code.
10547 * @param pVCpu The cross context virtual CPU structure.
10548 * @param cbInstr The instruction length in bytes.
10549 * @param iCrReg The control register number (destination).
10550 * @param iGReg The general purpose register number (source).
10551 *
10552 * @remarks In ring-0 not all of the state needs to be synced in.
10553 */
10554VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
10555{
10556 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10557 Assert(iCrReg < 16);
10558 Assert(iGReg < 16);
10559
10560 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10561 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
10562 Assert(!pVCpu->iem.s.cActiveMappings);
10563 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10564}
10565
10566
10567/**
10568 * Interface for HM and EM to read from a CRx register.
10569 *
10570 * @returns Strict VBox status code.
10571 * @param pVCpu The cross context virtual CPU structure.
10572 * @param cbInstr The instruction length in bytes.
10573 * @param iGReg The general purpose register number (destination).
10574 * @param iCrReg The control register number (source).
10575 *
10576 * @remarks In ring-0 not all of the state needs to be synced in.
10577 */
10578VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
10579{
10580 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10581 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4
10582 | CPUMCTX_EXTRN_APIC_TPR);
10583 Assert(iCrReg < 16);
10584 Assert(iGReg < 16);
10585
10586 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10587 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
10588 Assert(!pVCpu->iem.s.cActiveMappings);
10589 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10590}
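
/*
 * Illustrative sketch (hypothetical caller, not part of this file): a MOV CRx
 * intercept handler that has decoded the direction, the control register and
 * the general purpose register would dispatch like this.  Note the swapped
 * parameter order between the two helpers: the destination always comes first.
 *
 *     VBOXSTRICTRC rcStrict = fWrite
 *                           ? IEMExecDecodedMovCRxWrite(pVCpu, cbInstr, iCrReg, iGReg)
 *                           : IEMExecDecodedMovCRxRead( pVCpu, cbInstr, iGReg, iCrReg);
 */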
10591
10592
10593/**
10594 * Interface for HM and EM to clear the CR0[TS] bit.
10595 *
10596 * @returns Strict VBox status code.
10597 * @param pVCpu The cross context virtual CPU structure.
10598 * @param cbInstr The instruction length in bytes.
10599 *
10600 * @remarks In ring-0 not all of the state needs to be synced in.
10601 */
10602VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPUCC pVCpu, uint8_t cbInstr)
10603{
10604 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10605
10606 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10607 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
10608 Assert(!pVCpu->iem.s.cActiveMappings);
10609 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10610}
10611
10612
10613/**
10614 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
10615 *
10616 * @returns Strict VBox status code.
10617 * @param pVCpu The cross context virtual CPU structure.
10618 * @param cbInstr The instruction length in bytes.
10619 * @param uValue The value to load into CR0.
10620 * @param GCPtrEffDst The guest-linear address if the LMSW instruction has a
10621 * memory operand. Otherwise pass NIL_RTGCPTR.
10622 *
10623 * @remarks In ring-0 not all of the state needs to be synced in.
10624 */
10625VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t uValue, RTGCPTR GCPtrEffDst)
10626{
10627 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10628
10629 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10630 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_lmsw, uValue, GCPtrEffDst);
10631 Assert(!pVCpu->iem.s.cActiveMappings);
10632 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10633}
10634
10635
10636/**
10637 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
10638 *
10639 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
10640 *
10641 * @returns Strict VBox status code.
10642 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10643 * @param cbInstr The instruction length in bytes.
10644 * @remarks In ring-0 not all of the state needs to be synced in.
10645 * @thread EMT(pVCpu)
10646 */
10647VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPUCC pVCpu, uint8_t cbInstr)
10648{
10649 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10650
10651 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10652 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
10653 Assert(!pVCpu->iem.s.cActiveMappings);
10654 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10655}
10656
10657
10658/**
10659 * Interface for HM and EM to emulate the WBINVD instruction.
10660 *
10661 * @returns Strict VBox status code.
10662 * @param pVCpu The cross context virtual CPU structure.
10663 * @param cbInstr The instruction length in bytes.
10664 *
10665 * @remarks In ring-0 not all of the state needs to be synced in.
10666 */
10667VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWbinvd(PVMCPUCC pVCpu, uint8_t cbInstr)
10668{
10669 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10670
10671 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10672 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wbinvd);
10673 Assert(!pVCpu->iem.s.cActiveMappings);
10674 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10675}
10676
10677
10678/**
10679 * Interface for HM and EM to emulate the INVD instruction.
10680 *
10681 * @returns Strict VBox status code.
10682 * @param pVCpu The cross context virtual CPU structure.
10683 * @param cbInstr The instruction length in bytes.
10684 *
10685 * @remarks In ring-0 not all of the state needs to be synced in.
10686 */
10687VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvd(PVMCPUCC pVCpu, uint8_t cbInstr)
10688{
10689 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10690
10691 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10692 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invd);
10693 Assert(!pVCpu->iem.s.cActiveMappings);
10694 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10695}
10696
10697
10698/**
10699 * Interface for HM and EM to emulate the INVLPG instruction.
10700 *
10701 * @returns Strict VBox status code.
10702 * @retval VINF_PGM_SYNC_CR3
10703 *
10704 * @param pVCpu The cross context virtual CPU structure.
10705 * @param cbInstr The instruction length in bytes.
10706 * @param GCPtrPage The effective address of the page to invalidate.
10707 *
10708 * @remarks In ring-0 not all of the state needs to be synced in.
10709 */
10710VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpg(PVMCPUCC pVCpu, uint8_t cbInstr, RTGCPTR GCPtrPage)
10711{
10712 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10713
10714 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10715 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_invlpg, GCPtrPage);
10716 Assert(!pVCpu->iem.s.cActiveMappings);
10717 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10718}
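
/*
 * Illustrative caller sketch (hypothetical; only IEMExecDecodedInvlpg is taken
 * from this file): an INVLPG intercept handler forwards the decoded linear
 * address and hands the strict status straight back to its caller, keeping in
 * mind the documented VINF_PGM_SYNC_CR3 informational return in addition to
 * VINF_SUCCESS and exception/scheduling statuses.
 *
 *     VBOXSTRICTRC rcStrict = IEMExecDecodedInvlpg(pVCpu, cbInstr, GCPtrPage);
 *     return rcStrict;
 */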
10719
10720
10721/**
10722 * Interface for HM and EM to emulate the INVPCID instruction.
10723 *
10724 * @returns Strict VBox status code.
10725 * @retval VINF_PGM_SYNC_CR3
10726 *
10727 * @param pVCpu The cross context virtual CPU structure.
10728 * @param cbInstr The instruction length in bytes.
10729 * @param iEffSeg The effective segment register.
10730 * @param GCPtrDesc The effective address of the INVPCID descriptor.
10731 * @param uType The invalidation type.
10732 *
10733 * @remarks In ring-0 not all of the state needs to be synced in.
10734 */
10735VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvpcid(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPTR GCPtrDesc,
10736 uint64_t uType)
10737{
10738 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 4);
10739
10740 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10741 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_invpcid, iEffSeg, GCPtrDesc, uType);
10742 Assert(!pVCpu->iem.s.cActiveMappings);
10743 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10744}
10745
10746
10747/**
10748 * Interface for HM and EM to emulate the CPUID instruction.
10749 *
10750 * @returns Strict VBox status code.
10751 *
10752 * @param pVCpu The cross context virtual CPU structure.
10753 * @param cbInstr The instruction length in bytes.
10754 *
10755 * @remarks Not all of the state needs to be synced in; the usual set plus RAX and RCX.
10756 */
10757VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedCpuid(PVMCPUCC pVCpu, uint8_t cbInstr)
10758{
10759 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10760 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX);
10761
10762 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10763 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_cpuid);
10764 Assert(!pVCpu->iem.s.cActiveMappings);
10765 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10766}
10767
10768
10769/**
10770 * Interface for HM and EM to emulate the RDPMC instruction.
10771 *
10772 * @returns Strict VBox status code.
10773 *
10774 * @param pVCpu The cross context virtual CPU structure.
10775 * @param cbInstr The instruction length in bytes.
10776 *
10777 * @remarks Not all of the state needs to be synced in.
10778 */
10779VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdpmc(PVMCPUCC pVCpu, uint8_t cbInstr)
10780{
10781 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10782 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
10783
10784 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10785 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdpmc);
10786 Assert(!pVCpu->iem.s.cActiveMappings);
10787 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10788}
10789
10790
10791/**
10792 * Interface for HM and EM to emulate the RDTSC instruction.
10793 *
10794 * @returns Strict VBox status code.
10795 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10796 *
10797 * @param pVCpu The cross context virtual CPU structure.
10798 * @param cbInstr The instruction length in bytes.
10799 *
10800 * @remarks Not all of the state needs to be synced in.
10801 */
10802VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtsc(PVMCPUCC pVCpu, uint8_t cbInstr)
10803{
10804 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10805 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
10806
10807 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10808 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtsc);
10809 Assert(!pVCpu->iem.s.cActiveMappings);
10810 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10811}
10812
10813
10814/**
10815 * Interface for HM and EM to emulate the RDTSCP instruction.
10816 *
10817 * @returns Strict VBox status code.
10818 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10819 *
10820 * @param pVCpu The cross context virtual CPU structure.
10821 * @param cbInstr The instruction length in bytes.
10822 *
10823 * @remarks Not all of the state needs to be synced in. It is recommended
10824 * to include CPUMCTX_EXTRN_TSC_AUX to avoid an extra fetch call.
10825 */
10826VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtscp(PVMCPUCC pVCpu, uint8_t cbInstr)
10827{
10828 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10829 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_TSC_AUX);
10830
10831 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10832 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtscp);
10833 Assert(!pVCpu->iem.s.cActiveMappings);
10834 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10835}
10836
10837
10838/**
10839 * Interface for HM and EM to emulate the RDMSR instruction.
10840 *
10841 * @returns Strict VBox status code.
10842 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10843 *
10844 * @param pVCpu The cross context virtual CPU structure.
10845 * @param cbInstr The instruction length in bytes.
10846 *
10847 * @remarks Not all of the state needs to be synced in. Requires RCX and
10848 * (currently) all MSRs.
10849 */
10850VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
10851{
10852 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10853 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_ALL_MSRS);
10854
10855 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10856 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdmsr);
10857 Assert(!pVCpu->iem.s.cActiveMappings);
10858 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10859}
10860
10861
10862/**
10863 * Interface for HM and EM to emulate the WRMSR instruction.
10864 *
10865 * @returns Strict VBox status code.
10866 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10867 *
10868 * @param pVCpu The cross context virtual CPU structure.
10869 * @param cbInstr The instruction length in bytes.
10870 *
10871 * @remarks Not all of the state needs to be synced in. Requires RCX, RAX, RDX,
10872 * and (currently) all MSRs.
10873 */
10874VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWrmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
10875{
10876 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10877 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK
10878 | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_ALL_MSRS);
10879
10880 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10881 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wrmsr);
10882 Assert(!pVCpu->iem.s.cActiveMappings);
10883 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10884}
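
/*
 * Illustrative sketch (hypothetical caller): both MSR helpers take their
 * operands from the guest CPU context rather than as arguments, so the caller
 * only has to make sure the asserted CPUMCTX_EXTRN_* bits have been imported
 * (RCX for RDMSR; RCX, RAX and RDX for WRMSR; plus, currently, all MSRs)
 * before forwarding:
 *
 *     VBOXSTRICTRC rcStrict = fWrite
 *                           ? IEMExecDecodedWrmsr(pVCpu, cbInstr)
 *                           : IEMExecDecodedRdmsr(pVCpu, cbInstr);
 */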
10885
10886
10887/**
10888 * Interface for HM and EM to emulate the MONITOR instruction.
10889 *
10890 * @returns Strict VBox status code.
10891 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10892 *
10893 * @param pVCpu The cross context virtual CPU structure.
10894 * @param cbInstr The instruction length in bytes.
10895 *
10896 * @remarks Not all of the state needs to be synced in.
10897 * @remarks ASSUMES the default segment of DS and no segment override prefixes
10898 * are used.
10899 */
10900VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMonitor(PVMCPUCC pVCpu, uint8_t cbInstr)
10901{
10902 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10903 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);
10904
10905 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10906 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_monitor, X86_SREG_DS);
10907 Assert(!pVCpu->iem.s.cActiveMappings);
10908 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10909}
10910
10911
10912/**
10913 * Interface for HM and EM to emulate the MWAIT instruction.
10914 *
10915 * @returns Strict VBox status code.
10916 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10917 *
10918 * @param pVCpu The cross context virtual CPU structure.
10919 * @param cbInstr The instruction length in bytes.
10920 *
10921 * @remarks Not all of the state needs to be synced in.
10922 */
10923VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMwait(PVMCPUCC pVCpu, uint8_t cbInstr)
10924{
10925 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10926 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX);
10927
10928 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10929 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_mwait);
10930 Assert(!pVCpu->iem.s.cActiveMappings);
10931 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10932}
10933
10934
10935/**
10936 * Interface for HM and EM to emulate the HLT instruction.
10937 *
10938 * @returns Strict VBox status code.
10939 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10940 *
10941 * @param pVCpu The cross context virtual CPU structure.
10942 * @param cbInstr The instruction length in bytes.
10943 *
10944 * @remarks Not all of the state needs to be synced in.
10945 */
10946VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedHlt(PVMCPUCC pVCpu, uint8_t cbInstr)
10947{
10948 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10949
10950 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10951 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_hlt);
10952 Assert(!pVCpu->iem.s.cActiveMappings);
10953 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10954}
10955
10956
10957/**
10958 * Checks if IEM is in the process of delivering an event (interrupt or
10959 * exception).
10960 *
10961 * @returns true if we're in the process of raising an interrupt or exception,
10962 * false otherwise.
10963 * @param pVCpu The cross context virtual CPU structure.
10964 * @param puVector Where to store the vector associated with the
10965 * currently delivered event, optional.
10966 * @param pfFlags Where to store the event delivery flags (see
10967 * IEM_XCPT_FLAGS_XXX), optional.
10968 * @param puErr Where to store the error code associated with the
10969 * event, optional.
10970 * @param puCr2 Where to store the CR2 associated with the event,
10971 * optional.
10972 * @remarks The caller should check the flags to determine if the error code and
10973 * CR2 are valid for the event.
10974 */
10975VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPUCC pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr, uint64_t *puCr2)
10976{
10977 bool const fRaisingXcpt = pVCpu->iem.s.cXcptRecursions > 0;
10978 if (fRaisingXcpt)
10979 {
10980 if (puVector)
10981 *puVector = pVCpu->iem.s.uCurXcpt;
10982 if (pfFlags)
10983 *pfFlags = pVCpu->iem.s.fCurXcpt;
10984 if (puErr)
10985 *puErr = pVCpu->iem.s.uCurXcptErr;
10986 if (puCr2)
10987 *puCr2 = pVCpu->iem.s.uCurXcptCr2;
10988 }
10989 return fRaisingXcpt;
10990}
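
/*
 * Illustrative usage sketch (hypothetical): a caller that needs to know
 * whether IEM is in the middle of delivering an event, for instance when
 * setting up nested exception state, would query it like this.  All output
 * parameters are optional and may be NULL.
 *
 *     uint8_t  uVector = 0;
 *     uint32_t fFlags  = 0;
 *     uint32_t uErr    = 0;
 *     uint64_t uCr2    = 0;
 *     if (IEMGetCurrentXcpt(pVCpu, &uVector, &fFlags, &uErr, &uCr2))
 *         Log(("IEM is delivering vector %#x (flags %#x)\n", uVector, fFlags));
 */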
10991
10992#ifdef IN_RING3
10993
10994/**
10995 * Handles the unlikely and probably fatal merge cases.
10996 *
10997 * @returns Merged status code.
10998 * @param rcStrict Current EM status code.
10999 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
11000 * with @a rcStrict.
11001 * @param iMemMap The memory mapping index. For error reporting only.
11002 * @param pVCpu The cross context virtual CPU structure of the calling
11003 * thread, for error reporting only.
11004 */
11005DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
11006 unsigned iMemMap, PVMCPUCC pVCpu)
11007{
11008 if (RT_FAILURE_NP(rcStrict))
11009 return rcStrict;
11010
11011 if (RT_FAILURE_NP(rcStrictCommit))
11012 return rcStrictCommit;
11013
11014 if (rcStrict == rcStrictCommit)
11015 return rcStrictCommit;
11016
11017 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
11018 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
11019 pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
11020 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
11021 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
11022 return VERR_IOM_FF_STATUS_IPE;
11023}
11024
11025
11026/**
11027 * Helper for IOMR3ProcessForceFlag.
11028 *
11029 * @returns Merged status code.
11030 * @param rcStrict Current EM status code.
11031 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
11032 * with @a rcStrict.
11033 * @param iMemMap The memory mapping index. For error reporting only.
11034 * @param pVCpu The cross context virtual CPU structure of the calling
11035 * thread, for error reporting only.
11036 */
11037DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPUCC pVCpu)
11038{
11039 /* Simple. */
11040 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
11041 return rcStrictCommit;
11042
11043 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
11044 return rcStrict;
11045
11046 /* EM scheduling status codes. */
11047 if (RT_LIKELY( rcStrict >= VINF_EM_FIRST
11048 && rcStrict <= VINF_EM_LAST))
11049 {
11050 if (RT_LIKELY( rcStrictCommit >= VINF_EM_FIRST
11051 && rcStrictCommit <= VINF_EM_LAST))
11052 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
11053 }
11054
11055 /* Unlikely */
11056 return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
11057}
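
/*
 * Worked example of the merge rules above (informal): if the EM status is
 * VINF_SUCCESS or VINF_EM_RAW_TO_R3, the commit status wins outright; if the
 * commit status is VINF_SUCCESS, the EM status wins; if both are EM scheduling
 * statuses (VINF_EM_FIRST..VINF_EM_LAST), the numerically smaller of the two
 * is returned, the convention being that smaller EM statuses take scheduling
 * priority.  Anything else goes to iemR3MergeStatusSlow, which only tolerates
 * failure statuses or two identical values and otherwise returns
 * VERR_IOM_FF_STATUS_IPE.
 */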
11058
11059
11060/**
11061 * Called by force-flag handling code when VMCPU_FF_IEM is set.
11062 *
11063 * @returns Merge between @a rcStrict and what the commit operation returned.
11064 * @param pVM The cross context VM structure.
11065 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11066 * @param rcStrict The status code returned by ring-0 or raw-mode.
11067 */
11068VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
11069{
11070 /*
11071 * Reset the pending commit.
11072 */
11073 AssertMsg( (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
11074 & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
11075 ("%#x %#x %#x\n",
11076 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
11077 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
11078
11079 /*
11080 * Commit the pending bounce buffers (usually just one).
11081 */
11082 unsigned cBufs = 0;
11083 unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
11084 while (iMemMap-- > 0)
11085 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
11086 {
11087 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
11088 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
11089 Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
11090
11091 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
11092 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
11093 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
11094
11095 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
11096 {
11097 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
11098 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
11099 pbBuf,
11100 cbFirst,
11101 PGMACCESSORIGIN_IEM);
11102 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
11103 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
11104 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
11105 VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
11106 }
11107
11108 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
11109 {
11110 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
11111 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
11112 pbBuf + cbFirst,
11113 cbSecond,
11114 PGMACCESSORIGIN_IEM);
11115 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
11116 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
11117 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
11118 VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
11119 }
11120 cBufs++;
11121 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
11122 }
11123
11124 AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
11125 ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
11126 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
11127 pVCpu->iem.s.cActiveMappings = 0;
11128 return rcStrict;
11129}
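
/*
 * Illustrative caller sketch (hypothetical; VMCPU_FF_IEM handling normally
 * lives in the ring-3 force-flag processing): when the force flag is seen
 * after returning from ring-0 or raw-mode execution, the pending bounce
 * buffer writes are committed and the status codes merged like this:
 *
 *     if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
 *         rcStrict = IEMR3ProcessForceFlag(pVM, pVCpu, rcStrict);
 */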
11130
11131#endif /* IN_RING3 */
11132