VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@ 99208

Last change on this file since 99208 was 98795, checked in by vboxsync, 21 months ago

VMM/IEM: s/g_apfnOneByteMap/g_apfnIemInterpretOnlyOneByteMap/ and other changes relating to compiling the instruction decoding and emulation bits more than once. bugref:10368

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 467.9 KB
1/* $Id: IEMAll.cpp 98795 2023-03-01 00:05:10Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/** @page pg_iem IEM - Interpreted Execution Manager
30 *
31 * The interpreted execution manager (IEM) is for executing short guest code
32 * sequences that are causing too many exits / virtualization traps. It will
33 * also be used to interpret single instructions, thus replacing the selective
34 * interpreters in EM and IOM.
35 *
36 * Design goals:
37 * - Relatively small footprint, although we favour speed and correctness
38 * over size.
39 * - Reasonably fast.
40 * - Correctly handle lock prefixed instructions.
41 * - Complete instruction set - eventually.
42 * - Refactorable into a recompiler, maybe.
43 * - Replace EMInterpret*.
44 *
45 * Using the existing disassembler has been considered, however this is thought
46 * to conflict with speed as the disassembler chews things a bit too much while
47 * leaving us with a somewhat complicated state to interpret afterwards.
48 *
49 *
50 * The current code is very much work in progress. You've been warned!
51 *
52 *
53 * @section sec_iem_fpu_instr FPU Instructions
54 *
55 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
56 * same or equivalent instructions on the host FPU. To make life easy, we also
57 * let the FPU prioritize the unmasked exceptions for us. This, however, only
58 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
59 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
60 * can trigger spurious FPU exceptions.
61 *
62 * The guest FPU state is not loaded into the host CPU and kept there till we
63 * leave IEM because the calling conventions have declared an all year open
64 * season on much of the FPU state. For instance an innocent looking call to
65 * memcpy might end up using a whole bunch of XMM or MM registers if the
66 * particular implementation finds it worthwhile.
67 *
68 *
69 * @section sec_iem_logging Logging
70 *
71 * The IEM code uses the \"IEM\" log group for the main logging. The different
72 * logging levels/flags are generally used for the following purposes:
73 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
74 * - Flow (LogFlow) : Basic enter/exit IEM state info.
75 * - Level 2 (Log2) : ?
76 * - Level 3 (Log3) : More detailed enter/exit IEM state info.
77 * - Level 4 (Log4) : Decoding mnemonics w/ EIP.
78 * - Level 5 (Log5) : Decoding details.
79 * - Level 6 (Log6) : Enables/disables the lockstep comparison with REM.
80 * - Level 7 (Log7) : iret++ execution logging.
81 * - Level 8 (Log8) : Memory writes.
82 * - Level 9 (Log9) : Memory reads.
83 * - Level 10 (Log10): TLBs.
84 * - Level 11 (Log11): Unmasked FPU exceptions.
85 *
86 * The SVM (AMD-V) and VMX (VT-x) code has the following assignments:
87 * - Level 1 (Log) : Errors and other major events.
88 * - Flow (LogFlow) : Misc flow stuff (cleanup?)
89 * - Level 2 (Log2) : VM exits.
90 */
91
92/* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
93#ifdef _MSC_VER
94# pragma warning(disable:4505)
95#endif
96
97
98/*********************************************************************************************************************************
99* Header Files *
100*********************************************************************************************************************************/
101#define LOG_GROUP LOG_GROUP_IEM
102#define VMCPU_INCL_CPUM_GST_CTX
103#include <VBox/vmm/iem.h>
104#include <VBox/vmm/cpum.h>
105#include <VBox/vmm/apic.h>
106#include <VBox/vmm/pdm.h>
107#include <VBox/vmm/pgm.h>
108#include <VBox/vmm/iom.h>
109#include <VBox/vmm/em.h>
110#include <VBox/vmm/hm.h>
111#include <VBox/vmm/nem.h>
112#include <VBox/vmm/gim.h>
113#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
114# include <VBox/vmm/em.h>
115# include <VBox/vmm/hm_svm.h>
116#endif
117#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
118# include <VBox/vmm/hmvmxinline.h>
119#endif
120#include <VBox/vmm/tm.h>
121#include <VBox/vmm/dbgf.h>
122#include <VBox/vmm/dbgftrace.h>
123#include "IEMInternal.h"
124#include <VBox/vmm/vmcc.h>
125#include <VBox/log.h>
126#include <VBox/err.h>
127#include <VBox/param.h>
128#include <VBox/dis.h>
129#include <VBox/disopcode.h>
130#include <iprt/asm-math.h>
131#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
132# include <iprt/asm-amd64-x86.h>
133#elif defined(RT_ARCH_ARM64) || defined(RT_ARCH_ARM32)
134# include <iprt/asm-arm.h>
135#endif
136#include <iprt/assert.h>
137#include <iprt/string.h>
138#include <iprt/x86.h>
139
140#include "IEMInline.h"
141
142
143/*********************************************************************************************************************************
144* Structures and Typedefs *
145*********************************************************************************************************************************/
146/**
147 * CPU exception classes.
148 */
149typedef enum IEMXCPTCLASS
150{
151 IEMXCPTCLASS_BENIGN,
152 IEMXCPTCLASS_CONTRIBUTORY,
153 IEMXCPTCLASS_PAGE_FAULT,
154 IEMXCPTCLASS_DOUBLE_FAULT
155} IEMXCPTCLASS;
156
157
158/*********************************************************************************************************************************
159* Global Variables *
160*********************************************************************************************************************************/
161#if defined(IEM_LOG_MEMORY_WRITES)
162/** What IEM just wrote. */
163uint8_t g_abIemWrote[256];
164/** How much IEM just wrote. */
165size_t g_cbIemWrote;
166#endif
167
168
169/*********************************************************************************************************************************
170* Internal Functions *
171*********************************************************************************************************************************/
172static VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,
173 uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT;
174
175
176/**
177 * Slow path of iemInitDecoder() and iemInitExec() that checks what kind of
178 * breakpoints are enabled.
179 *
180 * @param pVCpu The cross context virtual CPU structure of the
181 * calling thread.
182 */
183void iemInitPendingBreakpointsSlow(PVMCPUCC pVCpu)
184{
185 /*
186 * Process guest breakpoints.
187 */
188#define PROCESS_ONE_BP(a_fDr7, a_iBp) do { \
189 if (a_fDr7 & X86_DR7_L_G(a_iBp)) \
190 { \
191 switch (X86_DR7_GET_RW(a_fDr7, a_iBp)) \
192 { \
193 case X86_DR7_RW_EO: \
194 pVCpu->iem.s.fPendingInstructionBreakpoints = true; \
195 break; \
196 case X86_DR7_RW_WO: \
197 case X86_DR7_RW_RW: \
198 pVCpu->iem.s.fPendingDataBreakpoints = true; \
199 break; \
200 case X86_DR7_RW_IO: \
201 pVCpu->iem.s.fPendingIoBreakpoints = true; \
202 break; \
203 } \
204 } \
205 } while (0)
206 uint32_t const fGstDr7 = (uint32_t)pVCpu->cpum.GstCtx.dr[7];
207 if (fGstDr7 & X86_DR7_ENABLED_MASK)
208 {
209 PROCESS_ONE_BP(fGstDr7, 0);
210 PROCESS_ONE_BP(fGstDr7, 1);
211 PROCESS_ONE_BP(fGstDr7, 2);
212 PROCESS_ONE_BP(fGstDr7, 3);
213 }
214
215 /*
216 * Process hypervisor breakpoints.
217 */
218 uint32_t const fHyperDr7 = DBGFBpGetDR7(pVCpu->CTX_SUFF(pVM));
219 if (fHyperDr7 & X86_DR7_ENABLED_MASK)
220 {
221 PROCESS_ONE_BP(fHyperDr7, 0);
222 PROCESS_ONE_BP(fHyperDr7, 1);
223 PROCESS_ONE_BP(fHyperDr7, 2);
224 PROCESS_ONE_BP(fHyperDr7, 3);
225 }
226}
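/*
 * Illustrative sketch (not part of the build): a standalone version of the DR7
 * decoding done by PROCESS_ONE_BP above, using plain bit masks instead of the
 * X86_DR7_* macros.  The EXAMPLEBPFLAGS type and exampleClassifyDr7 name are
 * made up for this example only.
 */
#if 0
# include <stdbool.h>
# include <stdint.h>

typedef struct EXAMPLEBPFLAGS
{
    bool fInstruction;
    bool fData;
    bool fIo;
} EXAMPLEBPFLAGS;

static void exampleClassifyDr7(uint32_t fDr7, EXAMPLEBPFLAGS *pFlags)
{
    for (unsigned iBp = 0; iBp < 4; iBp++)
    {
        /* Bits 0..7 hold the L/G enable pairs, two bits per breakpoint. */
        if (fDr7 & (UINT32_C(3) << (iBp * 2)))
        {
            /* From bit 16 up there are four bits per breakpoint: R/W type (2 bits) + length (2 bits). */
            switch ((fDr7 >> (16 + iBp * 4)) & 3)
            {
                case 0: pFlags->fInstruction = true; break; /* execution only */
                case 1:                                     /* data writes */
                case 3: pFlags->fData        = true; break; /* data reads/writes */
                case 2: pFlags->fIo          = true; break; /* I/O (requires CR4.DE) */
            }
        }
    }
}
#endif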
227
228
229/**
230 * Initializes the decoder state.
231 *
232 * iemReInitDecoder is mostly a copy of this function.
233 *
234 * @param pVCpu The cross context virtual CPU structure of the
235 * calling thread.
236 * @param fBypassHandlers Whether to bypass access handlers.
237 * @param fDisregardLock Whether to disregard the LOCK prefix.
238 */
239DECLINLINE(void) iemInitDecoder(PVMCPUCC pVCpu, bool fBypassHandlers, bool fDisregardLock)
240{
241 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
242 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
243 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
244 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
245 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
246 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
247 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
248 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
249 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
250 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
251
252 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
253 IEMMODE enmMode = iemCalcCpuMode(pVCpu);
254 pVCpu->iem.s.enmCpuMode = enmMode;
255 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
256 pVCpu->iem.s.enmEffAddrMode = enmMode;
257 if (enmMode != IEMMODE_64BIT)
258 {
259 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
260 pVCpu->iem.s.enmEffOpSize = enmMode;
261 }
262 else
263 {
264 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
265 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
266 }
267 pVCpu->iem.s.fPrefixes = 0;
268 pVCpu->iem.s.uRexReg = 0;
269 pVCpu->iem.s.uRexB = 0;
270 pVCpu->iem.s.uRexIndex = 0;
271 pVCpu->iem.s.idxPrefix = 0;
272 pVCpu->iem.s.uVex3rdReg = 0;
273 pVCpu->iem.s.uVexLength = 0;
274 pVCpu->iem.s.fEvexStuff = 0;
275 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
276#ifdef IEM_WITH_CODE_TLB
277 pVCpu->iem.s.pbInstrBuf = NULL;
278 pVCpu->iem.s.offInstrNextByte = 0;
279 pVCpu->iem.s.offCurInstrStart = 0;
280# ifdef VBOX_STRICT
281 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
282 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
283 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
284# endif
285#else
286 pVCpu->iem.s.offOpcode = 0;
287 pVCpu->iem.s.cbOpcode = 0;
288#endif
289 pVCpu->iem.s.offModRm = 0;
290 pVCpu->iem.s.cActiveMappings = 0;
291 pVCpu->iem.s.iNextMapping = 0;
292 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
293 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
294 pVCpu->iem.s.fDisregardLock = fDisregardLock;
295 pVCpu->iem.s.fPendingInstructionBreakpoints = false;
296 pVCpu->iem.s.fPendingDataBreakpoints = false;
297 pVCpu->iem.s.fPendingIoBreakpoints = false;
298 if (RT_LIKELY( !(pVCpu->cpum.GstCtx.dr[7] & X86_DR7_ENABLED_MASK)
299 && pVCpu->CTX_SUFF(pVM)->dbgf.ro.cEnabledHwBreakpoints == 0))
300 { /* likely */ }
301 else
302 iemInitPendingBreakpointsSlow(pVCpu);
303
304#ifdef DBGFTRACE_ENABLED
305 switch (enmMode)
306 {
307 case IEMMODE_64BIT:
308 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.rip);
309 break;
310 case IEMMODE_32BIT:
311 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
312 break;
313 case IEMMODE_16BIT:
314 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
315 break;
316 }
317#endif
318}
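/*
 * Illustrative sketch (not part of the build): the default operand/address
 * size selection performed by iemInitDecoder above, stripped of the VCpu
 * plumbing.  EXAMPLEMODE and exampleDefaultSizes are made-up names.
 */
#if 0
typedef enum EXAMPLEMODE { EXAMPLEMODE_16BIT, EXAMPLEMODE_32BIT, EXAMPLEMODE_64BIT } EXAMPLEMODE;

static void exampleDefaultSizes(EXAMPLEMODE enmCpuMode, EXAMPLEMODE *penmDefAddr, EXAMPLEMODE *penmDefOp)
{
    /* The default address size follows the CPU mode... */
    *penmDefAddr = enmCpuMode;
    /* ...while the default operand size is 32-bit in 64-bit mode (REX.W and
       0x66 prefixes change it per instruction) and equals the mode otherwise. */
    *penmDefOp   = enmCpuMode == EXAMPLEMODE_64BIT ? EXAMPLEMODE_32BIT : enmCpuMode;
}
#endif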
319
320
321/**
322 * Reinitializes the decoder state 2nd+ loop of IEMExecLots.
323 *
324 * This is mostly a copy of iemInitDecoder.
325 *
326 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
327 */
328DECLINLINE(void) iemReInitDecoder(PVMCPUCC pVCpu)
329{
330 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
331 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
332 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
333 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
334 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
335 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
336 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
337 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
338 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
339
340 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu); /** @todo this should be updated during execution! */
341 IEMMODE enmMode = iemCalcCpuMode(pVCpu);
342 pVCpu->iem.s.enmCpuMode = enmMode; /** @todo this should be updated during execution! */
343 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
344 pVCpu->iem.s.enmEffAddrMode = enmMode;
345 if (enmMode != IEMMODE_64BIT)
346 {
347 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
348 pVCpu->iem.s.enmEffOpSize = enmMode;
349 }
350 else
351 {
352 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
353 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
354 }
355 pVCpu->iem.s.fPrefixes = 0;
356 pVCpu->iem.s.uRexReg = 0;
357 pVCpu->iem.s.uRexB = 0;
358 pVCpu->iem.s.uRexIndex = 0;
359 pVCpu->iem.s.idxPrefix = 0;
360 pVCpu->iem.s.uVex3rdReg = 0;
361 pVCpu->iem.s.uVexLength = 0;
362 pVCpu->iem.s.fEvexStuff = 0;
363 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
364#ifdef IEM_WITH_CODE_TLB
365 if (pVCpu->iem.s.pbInstrBuf)
366 {
367 uint64_t off = (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT
368 ? pVCpu->cpum.GstCtx.rip
369 : pVCpu->cpum.GstCtx.eip + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base)
370 - pVCpu->iem.s.uInstrBufPc;
371 if (off < pVCpu->iem.s.cbInstrBufTotal)
372 {
373 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
374 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
375 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
376 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
377 else
378 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
379 }
380 else
381 {
382 pVCpu->iem.s.pbInstrBuf = NULL;
383 pVCpu->iem.s.offInstrNextByte = 0;
384 pVCpu->iem.s.offCurInstrStart = 0;
385 pVCpu->iem.s.cbInstrBuf = 0;
386 pVCpu->iem.s.cbInstrBufTotal = 0;
387 }
388 }
389 else
390 {
391 pVCpu->iem.s.offInstrNextByte = 0;
392 pVCpu->iem.s.offCurInstrStart = 0;
393 pVCpu->iem.s.cbInstrBuf = 0;
394 pVCpu->iem.s.cbInstrBufTotal = 0;
395 }
396#else
397 pVCpu->iem.s.cbOpcode = 0;
398 pVCpu->iem.s.offOpcode = 0;
399#endif
400 pVCpu->iem.s.offModRm = 0;
401 Assert(pVCpu->iem.s.cActiveMappings == 0);
402 pVCpu->iem.s.iNextMapping = 0;
403 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
404 Assert(pVCpu->iem.s.fBypassHandlers == false);
405
406#ifdef DBGFTRACE_ENABLED
407 switch (enmMode)
408 {
409 case IEMMODE_64BIT:
410 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.rip);
411 break;
412 case IEMMODE_32BIT:
413 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
414 break;
415 case IEMMODE_16BIT:
416 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
417 break;
418 }
419#endif
420}
421
422
423
424/**
425 * Prefetches opcodes the first time when starting execution.
426 *
427 * @returns Strict VBox status code.
428 * @param pVCpu The cross context virtual CPU structure of the
429 * calling thread.
430 * @param fBypassHandlers Whether to bypass access handlers.
431 * @param fDisregardLock Whether to disregard LOCK prefixes.
432 *
433 * @todo Combine fDisregardLock and fBypassHandlers into a flag parameter and
434 * store them as such.
435 */
436static VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPUCC pVCpu, bool fBypassHandlers, bool fDisregardLock) RT_NOEXCEPT
437{
438 iemInitDecoder(pVCpu, fBypassHandlers, fDisregardLock);
439
440#ifndef IEM_WITH_CODE_TLB
441 /*
442 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
443 *
444 * First translate CS:rIP to a physical address.
445 *
446 * Note! The iemOpcodeFetchMoreBytes code depends on this here code to fetch
447 * all relevant bytes from the first page, as it ASSUMES it's only ever
448 * called for dealing with CS.LIM, page crossing and instructions that
449 * are too long.
450 */
451 uint32_t cbToTryRead;
452 RTGCPTR GCPtrPC;
453 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
454 {
455 cbToTryRead = GUEST_PAGE_SIZE;
456 GCPtrPC = pVCpu->cpum.GstCtx.rip;
457 if (IEM_IS_CANONICAL(GCPtrPC))
458 cbToTryRead = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
459 else
460 return iemRaiseGeneralProtectionFault0(pVCpu);
461 }
462 else
463 {
464 uint32_t GCPtrPC32 = pVCpu->cpum.GstCtx.eip;
465 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
466 if (GCPtrPC32 <= pVCpu->cpum.GstCtx.cs.u32Limit)
467 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrPC32 + 1;
468 else
469 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
470 if (cbToTryRead) { /* likely */ }
471 else /* overflowed */
472 {
473 Assert(GCPtrPC32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
474 cbToTryRead = UINT32_MAX;
475 }
476 GCPtrPC = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrPC32;
477 Assert(GCPtrPC <= UINT32_MAX);
478 }
479
480 PGMPTWALK Walk;
481 int rc = PGMGstGetPage(pVCpu, GCPtrPC, &Walk);
482 if (RT_SUCCESS(rc))
483 Assert(Walk.fSucceeded); /* probable. */
484 else
485 {
486 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
487# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
488 if (Walk.fFailed & PGM_WALKFAIL_EPT)
489 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
490# endif
491 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, rc);
492 }
493 if ((Walk.fEffective & X86_PTE_US) || pVCpu->iem.s.uCpl != 3) { /* likely */ }
494 else
495 {
496 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
497# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
498 if (Walk.fFailed & PGM_WALKFAIL_EPT)
499 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
500# endif
501 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
502 }
503 if (!(Walk.fEffective & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
504 else
505 {
506 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
507# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
508 if (Walk.fFailed & PGM_WALKFAIL_EPT)
509 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
510# endif
511 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
512 }
513 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
514 /** @todo Check reserved bits and such stuff. PGM is better at doing
515 * that, so do it when implementing the guest virtual address
516 * TLB... */
517
518 /*
519 * Read the bytes at this address.
520 */
521 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
522 if (cbToTryRead > cbLeftOnPage)
523 cbToTryRead = cbLeftOnPage;
524 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
525 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
526
527 if (!pVCpu->iem.s.fBypassHandlers)
528 {
529 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
530 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
531 { /* likely */ }
532 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
533 {
534 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
535 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
536 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
537 }
538 else
539 {
540 Log((RT_SUCCESS(rcStrict)
541 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
542 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
543 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
544 return rcStrict;
545 }
546 }
547 else
548 {
549 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
550 if (RT_SUCCESS(rc))
551 { /* likely */ }
552 else
553 {
554 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
555 GCPtrPC, GCPhys, cbToTryRead, rc));
556 return rc;
557 }
558 }
559 pVCpu->iem.s.cbOpcode = cbToTryRead;
560#endif /* !IEM_WITH_CODE_TLB */
561 return VINF_SUCCESS;
562}
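/*
 * Illustrative sketch (not part of the build): the read-size clamping used by
 * iemInitDecoderAndPrefetchOpcodes above - never read past the end of the
 * current guest page nor past the opcode buffer.  The function name is made up
 * and a 4 KiB guest page size is assumed for simplicity.
 */
#if 0
# include <stddef.h>
# include <stdint.h>

static uint32_t exampleClampPrefetch(uint64_t GCPtrPC, uint32_t cbWanted, size_t cbOpcodeBuf)
{
    /* Bytes left before the next page boundary. */
    uint32_t const cbLeftOnPage = 4096 - (uint32_t)(GCPtrPC & 0xfff);
    uint32_t       cbToRead     = cbWanted < cbLeftOnPage ? cbWanted : cbLeftOnPage;
    /* Never exceed the opcode buffer. */
    if (cbToRead > cbOpcodeBuf)
        cbToRead = (uint32_t)cbOpcodeBuf;
    return cbToRead;
}
#endif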
563
564
565/**
566 * Invalidates the IEM TLBs.
567 *
568 * This is called internally as well as by PGM when moving GC mappings.
569 *
571 * @param pVCpu The cross context virtual CPU structure of the calling
572 * thread.
573 */
574VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPUCC pVCpu)
575{
576#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
577 Log10(("IEMTlbInvalidateAll\n"));
578# ifdef IEM_WITH_CODE_TLB
579 pVCpu->iem.s.cbInstrBufTotal = 0;
580 pVCpu->iem.s.CodeTlb.uTlbRevision += IEMTLB_REVISION_INCR;
581 if (pVCpu->iem.s.CodeTlb.uTlbRevision != 0)
582 { /* very likely */ }
583 else
584 {
585 pVCpu->iem.s.CodeTlb.uTlbRevision = IEMTLB_REVISION_INCR;
586 unsigned i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
587 while (i-- > 0)
588 pVCpu->iem.s.CodeTlb.aEntries[i].uTag = 0;
589 }
590# endif
591
592# ifdef IEM_WITH_DATA_TLB
593 pVCpu->iem.s.DataTlb.uTlbRevision += IEMTLB_REVISION_INCR;
594 if (pVCpu->iem.s.DataTlb.uTlbRevision != 0)
595 { /* very likely */ }
596 else
597 {
598 pVCpu->iem.s.DataTlb.uTlbRevision = IEMTLB_REVISION_INCR;
599 unsigned i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
600 while (i-- > 0)
601 pVCpu->iem.s.DataTlb.aEntries[i].uTag = 0;
602 }
603# endif
604#else
605 RT_NOREF(pVCpu);
606#endif
607}
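/*
 * Illustrative sketch (not part of the build): the revision trick used by
 * IEMTlbInvalidateAll above.  A TLB tag is the page number ORed with the
 * current revision, so bumping the revision invalidates every entry in O(1);
 * only when the revision counter wraps around to zero do the tags need wiping.
 * The structure and constants below are simplified stand-ins, not the real
 * IEMTLB layout.
 */
#if 0
# include <stdbool.h>
# include <stdint.h>
# include <string.h>

# define EXAMPLE_TLB_ENTRIES   256
# define EXAMPLE_REV_INCR      UINT64_C(0x1000000000000) /* sits above the tag bits */

typedef struct EXAMPLETLB
{
    uint64_t uRevision;
    uint64_t auTags[EXAMPLE_TLB_ENTRIES];
} EXAMPLETLB;

static void exampleTlbInvalidateAll(EXAMPLETLB *pTlb)
{
    pTlb->uRevision += EXAMPLE_REV_INCR;
    if (pTlb->uRevision == 0)               /* rollover: stale tags could match again */
    {
        pTlb->uRevision = EXAMPLE_REV_INCR;
        memset(pTlb->auTags, 0, sizeof(pTlb->auTags));
    }
}

static bool exampleTlbIsHit(EXAMPLETLB const *pTlb, uint64_t uPageNo, unsigned idx)
{
    /* Entries written under an older revision can never compare equal. */
    return pTlb->auTags[idx] == (uPageNo | pTlb->uRevision);
}
#endif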
608
609
610/**
611 * Invalidates a page in the TLBs.
612 *
613 * @param pVCpu The cross context virtual CPU structure of the calling
614 * thread.
615 * @param GCPtr The address of the page to invalidate.
616 * @thread EMT(pVCpu)
617 */
618VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPUCC pVCpu, RTGCPTR GCPtr)
619{
620#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
621 Log10(("IEMTlbInvalidatePage: GCPtr=%RGv\n", GCPtr));
622 GCPtr = IEMTLB_CALC_TAG_NO_REV(GCPtr);
623 Assert(!(GCPtr >> (48 - X86_PAGE_SHIFT)));
624 uintptr_t const idx = IEMTLB_TAG_TO_INDEX(GCPtr);
625
626# ifdef IEM_WITH_CODE_TLB
627 if (pVCpu->iem.s.CodeTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.CodeTlb.uTlbRevision))
628 {
629 pVCpu->iem.s.CodeTlb.aEntries[idx].uTag = 0;
630 if (GCPtr == IEMTLB_CALC_TAG_NO_REV(pVCpu->iem.s.uInstrBufPc))
631 pVCpu->iem.s.cbInstrBufTotal = 0;
632 }
633# endif
634
635# ifdef IEM_WITH_DATA_TLB
636 if (pVCpu->iem.s.DataTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.DataTlb.uTlbRevision))
637 pVCpu->iem.s.DataTlb.aEntries[idx].uTag = 0;
638# endif
639#else
640 NOREF(pVCpu); NOREF(GCPtr);
641#endif
642}
643
644
645#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
646/**
647 * Invalidates both TLBs in a slow fashion following a rollover.
648 *
649 * Worker for IEMTlbInvalidateAllPhysical,
650 * IEMTlbInvalidateAllPhysicalAllCpus, iemOpcodeFetchBytesJmp, iemMemMap,
651 * iemMemMapJmp and others.
652 *
653 * @thread EMT(pVCpu)
654 */
655static void IEMTlbInvalidateAllPhysicalSlow(PVMCPUCC pVCpu)
656{
657 Log10(("IEMTlbInvalidateAllPhysicalSlow\n"));
658 ASMAtomicWriteU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
659 ASMAtomicWriteU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
660
661 unsigned i;
662# ifdef IEM_WITH_CODE_TLB
663 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
664 while (i-- > 0)
665 {
666 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
667 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~( IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
668 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
669 }
670# endif
671# ifdef IEM_WITH_DATA_TLB
672 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
673 while (i-- > 0)
674 {
675 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
676 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~( IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
677 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
678 }
679# endif
680
681}
682#endif
683
684
685/**
686 * Invalidates the host physical aspects of the IEM TLBs.
687 *
688 * This is called internally as well as by PGM when moving GC mappings.
689 *
690 * @param pVCpu The cross context virtual CPU structure of the calling
691 * thread.
692 * @note Currently not used.
693 */
694VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPUCC pVCpu)
695{
696#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
697 /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
698 Log10(("IEMTlbInvalidateAllPhysical\n"));
699
700# ifdef IEM_WITH_CODE_TLB
701 pVCpu->iem.s.cbInstrBufTotal = 0;
702# endif
703 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
704 if (RT_LIKELY(uTlbPhysRev > IEMTLB_PHYS_REV_INCR * 2))
705 {
706 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
707 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
708 }
709 else
710 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
711#else
712 NOREF(pVCpu);
713#endif
714}
715
716
717/**
718 * Invalidates the host physical aspects of the IEM TLBs on all CPUs.
719 *
720 * This is called internally as well as by PGM when moving GC mappings.
721 *
722 * @param pVM The cross context VM structure.
723 * @param idCpuCaller The ID of the calling EMT if available to the caller,
724 * otherwise NIL_VMCPUID.
725 *
726 * @remarks Caller holds the PGM lock.
727 */
728VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVMCC pVM, VMCPUID idCpuCaller)
729{
730#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
731 PVMCPUCC const pVCpuCaller = idCpuCaller >= pVM->cCpus ? VMMGetCpu(pVM) : VMMGetCpuById(pVM, idCpuCaller);
732 if (pVCpuCaller)
733 VMCPU_ASSERT_EMT(pVCpuCaller);
734 Log10(("IEMTlbInvalidateAllPhysicalAllCpus\n"));
735
736 VMCC_FOR_EACH_VMCPU(pVM)
737 {
738# ifdef IEM_WITH_CODE_TLB
739 if (pVCpuCaller == pVCpu)
740 pVCpu->iem.s.cbInstrBufTotal = 0;
741# endif
742
743 uint64_t const uTlbPhysRevPrev = ASMAtomicUoReadU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev);
744 uint64_t uTlbPhysRevNew = uTlbPhysRevPrev + IEMTLB_PHYS_REV_INCR;
745 if (RT_LIKELY(uTlbPhysRevNew > IEMTLB_PHYS_REV_INCR * 2))
746 { /* likely */}
747 else if (pVCpuCaller == pVCpu)
748 uTlbPhysRevNew = IEMTLB_PHYS_REV_INCR;
749 else
750 {
751 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
752 continue;
753 }
754 ASMAtomicCmpXchgU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev);
755 ASMAtomicCmpXchgU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev);
756 }
757 VMCC_FOR_EACH_VMCPU_END(pVM);
758
759#else
760 RT_NOREF(pVM, idCpuCaller);
761#endif
762}
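/*
 * Illustrative sketch (not part of the build): the optimistic cross-CPU update
 * pattern used by IEMTlbInvalidateAllPhysicalAllCpus above, expressed with C11
 * atomics instead of the ASMAtomic* helpers.  The name and the increment value
 * are made up; wrap-around handling is left out to keep the sketch short.
 */
#if 0
# include <stdatomic.h>
# include <stdint.h>

# define EXAMPLE_PHYS_REV_INCR  UINT64_C(0x4)

static void exampleBumpPhysRev(_Atomic uint64_t *puPhysRev)
{
    uint64_t uOld = atomic_load_explicit(puPhysRev, memory_order_relaxed);
    uint64_t uNew = uOld + EXAMPLE_PHYS_REV_INCR;
    /* Only update if the target CPU hasn't changed the value in the meantime;
       if it has, that change was itself an invalidation, so losing the race is
       acceptable and no retry is attempted (same idea as the code above). */
    atomic_compare_exchange_strong(puPhysRev, &uOld, uNew);
}
#endif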
763
764#ifdef IEM_WITH_CODE_TLB
765
766/**
766 * Tries to fetch @a cbDst opcode bytes, raising the appropriate exception on
767 * failure and jumping.
769 *
770 * We end up here for a number of reasons:
771 * - pbInstrBuf isn't yet initialized.
772 * - Advancing beyond the buffer boundary (e.g. cross page).
773 * - Advancing beyond the CS segment limit.
774 * - Fetching from non-mappable page (e.g. MMIO).
775 *
776 * @param pVCpu The cross context virtual CPU structure of the
777 * calling thread.
778 * @param pvDst Where to return the bytes.
779 * @param cbDst Number of bytes to read.
780 *
781 * @todo Make cbDst = 0 a way of initializing pbInstrBuf?
782 */
783void iemOpcodeFetchBytesJmp(PVMCPUCC pVCpu, size_t cbDst, void *pvDst) IEM_NOEXCEPT_MAY_LONGJMP
784{
785# ifdef IN_RING3
786 for (;;)
787 {
788 Assert(cbDst <= 8);
789 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
790
791 /*
792 * We might have a partial buffer match, deal with that first to make the
793 * rest simpler. This is the first part of the cross page/buffer case.
794 */
795 if (pVCpu->iem.s.pbInstrBuf != NULL)
796 {
797 if (offBuf < pVCpu->iem.s.cbInstrBuf)
798 {
799 Assert(offBuf + cbDst > pVCpu->iem.s.cbInstrBuf);
800 uint32_t const cbCopy = pVCpu->iem.s.cbInstrBuf - pVCpu->iem.s.offInstrNextByte;
801 memcpy(pvDst, &pVCpu->iem.s.pbInstrBuf[offBuf], cbCopy);
802
803 cbDst -= cbCopy;
804 pvDst = (uint8_t *)pvDst + cbCopy;
805 offBuf += cbCopy;
806 pVCpu->iem.s.offInstrNextByte += cbCopy;
807 }
808 }
809
810 /*
811 * Check segment limit, figuring how much we're allowed to access at this point.
812 *
813 * We will fault immediately if RIP is past the segment limit / in non-canonical
814 * territory. If we do continue, there are one or more bytes to read before we
815 * end up in trouble and we need to do that first before faulting.
816 */
817 RTGCPTR GCPtrFirst;
818 uint32_t cbMaxRead;
819 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
820 {
821 GCPtrFirst = pVCpu->cpum.GstCtx.rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
822 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
823 { /* likely */ }
824 else
825 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
826 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
827 }
828 else
829 {
830 GCPtrFirst = pVCpu->cpum.GstCtx.eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
831 /* Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT); - this is allowed */
832 if (RT_LIKELY((uint32_t)GCPtrFirst <= pVCpu->cpum.GstCtx.cs.u32Limit))
833 { /* likely */ }
834 else /** @todo For CPUs older than the 386, we should not necessarily generate \#GP here but wrap around! */
835 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
836 cbMaxRead = pVCpu->cpum.GstCtx.cs.u32Limit - (uint32_t)GCPtrFirst + 1;
837 if (cbMaxRead != 0)
838 { /* likely */ }
839 else
840 {
841 /* Overflowed because address is 0 and limit is max. */
842 Assert(GCPtrFirst == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
843 cbMaxRead = X86_PAGE_SIZE;
844 }
845 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base;
846 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
847 if (cbMaxRead2 < cbMaxRead)
848 cbMaxRead = cbMaxRead2;
849 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
850 }
851
852 /*
853 * Get the TLB entry for this piece of code.
854 */
855 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.CodeTlb, GCPtrFirst);
856 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.CodeTlb, uTag);
857 if (pTlbe->uTag == uTag)
858 {
859 /* likely when executing lots of code, otherwise unlikely */
860# ifdef VBOX_WITH_STATISTICS
861 pVCpu->iem.s.CodeTlb.cTlbHits++;
862# endif
863 }
864 else
865 {
866 pVCpu->iem.s.CodeTlb.cTlbMisses++;
867 PGMPTWALK Walk;
868 int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &Walk);
869 if (RT_FAILURE(rc))
870 {
871#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
872 /** @todo Nested VMX: Need to handle EPT violation/misconfig here? */
873 Assert(!(Walk.fFailed & PGM_WALKFAIL_EPT));
874#endif
875 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
876 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, rc);
877 }
878
879 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
880 Assert(Walk.fSucceeded);
881 pTlbe->uTag = uTag;
882 pTlbe->fFlagsAndPhysRev = (~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A))
883 | (Walk.fEffective >> X86_PTE_PAE_BIT_NX);
884 pTlbe->GCPhys = Walk.GCPhys;
885 pTlbe->pbMappingR3 = NULL;
886 }
887
888 /*
889 * Check TLB page table level access flags.
890 */
891 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
892 {
893 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && pVCpu->iem.s.uCpl == 3)
894 {
895 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
896 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
897 }
898 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
899 {
900 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrFirst));
901 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
902 }
903 }
904
905 /*
906 * Look up the physical page info if necessary.
907 */
908 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
909 { /* not necessary */ }
910 else
911 {
912 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
913 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
914 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
915 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
916 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
917 { /* likely */ }
918 else
919 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
920 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
921 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_UNASSIGNED);
922 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
923 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
924 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
925 }
926
927# if defined(IN_RING3) || defined(IN_RING0) /** @todo fixme */
928 /*
929 * Try do a direct read using the pbMappingR3 pointer.
930 */
931 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
932 == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
933 {
934 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
935 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
936 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
937 {
938 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
939 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
940 }
941 else
942 {
943 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
944 if (cbInstr + (uint32_t)cbDst <= 15)
945 {
946 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
947 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
948 }
949 else
950 {
951 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0)\n",
952 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, cbDst));
953 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
954 }
955 }
956 if (cbDst <= cbMaxRead)
957 {
958 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
959 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
960 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
961 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
962 return;
963 }
964 pVCpu->iem.s.pbInstrBuf = NULL;
965
966 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
967 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
968 }
969# else
970# error "refactor as needed"
971 /*
972 * If there is no special read handling, we can read a bit more and
973 * put it in the prefetch buffer.
974 */
975 if ( cbDst < cbMaxRead
976 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
977 {
978 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
979 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
980 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
981 { /* likely */ }
982 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
983 {
984 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
985 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
986 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
987 AssertStmt(rcStrict == VINF_SUCCESS, IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict)));
988 }
989 else
990 {
991 Log((RT_SUCCESS(rcStrict)
992 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
993 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
994 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
995 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
996 }
997 }
998# endif
999 /*
1000 * Special read handling, so only read exactly what's needed.
1001 * This is a highly unlikely scenario.
1002 */
1003 else
1004 {
1005 pVCpu->iem.s.CodeTlb.cTlbSlowReadPath++;
1006
1007 /* Check instruction length. */
1008 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1009 if (RT_LIKELY(cbInstr + cbDst <= 15))
1010 { /* likely */ }
1011 else
1012 {
1013 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0) [slow]\n",
1014 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, cbDst));
1015 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1016 }
1017
1018 /* Do the reading. */
1019 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
1020 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
1021 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
1022 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1023 { /* likely */ }
1024 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1025 {
1026 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1027 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1028 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1029 AssertStmt(rcStrict == VINF_SUCCESS, IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict)));
1030 }
1031 else
1032 {
1033 Log((RT_SUCCESS(rcStrict)
1034 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1035 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1036 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1037 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1038 }
1039 pVCpu->iem.s.offInstrNextByte = offBuf + cbToRead;
1040 if (cbToRead == cbDst)
1041 return;
1042 }
1043
1044 /*
1045 * More to read, loop.
1046 */
1047 cbDst -= cbMaxRead;
1048 pvDst = (uint8_t *)pvDst + cbMaxRead;
1049 }
1050# else /* !IN_RING3 */
1051 RT_NOREF(pvDst, cbDst);
1052 if (pvDst || cbDst)
1053 IEM_DO_LONGJMP(pVCpu, VERR_INTERNAL_ERROR);
1054# endif /* !IN_RING3 */
1055}
1056
1057#else
1058
1059/**
1060 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
1061 * exception if it fails.
1062 *
1063 * @returns Strict VBox status code.
1064 * @param pVCpu The cross context virtual CPU structure of the
1065 * calling thread.
1066 * @param cbMin The minimum number of bytes relative to offOpcode
1067 * that must be read.
1068 */
1069VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPUCC pVCpu, size_t cbMin) RT_NOEXCEPT
1070{
1071 /*
1072 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1073 *
1074 * First translate CS:rIP to a physical address.
1075 */
1076 uint8_t const cbOpcode = pVCpu->iem.s.cbOpcode;
1077 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
1078 uint8_t const cbLeft = cbOpcode - offOpcode;
1079 Assert(cbLeft < cbMin);
1080 Assert(cbOpcode <= sizeof(pVCpu->iem.s.abOpcode));
1081
1082 uint32_t cbToTryRead;
1083 RTGCPTR GCPtrNext;
1084 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1085 {
1086 GCPtrNext = pVCpu->cpum.GstCtx.rip + cbOpcode;
1087 if (!IEM_IS_CANONICAL(GCPtrNext))
1088 return iemRaiseGeneralProtectionFault0(pVCpu);
1089 cbToTryRead = GUEST_PAGE_SIZE - (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1090 }
1091 else
1092 {
1093 uint32_t GCPtrNext32 = pVCpu->cpum.GstCtx.eip;
1094 /* Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT); - this is allowed */
1095 GCPtrNext32 += cbOpcode;
1096 if (GCPtrNext32 > pVCpu->cpum.GstCtx.cs.u32Limit)
1097 /** @todo For CPUs older than the 386, we should not generate \#GP here but wrap around! */
1098 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1099 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrNext32 + 1;
1100 if (!cbToTryRead) /* overflowed */
1101 {
1102 Assert(GCPtrNext32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1103 cbToTryRead = UINT32_MAX;
1104 /** @todo check out wrapping around the code segment. */
1105 }
1106 if (cbToTryRead < cbMin - cbLeft)
1107 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1108 GCPtrNext = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrNext32;
1109
1110 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1111 if (cbToTryRead > cbLeftOnPage)
1112 cbToTryRead = cbLeftOnPage;
1113 }
1114
1115 /* Restrict to opcode buffer space.
1116
1117 We're making ASSUMPTIONS here based on work done previously in
1118 iemInitDecoderAndPrefetchOpcodes, where bytes from the first page will
1119 be fetched in case of an instruction crossing two pages. */
1120 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - cbOpcode)
1121 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - cbOpcode;
1122 if (RT_LIKELY(cbToTryRead + cbLeft >= cbMin))
1123 { /* likely */ }
1124 else
1125 {
1126 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0)\n",
1127 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, offOpcode, cbMin));
1128 return iemRaiseGeneralProtectionFault0(pVCpu);
1129 }
1130
1131 PGMPTWALK Walk;
1132 int rc = PGMGstGetPage(pVCpu, GCPtrNext, &Walk);
1133 if (RT_FAILURE(rc))
1134 {
1135 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
1136#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1137 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1138 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
1139#endif
1140 return iemRaisePageFault(pVCpu, GCPtrNext, 1, IEM_ACCESS_INSTRUCTION, rc);
1141 }
1142 if (!(Walk.fEffective & X86_PTE_US) && pVCpu->iem.s.uCpl == 3)
1143 {
1144 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
1145#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1146 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1147 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
1148#endif
1149 return iemRaisePageFault(pVCpu, GCPtrNext, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1150 }
1151 if ((Walk.fEffective & X86_PTE_PAE_NX) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
1152 {
1153 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
1154#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1155 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1156 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
1157#endif
1158 return iemRaisePageFault(pVCpu, GCPtrNext, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1159 }
1160 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1161 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, cbOpcode));
1162 /** @todo Check reserved bits and such stuff. PGM is better at doing
1163 * that, so do it when implementing the guest virtual address
1164 * TLB... */
1165
1166 /*
1167 * Read the bytes at this address.
1168 *
1169 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
1170 * and since PATM should only patch the start of an instruction there
1171 * should be no need to check again here.
1172 */
1173 if (!pVCpu->iem.s.fBypassHandlers)
1174 {
1175 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[cbOpcode],
1176 cbToTryRead, PGMACCESSORIGIN_IEM);
1177 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1178 { /* likely */ }
1179 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1180 {
1181 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1182 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1183 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1184 }
1185 else
1186 {
1187 Log((RT_SUCCESS(rcStrict)
1188 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1189 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1190 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1191 return rcStrict;
1192 }
1193 }
1194 else
1195 {
1196 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[cbOpcode], GCPhys, cbToTryRead);
1197 if (RT_SUCCESS(rc))
1198 { /* likely */ }
1199 else
1200 {
1201 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
1202 return rc;
1203 }
1204 }
1205 pVCpu->iem.s.cbOpcode = cbOpcode + cbToTryRead;
1206 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
1207
1208 return VINF_SUCCESS;
1209}
1210
1211#endif /* !IEM_WITH_CODE_TLB */
1212#ifndef IEM_WITH_SETJMP
1213
1214/**
1215 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
1216 *
1217 * @returns Strict VBox status code.
1218 * @param pVCpu The cross context virtual CPU structure of the
1219 * calling thread.
1220 * @param pb Where to return the opcode byte.
1221 */
1222VBOXSTRICTRC iemOpcodeGetNextU8Slow(PVMCPUCC pVCpu, uint8_t *pb) RT_NOEXCEPT
1223{
1224 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1225 if (rcStrict == VINF_SUCCESS)
1226 {
1227 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1228 *pb = pVCpu->iem.s.abOpcode[offOpcode];
1229 pVCpu->iem.s.offOpcode = offOpcode + 1;
1230 }
1231 else
1232 *pb = 0;
1233 return rcStrict;
1234}
1235
1236#else /* IEM_WITH_SETJMP */
1237
1238/**
1239 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
1240 *
1241 * @returns The opcode byte.
1242 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1243 */
1244uint8_t iemOpcodeGetNextU8SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1245{
1246# ifdef IEM_WITH_CODE_TLB
1247 uint8_t u8;
1248 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
1249 return u8;
1250# else
1251 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1252 if (rcStrict == VINF_SUCCESS)
1253 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
1254 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1255# endif
1256}
1257
1258#endif /* IEM_WITH_SETJMP */
1259
1260#ifndef IEM_WITH_SETJMP
1261
1262/**
1263 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
1264 *
1265 * @returns Strict VBox status code.
1266 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1267 * @param pu16 Where to return the opcode word (sign-extended byte).
1268 */
1269VBOXSTRICTRC iemOpcodeGetNextS8SxU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
1270{
1271 uint8_t u8;
1272 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1273 if (rcStrict == VINF_SUCCESS)
1274 *pu16 = (int8_t)u8;
1275 return rcStrict;
1276}
1277
1278
1279/**
1280 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
1281 *
1282 * @returns Strict VBox status code.
1283 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1284 * @param pu32 Where to return the opcode dword.
1285 */
1286VBOXSTRICTRC iemOpcodeGetNextS8SxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1287{
1288 uint8_t u8;
1289 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1290 if (rcStrict == VINF_SUCCESS)
1291 *pu32 = (int8_t)u8;
1292 return rcStrict;
1293}
1294
1295
1296/**
1297 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
1298 *
1299 * @returns Strict VBox status code.
1300 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1301 * @param pu64 Where to return the opcode qword.
1302 */
1303VBOXSTRICTRC iemOpcodeGetNextS8SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1304{
1305 uint8_t u8;
1306 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1307 if (rcStrict == VINF_SUCCESS)
1308 *pu64 = (int8_t)u8;
1309 return rcStrict;
1310}
1311
1312#endif /* !IEM_WITH_SETJMP */
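/*
 * Illustrative sketch (not part of the build): the sign extension performed by
 * the S8Sx getters above boils down to casting the fetched byte to int8_t
 * before widening it, e.g. 0xFE becomes 0xFFFE / 0xFFFFFFFE / 0xFFFFFFFFFFFFFFFE.
 * The function name is made up for this example.
 */
#if 0
# include <assert.h>
# include <stdint.h>

static void exampleSignExtend(void)
{
    uint8_t const  u8  = 0xfe;                    /* -2 as a signed byte */
    uint16_t const u16 = (uint16_t)(int8_t)u8;    /* same cast as *pu16 = (int8_t)u8 */
    uint64_t const u64 = (uint64_t)(int8_t)u8;
    assert(u16 == UINT16_C(0xfffe));
    assert(u64 == UINT64_C(0xfffffffffffffffe));
}
#endif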
1313
1314
1315#ifndef IEM_WITH_SETJMP
1316
1317/**
1318 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
1319 *
1320 * @returns Strict VBox status code.
1321 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1322 * @param pu16 Where to return the opcode word.
1323 */
1324VBOXSTRICTRC iemOpcodeGetNextU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
1325{
1326 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1327 if (rcStrict == VINF_SUCCESS)
1328 {
1329 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1330# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1331 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1332# else
1333 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1334# endif
1335 pVCpu->iem.s.offOpcode = offOpcode + 2;
1336 }
1337 else
1338 *pu16 = 0;
1339 return rcStrict;
1340}
1341
1342#else /* IEM_WITH_SETJMP */
1343
1344/**
1345 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error.
1346 *
1347 * @returns The opcode word.
1348 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1349 */
1350uint16_t iemOpcodeGetNextU16SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1351{
1352# ifdef IEM_WITH_CODE_TLB
1353 uint16_t u16;
1354 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
1355 return u16;
1356# else
1357 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1358 if (rcStrict == VINF_SUCCESS)
1359 {
1360 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1361 pVCpu->iem.s.offOpcode += 2;
1362# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1363 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1364# else
1365 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1366# endif
1367 }
1368 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1369# endif
1370}
1371
1372#endif /* IEM_WITH_SETJMP */
1373
1374#ifndef IEM_WITH_SETJMP
1375
1376/**
1377 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
1378 *
1379 * @returns Strict VBox status code.
1380 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1381 * @param pu32 Where to return the opcode double word.
1382 */
1383VBOXSTRICTRC iemOpcodeGetNextU16ZxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1384{
1385 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1386 if (rcStrict == VINF_SUCCESS)
1387 {
1388 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1389 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1390 pVCpu->iem.s.offOpcode = offOpcode + 2;
1391 }
1392 else
1393 *pu32 = 0;
1394 return rcStrict;
1395}
1396
1397
1398/**
1399 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
1400 *
1401 * @returns Strict VBox status code.
1402 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1403 * @param pu64 Where to return the opcode quad word.
1404 */
1405VBOXSTRICTRC iemOpcodeGetNextU16ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1406{
1407 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1408 if (rcStrict == VINF_SUCCESS)
1409 {
1410 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1411 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1412 pVCpu->iem.s.offOpcode = offOpcode + 2;
1413 }
1414 else
1415 *pu64 = 0;
1416 return rcStrict;
1417}
1418
1419#endif /* !IEM_WITH_SETJMP */
1420
1421#ifndef IEM_WITH_SETJMP
1422
1423/**
1424 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
1425 *
1426 * @returns Strict VBox status code.
1427 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1428 * @param pu32 Where to return the opcode dword.
1429 */
1430VBOXSTRICTRC iemOpcodeGetNextU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1431{
1432 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1433 if (rcStrict == VINF_SUCCESS)
1434 {
1435 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1436# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1437 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1438# else
1439 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1440 pVCpu->iem.s.abOpcode[offOpcode + 1],
1441 pVCpu->iem.s.abOpcode[offOpcode + 2],
1442 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1443# endif
1444 pVCpu->iem.s.offOpcode = offOpcode + 4;
1445 }
1446 else
1447 *pu32 = 0;
1448 return rcStrict;
1449}
1450
1451#else /* IEM_WITH_SETJMP */
1452
1453/**
1454 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
1455 *
1456 * @returns The opcode dword.
1457 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1458 */
1459uint32_t iemOpcodeGetNextU32SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1460{
1461# ifdef IEM_WITH_CODE_TLB
1462 uint32_t u32;
1463 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
1464 return u32;
1465# else
1466 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1467 if (rcStrict == VINF_SUCCESS)
1468 {
1469 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1470 pVCpu->iem.s.offOpcode = offOpcode + 4;
1471# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1472 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1473# else
1474 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1475 pVCpu->iem.s.abOpcode[offOpcode + 1],
1476 pVCpu->iem.s.abOpcode[offOpcode + 2],
1477 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1478# endif
1479 }
1480 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1481# endif
1482}
1483
1484#endif /* IEM_WITH_SETJMP */
1485
1486#ifndef IEM_WITH_SETJMP
1487
1488/**
1489 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
1490 *
1491 * @returns Strict VBox status code.
1492 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1493 * @param pu64 Where to return the opcode quad word (zero-extended dword).
1494 */
1495VBOXSTRICTRC iemOpcodeGetNextU32ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1496{
1497 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1498 if (rcStrict == VINF_SUCCESS)
1499 {
1500 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1501 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1502 pVCpu->iem.s.abOpcode[offOpcode + 1],
1503 pVCpu->iem.s.abOpcode[offOpcode + 2],
1504 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1505 pVCpu->iem.s.offOpcode = offOpcode + 4;
1506 }
1507 else
1508 *pu64 = 0;
1509 return rcStrict;
1510}
1511
1512
1513/**
1514 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
1515 *
1516 * @returns Strict VBox status code.
1517 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1518 * @param pu64 Where to return the opcode qword.
1519 */
1520VBOXSTRICTRC iemOpcodeGetNextS32SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1521{
1522 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1523 if (rcStrict == VINF_SUCCESS)
1524 {
1525 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1526 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1527 pVCpu->iem.s.abOpcode[offOpcode + 1],
1528 pVCpu->iem.s.abOpcode[offOpcode + 2],
1529 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1530 pVCpu->iem.s.offOpcode = offOpcode + 4;
1531 }
1532 else
1533 *pu64 = 0;
1534 return rcStrict;
1535}
1536
1537#endif /* !IEM_WITH_SETJMP */
1538
1539#ifndef IEM_WITH_SETJMP
1540
1541/**
1542 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
1543 *
1544 * @returns Strict VBox status code.
1545 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1546 * @param pu64 Where to return the opcode qword.
1547 */
1548VBOXSTRICTRC iemOpcodeGetNextU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1549{
1550 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
1551 if (rcStrict == VINF_SUCCESS)
1552 {
1553 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1554# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1555 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1556# else
1557 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1558 pVCpu->iem.s.abOpcode[offOpcode + 1],
1559 pVCpu->iem.s.abOpcode[offOpcode + 2],
1560 pVCpu->iem.s.abOpcode[offOpcode + 3],
1561 pVCpu->iem.s.abOpcode[offOpcode + 4],
1562 pVCpu->iem.s.abOpcode[offOpcode + 5],
1563 pVCpu->iem.s.abOpcode[offOpcode + 6],
1564 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1565# endif
1566 pVCpu->iem.s.offOpcode = offOpcode + 8;
1567 }
1568 else
1569 *pu64 = 0;
1570 return rcStrict;
1571}
1572
1573#else /* IEM_WITH_SETJMP */
1574
1575/**
1576 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
1577 *
1578 * @returns The opcode qword.
1579 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1580 */
1581uint64_t iemOpcodeGetNextU64SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1582{
1583# ifdef IEM_WITH_CODE_TLB
1584 uint64_t u64;
1585 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
1586 return u64;
1587# else
1588 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
1589 if (rcStrict == VINF_SUCCESS)
1590 {
1591 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1592 pVCpu->iem.s.offOpcode = offOpcode + 8;
1593# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1594 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1595# else
1596 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1597 pVCpu->iem.s.abOpcode[offOpcode + 1],
1598 pVCpu->iem.s.abOpcode[offOpcode + 2],
1599 pVCpu->iem.s.abOpcode[offOpcode + 3],
1600 pVCpu->iem.s.abOpcode[offOpcode + 4],
1601 pVCpu->iem.s.abOpcode[offOpcode + 5],
1602 pVCpu->iem.s.abOpcode[offOpcode + 6],
1603 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1604# endif
1605 }
1606 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1607# endif
1608}
1609
1610#endif /* IEM_WITH_SETJMP */
1611
1612
1613
1614/** @name Misc Worker Functions.
1615 * @{
1616 */
1617
1618/**
1619 * Gets the exception class for the specified exception vector.
1620 *
1621 * @returns The class of the specified exception.
1622 * @param uVector The exception vector.
1623 */
1624static IEMXCPTCLASS iemGetXcptClass(uint8_t uVector) RT_NOEXCEPT
1625{
1626 Assert(uVector <= X86_XCPT_LAST);
1627 switch (uVector)
1628 {
1629 case X86_XCPT_DE:
1630 case X86_XCPT_TS:
1631 case X86_XCPT_NP:
1632 case X86_XCPT_SS:
1633 case X86_XCPT_GP:
1634 case X86_XCPT_SX: /* AMD only */
1635 return IEMXCPTCLASS_CONTRIBUTORY;
1636
1637 case X86_XCPT_PF:
1638 case X86_XCPT_VE: /* Intel only */
1639 return IEMXCPTCLASS_PAGE_FAULT;
1640
1641 case X86_XCPT_DF:
1642 return IEMXCPTCLASS_DOUBLE_FAULT;
1643 }
1644 return IEMXCPTCLASS_BENIGN;
1645}
1646
1647
1648/**
1649 * Evaluates how to handle an exception caused during delivery of another event
1650 * (exception / interrupt).
1651 *
1652 * @returns How to handle the recursive exception.
1653 * @param pVCpu The cross context virtual CPU structure of the
1654 * calling thread.
1655 * @param fPrevFlags The flags of the previous event.
1656 * @param uPrevVector The vector of the previous event.
1657 * @param fCurFlags The flags of the current exception.
1658 * @param uCurVector The vector of the current exception.
1659 * @param pfXcptRaiseInfo Where to store additional information about the
1660 * exception condition. Optional.
1661 */
1662VMM_INT_DECL(IEMXCPTRAISE) IEMEvaluateRecursiveXcpt(PVMCPUCC pVCpu, uint32_t fPrevFlags, uint8_t uPrevVector, uint32_t fCurFlags,
1663 uint8_t uCurVector, PIEMXCPTRAISEINFO pfXcptRaiseInfo)
1664{
1665 /*
1666 * Only CPU exceptions can be raised while delivering other events, software interrupt
1667 * (INTn/INT3/INTO/ICEBP) generated exceptions cannot occur as the current (second) exception.
1668 */
1669 AssertReturn(fCurFlags & IEM_XCPT_FLAGS_T_CPU_XCPT, IEMXCPTRAISE_INVALID);
1670 Assert(pVCpu); RT_NOREF(pVCpu);
1671 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x\n", uPrevVector, uCurVector));
1672
1673 IEMXCPTRAISE enmRaise = IEMXCPTRAISE_CURRENT_XCPT;
1674 IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
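    /* Summary of the recursion rules applied below (see the Intel SDM, "Interrupt 8 - Double Fault"):
       benign + anything (either order) -> deliver the current (second) exception as-is;
       #PF followed by #PF or a contributory exception, or contributory followed by contributory -> #DF;
       a contributory exception or #PF raised while delivering #DF -> triple fault (shutdown). */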
1675 if (fPrevFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
1676 {
1677 IEMXCPTCLASS enmPrevXcptClass = iemGetXcptClass(uPrevVector);
1678 if (enmPrevXcptClass != IEMXCPTCLASS_BENIGN)
1679 {
1680 IEMXCPTCLASS enmCurXcptClass = iemGetXcptClass(uCurVector);
1681 if ( enmPrevXcptClass == IEMXCPTCLASS_PAGE_FAULT
1682 && ( enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT
1683 || enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY))
1684 {
1685 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
1686 fRaiseInfo = enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT ? IEMXCPTRAISEINFO_PF_PF
1687 : IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT;
1688 Log2(("IEMEvaluateRecursiveXcpt: Vectoring page fault. uPrevVector=%#x uCurVector=%#x uCr2=%#RX64\n", uPrevVector,
1689 uCurVector, pVCpu->cpum.GstCtx.cr2));
1690 }
1691 else if ( enmPrevXcptClass == IEMXCPTCLASS_CONTRIBUTORY
1692 && enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY)
1693 {
1694 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
1695 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x -> #DF\n", uPrevVector, uCurVector));
1696 }
1697 else if ( enmPrevXcptClass == IEMXCPTCLASS_DOUBLE_FAULT
1698 && ( enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY
1699 || enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT))
1700 {
1701 enmRaise = IEMXCPTRAISE_TRIPLE_FAULT;
1702 Log2(("IEMEvaluateRecursiveXcpt: #DF handler raised a %#x exception -> triple fault\n", uCurVector));
1703 }
1704 }
1705 else
1706 {
1707 if (uPrevVector == X86_XCPT_NMI)
1708 {
1709 fRaiseInfo = IEMXCPTRAISEINFO_NMI_XCPT;
1710 if (uCurVector == X86_XCPT_PF)
1711 {
1712 fRaiseInfo |= IEMXCPTRAISEINFO_NMI_PF;
1713 Log2(("IEMEvaluateRecursiveXcpt: NMI delivery caused a page fault\n"));
1714 }
1715 }
1716 else if ( uPrevVector == X86_XCPT_AC
1717 && uCurVector == X86_XCPT_AC)
1718 {
1719 enmRaise = IEMXCPTRAISE_CPU_HANG;
1720 fRaiseInfo = IEMXCPTRAISEINFO_AC_AC;
1721 Log2(("IEMEvaluateRecursiveXcpt: Recursive #AC - Bad guest\n"));
1722 }
1723 }
1724 }
1725 else if (fPrevFlags & IEM_XCPT_FLAGS_T_EXT_INT)
1726 {
1727 fRaiseInfo = IEMXCPTRAISEINFO_EXT_INT_XCPT;
1728 if (uCurVector == X86_XCPT_PF)
1729 fRaiseInfo |= IEMXCPTRAISEINFO_EXT_INT_PF;
1730 }
1731 else
1732 {
1733 Assert(fPrevFlags & IEM_XCPT_FLAGS_T_SOFT_INT);
1734 fRaiseInfo = IEMXCPTRAISEINFO_SOFT_INT_XCPT;
1735 }
1736
1737 if (pfXcptRaiseInfo)
1738 *pfXcptRaiseInfo = fRaiseInfo;
1739 return enmRaise;
1740}
1741
1742
1743/**
1744 * Enters the CPU shutdown state initiated by a triple fault or other
1745 * unrecoverable conditions.
1746 *
1747 * @returns Strict VBox status code.
1748 * @param pVCpu The cross context virtual CPU structure of the
1749 * calling thread.
1750 */
1751static VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPUCC pVCpu) RT_NOEXCEPT
1752{
1753 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
1754 IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(pVCpu, VMX_EXIT_TRIPLE_FAULT, 0 /* u64ExitQual */);
1755
1756 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
1757 {
1758 Log2(("shutdown: Guest intercept -> #VMEXIT\n"));
1759 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
1760 }
1761
1762 RT_NOREF(pVCpu);
1763 return VINF_EM_TRIPLE_FAULT;
1764}
1765
1766
1767/**
1768 * Validates a new SS segment.
1769 *
1770 * @returns VBox strict status code.
1771 * @param pVCpu The cross context virtual CPU structure of the
1772 * calling thread.
1773 * @param NewSS The new SS selector.
1774 * @param uCpl The CPL to load the stack for.
1775 * @param pDesc Where to return the descriptor.
1776 */
1777static VBOXSTRICTRC iemMiscValidateNewSS(PVMCPUCC pVCpu, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc) RT_NOEXCEPT
1778{
1779 /* Null selectors are not allowed (we're not called for dispatching
1780 interrupts with SS=0 in long mode). */
1781 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
1782 {
1783 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
1784 return iemRaiseTaskSwitchFault0(pVCpu);
1785 }
1786
1787 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
1788 if ((NewSS & X86_SEL_RPL) != uCpl)
1789 {
1790 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl));
1791 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1792 }
1793
1794 /*
1795 * Read the descriptor.
1796 */
1797 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
1798 if (rcStrict != VINF_SUCCESS)
1799 return rcStrict;
1800
1801 /*
1802 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
1803 */
1804 if (!pDesc->Legacy.Gen.u1DescType)
1805 {
1806 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
1807 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1808 }
1809
1810 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
1811 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
1812 {
1813 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
1814 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1815 }
1816 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
1817 {
1818 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
1819 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1820 }
1821
1822 /* Is it there? */
1823 /** @todo testcase: Is this checked before the canonical / limit check below? */
1824 if (!pDesc->Legacy.Gen.u1Present)
1825 {
1826 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
1827 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
1828 }
1829
1830 return VINF_SUCCESS;
1831}
1832
1833/** @} */
1834
1835
1836/** @name Raising Exceptions.
1837 *
1838 * @{
1839 */
1840
1841
1842/**
1843 * Loads the specified stack far pointer from the TSS.
1844 *
1845 * @returns VBox strict status code.
1846 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1847 * @param uCpl The CPL to load the stack for.
1848 * @param pSelSS Where to return the new stack segment.
1849 * @param puEsp Where to return the new stack pointer.
1850 */
1851static VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPUCC pVCpu, uint8_t uCpl, PRTSEL pSelSS, uint32_t *puEsp) RT_NOEXCEPT
1852{
1853 VBOXSTRICTRC rcStrict;
1854 Assert(uCpl < 4);
1855
1856 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
1857 switch (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type)
1858 {
1859 /*
1860 * 16-bit TSS (X86TSS16).
1861 */
1862 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
1863 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
1864 {
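            /* In a 16-bit TSS the ring stacks are stored as word pairs sp0/ss0, sp1/ss1, sp2/ss2
               starting at offset 2, so the pair for uCpl sits at offset 2 + uCpl * 4 and the single
               dword fetch below picks up SP in the low word and SS in the high word. */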
1865 uint32_t off = uCpl * 4 + 2;
1866 if (off + 4 <= pVCpu->cpum.GstCtx.tr.u32Limit)
1867 {
1868 /** @todo check actual access pattern here. */
1869 uint32_t u32Tmp = 0; /* gcc maybe... */
1870 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
1871 if (rcStrict == VINF_SUCCESS)
1872 {
1873 *puEsp = RT_LOWORD(u32Tmp);
1874 *pSelSS = RT_HIWORD(u32Tmp);
1875 return VINF_SUCCESS;
1876 }
1877 }
1878 else
1879 {
1880 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
1881 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
1882 }
1883 break;
1884 }
1885
1886 /*
1887 * 32-bit TSS (X86TSS32).
1888 */
1889 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
1890 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
1891 {
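            /* In a 32-bit TSS the ring stacks are stored as 8-byte esp0/ss0, esp1/ss1, esp2/ss2
               pairs starting at offset 4, so ESP for uCpl is at offset 4 + uCpl * 8 and the qword
               fetch below picks up ESP in the low dword and SS in the high dword. */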
1892 uint32_t off = uCpl * 8 + 4;
1893 if (off + 7 <= pVCpu->cpum.GstCtx.tr.u32Limit)
1894 {
1895/** @todo check actual access pattern here. */
1896 uint64_t u64Tmp;
1897 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
1898 if (rcStrict == VINF_SUCCESS)
1899 {
1900 *puEsp = u64Tmp & UINT32_MAX;
1901 *pSelSS = (RTSEL)(u64Tmp >> 32);
1902 return VINF_SUCCESS;
1903 }
1904 }
1905 else
1906 {
1907 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
1908 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
1909 }
1910 break;
1911 }
1912
1913 default:
1914 AssertFailed();
1915 rcStrict = VERR_IEM_IPE_4;
1916 break;
1917 }
1918
1919 *puEsp = 0; /* make gcc happy */
1920 *pSelSS = 0; /* make gcc happy */
1921 return rcStrict;
1922}
1923
1924
1925/**
1926 * Loads the specified stack pointer from the 64-bit TSS.
1927 *
1928 * @returns VBox strict status code.
1929 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1930 * @param uCpl The CPL to load the stack for.
1931 * @param uIst The interrupt stack table index, 0 if to use uCpl.
1932 * @param puRsp Where to return the new stack pointer.
1933 */
1934static VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPUCC pVCpu, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp) RT_NOEXCEPT
1935{
1936 Assert(uCpl < 4);
1937 Assert(uIst < 8);
1938 *puRsp = 0; /* make gcc happy */
1939
1940 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
1941 AssertReturn(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
1942
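    /* An IST index (1..7) from the IDT gate takes precedence over the per-CPL RSP0..RSP2 fields;
       uIst == 0 means no IST is used and we pick the RSP field for the target CPL. */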
1943 uint32_t off;
1944 if (uIst)
1945 off = (uIst - 1) * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, ist1);
1946 else
1947 off = uCpl * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, rsp0);
1948 if (off + sizeof(uint64_t) > pVCpu->cpum.GstCtx.tr.u32Limit)
1949 {
1950 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pVCpu->cpum.GstCtx.tr.u32Limit));
1951 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
1952 }
1953
1954 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
1955}
1956
1957
1958/**
1959 * Adjust the CPU state according to the exception being raised.
1960 *
1961 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1962 * @param u8Vector The exception that has been raised.
1963 */
1964DECLINLINE(void) iemRaiseXcptAdjustState(PVMCPUCC pVCpu, uint8_t u8Vector)
1965{
1966 switch (u8Vector)
1967 {
1968 case X86_XCPT_DB:
1969 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);
1970 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
1971 break;
1972 /** @todo Read the AMD and Intel exception reference... */
1973 }
1974}
1975
1976
1977/**
1978 * Implements exceptions and interrupts for real mode.
1979 *
1980 * @returns VBox strict status code.
1981 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1982 * @param cbInstr The number of bytes to offset rIP by in the return
1983 * address.
1984 * @param u8Vector The interrupt / exception vector number.
1985 * @param fFlags The flags.
1986 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
1987 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
1988 */
1989static VBOXSTRICTRC
1990iemRaiseXcptOrIntInRealMode(PVMCPUCC pVCpu,
1991 uint8_t cbInstr,
1992 uint8_t u8Vector,
1993 uint32_t fFlags,
1994 uint16_t uErr,
1995 uint64_t uCr2) RT_NOEXCEPT
1996{
1997 NOREF(uErr); NOREF(uCr2);
1998 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
1999
2000 /*
2001 * Read the IDT entry.
2002 */
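    /* In real mode the IDT is the classic IVT: an array of 4-byte IP:CS far pointers, so entry
       u8Vector occupies bytes 4*u8Vector..4*u8Vector+3 and must fit within the (inclusive) limit. */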
2003 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
2004 {
2005 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
2006 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2007 }
2008 RTFAR16 Idte;
2009 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(4) * u8Vector);
2010 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2011 {
2012 Log(("iemRaiseXcptOrIntInRealMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
2013 return rcStrict;
2014 }
2015
2016 /*
2017 * Push the stack frame.
2018 */
2019 uint16_t *pu16Frame;
2020 uint64_t uNewRsp;
2021 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, 3, (void **)&pu16Frame, &uNewRsp);
2022 if (rcStrict != VINF_SUCCESS)
2023 return rcStrict;
2024
2025 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
2026#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
2027 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
2028 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
2029 fEfl |= UINT16_C(0xf000);
2030#endif
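    /* Real-mode interrupt frame: FLAGS, CS and IP are pushed in that order, so the return IP ends
       up at the lowest address (pu16Frame[0]) of the 6-byte frame committed below. */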
2031 pu16Frame[2] = (uint16_t)fEfl;
2032 pu16Frame[1] = (uint16_t)pVCpu->cpum.GstCtx.cs.Sel;
2033 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
2034 rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu16Frame, uNewRsp);
2035 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2036 return rcStrict;
2037
2038 /*
2039 * Load the vector address into cs:ip and make exception specific state
2040 * adjustments.
2041 */
2042 pVCpu->cpum.GstCtx.cs.Sel = Idte.sel;
2043 pVCpu->cpum.GstCtx.cs.ValidSel = Idte.sel;
2044 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
2045 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)Idte.sel << 4;
2046 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
2047 pVCpu->cpum.GstCtx.rip = Idte.off;
2048 fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC);
2049 IEMMISC_SET_EFL(pVCpu, fEfl);
2050
2051 /** @todo do we actually do this in real mode? */
2052 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
2053 iemRaiseXcptAdjustState(pVCpu, u8Vector);
2054
2055 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
2056}
2057
2058
2059/**
2060 * Loads a NULL data selector into a segment register when coming from V8086 mode.
2061 *
2062 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2063 * @param pSReg Pointer to the segment register.
2064 */
2065DECLINLINE(void) iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPUCC pVCpu, PCPUMSELREG pSReg)
2066{
2067 pSReg->Sel = 0;
2068 pSReg->ValidSel = 0;
2069 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2070 {
2071 /* VT-x (Intel 3960x) doesn't change the base and limit, clears and sets the following attributes */
2072 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
2073 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
2074 }
2075 else
2076 {
2077 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2078 /** @todo check this on AMD-V */
2079 pSReg->u64Base = 0;
2080 pSReg->u32Limit = 0;
2081 }
2082}
2083
2084
2085/**
2086 * Loads a segment selector during a task switch in V8086 mode.
2087 *
2088 * @param pSReg Pointer to the segment register.
2089 * @param uSel The selector value to load.
2090 */
2091DECLINLINE(void) iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
2092{
2093 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
2094 pSReg->Sel = uSel;
2095 pSReg->ValidSel = uSel;
2096 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2097 pSReg->u64Base = uSel << 4;
2098 pSReg->u32Limit = 0xffff;
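    /* 0xf3 = present, DPL=3, code/data (S=1), type 3 (read/write data, accessed) - the attributes
       expected for V86-mode segments, cf. the guest segment register checks referenced above. */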
2099 pSReg->Attr.u = 0xf3;
2100}
2101
2102
2103/**
2104 * Loads a segment selector during a task switch in protected mode.
2105 *
2106 * In this task switch scenario, we would throw \#TS exceptions rather than
2107 * \#GPs.
2108 *
2109 * @returns VBox strict status code.
2110 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2111 * @param pSReg Pointer to the segment register.
2112 * @param uSel The new selector value.
2113 *
2114 * @remarks This does _not_ handle CS or SS.
2115 * @remarks This expects pVCpu->iem.s.uCpl to be up to date.
2116 */
2117static VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPUCC pVCpu, PCPUMSELREG pSReg, uint16_t uSel) RT_NOEXCEPT
2118{
2119 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
2120
2121 /* Null data selector. */
2122 if (!(uSel & X86_SEL_MASK_OFF_RPL))
2123 {
2124 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
2125 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
2126 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2127 return VINF_SUCCESS;
2128 }
2129
2130 /* Fetch the descriptor. */
2131 IEMSELDESC Desc;
2132 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
2133 if (rcStrict != VINF_SUCCESS)
2134 {
2135 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
2136 VBOXSTRICTRC_VAL(rcStrict)));
2137 return rcStrict;
2138 }
2139
2140 /* Must be a data segment or readable code segment. */
2141 if ( !Desc.Legacy.Gen.u1DescType
2142 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
2143 {
2144 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
2145 Desc.Legacy.Gen.u4Type));
2146 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2147 }
2148
2149 /* Check privileges for data segments and non-conforming code segments. */
2150 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2151 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2152 {
2153 /* The RPL and the new CPL must be less than or equal to the DPL. */
2154 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
2155 || (pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl))
2156 {
2157 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
2158 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
2159 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2160 }
2161 }
2162
2163 /* Is it there? */
2164 if (!Desc.Legacy.Gen.u1Present)
2165 {
2166 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
2167 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2168 }
2169
2170 /* The base and limit. */
2171 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
2172 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
2173
2174 /*
2175 * Ok, everything checked out fine. Now set the accessed bit before
2176 * committing the result into the registers.
2177 */
2178 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2179 {
2180 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
2181 if (rcStrict != VINF_SUCCESS)
2182 return rcStrict;
2183 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2184 }
2185
2186 /* Commit */
2187 pSReg->Sel = uSel;
2188 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
2189 pSReg->u32Limit = cbLimit;
2190 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
2191 pSReg->ValidSel = uSel;
2192 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2193 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2194 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
2195
2196 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
2197 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2198 return VINF_SUCCESS;
2199}
2200
2201
2202/**
2203 * Performs a task switch.
2204 *
2205 * If the task switch is the result of a JMP, CALL or IRET instruction, the
2206 * caller is responsible for performing the necessary checks (like DPL, TSS
2207 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
2208 * reference for JMP, CALL, IRET.
2209 *
2210 * If the task switch is due to a software interrupt or hardware exception,
2211 * the caller is responsible for validating the TSS selector and descriptor. See
2212 * Intel Instruction reference for INT n.
2213 *
2214 * @returns VBox strict status code.
2215 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2216 * @param enmTaskSwitch The cause of the task switch.
2217 * @param uNextEip The EIP effective after the task switch.
2218 * @param fFlags The flags, see IEM_XCPT_FLAGS_XXX.
2219 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2220 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2221 * @param SelTSS The TSS selector of the new task.
2222 * @param pNewDescTSS Pointer to the new TSS descriptor.
2223 */
2224VBOXSTRICTRC
2225iemTaskSwitch(PVMCPUCC pVCpu,
2226 IEMTASKSWITCH enmTaskSwitch,
2227 uint32_t uNextEip,
2228 uint32_t fFlags,
2229 uint16_t uErr,
2230 uint64_t uCr2,
2231 RTSEL SelTSS,
2232 PIEMSELDESC pNewDescTSS) RT_NOEXCEPT
2233{
2234 Assert(!IEM_IS_REAL_MODE(pVCpu));
2235 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
2236 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2237
2238 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
2239 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2240 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2241 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2242 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2243
2244 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2245 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2246
2247 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTSS,
2248 fIsNewTSS386, pVCpu->cpum.GstCtx.eip, uNextEip));
2249
2250 /* Update CR2 in case it's a page-fault. */
2251 /** @todo This should probably be done much earlier in IEM/PGM. See
2252 * @bugref{5653#c49}. */
2253 if (fFlags & IEM_XCPT_FLAGS_CR2)
2254 pVCpu->cpum.GstCtx.cr2 = uCr2;
2255
2256 /*
2257 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
2258 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
2259 */
2260 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
2261 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
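    /* The minimum legal limits are 0x2b for a 16-bit TSS (44 bytes) and 0x67 for a 32-bit TSS (104 bytes). */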
2262 if (uNewTSSLimit < uNewTSSLimitMin)
2263 {
2264 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
2265 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
2266 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
2267 }
2268
2269 /*
2270 * Task switches in VMX non-root mode always cause a task-switch VM-exit.
2271 * The new TSS must have been read and validated (DPL, limits etc.) before a
2272 * task-switch VM-exit commences.
2273 *
2274 * See Intel spec. 25.4.2 "Treatment of Task Switches".
2275 */
2276 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
2277 {
2278 Log(("iemTaskSwitch: Guest intercept (source=%u, sel=%#x) -> VM-exit.\n", enmTaskSwitch, SelTSS));
2279 IEM_VMX_VMEXIT_TASK_SWITCH_RET(pVCpu, enmTaskSwitch, SelTSS, uNextEip - pVCpu->cpum.GstCtx.eip);
2280 }
2281
2282 /*
2283 * The SVM nested-guest intercept for task-switch takes priority over all exceptions
2284 * after validating the incoming (new) TSS, see AMD spec. 15.14.1 "Task Switch Intercept".
2285 */
2286 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TASK_SWITCH))
2287 {
2288 uint64_t const uExitInfo1 = SelTSS;
2289 uint64_t uExitInfo2 = uErr;
2290 switch (enmTaskSwitch)
2291 {
2292 case IEMTASKSWITCH_JUMP: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_JUMP; break;
2293 case IEMTASKSWITCH_IRET: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_IRET; break;
2294 default: break;
2295 }
2296 if (fFlags & IEM_XCPT_FLAGS_ERR)
2297 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_HAS_ERROR_CODE;
2298 if (pVCpu->cpum.GstCtx.eflags.Bits.u1RF)
2299 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_EFLAGS_RF;
2300
2301 Log(("iemTaskSwitch: Guest intercept -> #VMEXIT. uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uExitInfo1, uExitInfo2));
2302 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_TASK_SWITCH, uExitInfo1, uExitInfo2);
2303 RT_NOREF2(uExitInfo1, uExitInfo2);
2304 }
2305
2306 /*
2307 * Check the current TSS limit. The last write to the current TSS during the task
2308 * switch will be 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
2309 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2310 *
2311 * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
2312 * end up with smaller than "legal" TSS limits.
2313 */
2314 uint32_t const uCurTSSLimit = pVCpu->cpum.GstCtx.tr.u32Limit;
2315 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
2316 if (uCurTSSLimit < uCurTSSLimitMin)
2317 {
2318 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
2319 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
2320 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
2321 }
2322
2323 /*
2324 * Verify that the new TSS can be accessed and map it. Map only the required contents
2325 * and not the entire TSS.
2326 */
2327 void *pvNewTSS;
2328 uint32_t const cbNewTSS = uNewTSSLimitMin + 1;
2329 RTGCPTR const GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
2330 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
2331 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
2332 * not perform correct translation if this happens. See Intel spec. 7.2.1
2333 * "Task-State Segment". */
2334 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW, 0);
2335 if (rcStrict != VINF_SUCCESS)
2336 {
2337 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
2338 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
2339 return rcStrict;
2340 }
2341
2342 /*
2343 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
2344 */
2345 uint32_t fEFlags = pVCpu->cpum.GstCtx.eflags.u;
2346 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
2347 || enmTaskSwitch == IEMTASKSWITCH_IRET)
2348 {
2349 PX86DESC pDescCurTSS;
2350 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
2351 pVCpu->cpum.GstCtx.gdtr.pGdt + (pVCpu->cpum.GstCtx.tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0);
2352 if (rcStrict != VINF_SUCCESS)
2353 {
2354 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2355 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2356 return rcStrict;
2357 }
2358
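        /* Clearing bit 1 of the type turns a busy TSS type (3 / 0xB) back into the corresponding
           available type (1 / 9). */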
2359 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2360 rcStrict = iemMemCommitAndUnmap(pVCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
2361 if (rcStrict != VINF_SUCCESS)
2362 {
2363 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2364 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2365 return rcStrict;
2366 }
2367
2368 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
2369 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
2370 {
2371 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2372 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2373 fEFlags &= ~X86_EFL_NT;
2374 }
2375 }
2376
2377 /*
2378 * Save the CPU state into the current TSS.
2379 */
2380 RTGCPTR const GCPtrCurTSS = pVCpu->cpum.GstCtx.tr.u64Base;
2381 if (GCPtrNewTSS == GCPtrCurTSS)
2382 {
2383 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
2384 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
2385 pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.u, pVCpu->cpum.GstCtx.eax,
2386 pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel,
2387 pVCpu->cpum.GstCtx.ldtr.Sel));
2388 }
2389 if (fIsNewTSS386)
2390 {
2391 /*
2392 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
2393 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2394 */
2395 void *pvCurTSS32;
2396 uint32_t const offCurTSS = RT_UOFFSETOF(X86TSS32, eip);
2397 uint32_t const cbCurTSS = RT_UOFFSETOF(X86TSS32, selLdt) - RT_UOFFSETOF(X86TSS32, eip);
2398 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
2399 rcStrict = iemMemMap(pVCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW, 0);
2400 if (rcStrict != VINF_SUCCESS)
2401 {
2402 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
2403 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
2404 return rcStrict;
2405 }
2406
2407 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS, offCurTSS + cbCurTSS). */
2408 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
2409 pCurTSS32->eip = uNextEip;
2410 pCurTSS32->eflags = fEFlags;
2411 pCurTSS32->eax = pVCpu->cpum.GstCtx.eax;
2412 pCurTSS32->ecx = pVCpu->cpum.GstCtx.ecx;
2413 pCurTSS32->edx = pVCpu->cpum.GstCtx.edx;
2414 pCurTSS32->ebx = pVCpu->cpum.GstCtx.ebx;
2415 pCurTSS32->esp = pVCpu->cpum.GstCtx.esp;
2416 pCurTSS32->ebp = pVCpu->cpum.GstCtx.ebp;
2417 pCurTSS32->esi = pVCpu->cpum.GstCtx.esi;
2418 pCurTSS32->edi = pVCpu->cpum.GstCtx.edi;
2419 pCurTSS32->es = pVCpu->cpum.GstCtx.es.Sel;
2420 pCurTSS32->cs = pVCpu->cpum.GstCtx.cs.Sel;
2421 pCurTSS32->ss = pVCpu->cpum.GstCtx.ss.Sel;
2422 pCurTSS32->ds = pVCpu->cpum.GstCtx.ds.Sel;
2423 pCurTSS32->fs = pVCpu->cpum.GstCtx.fs.Sel;
2424 pCurTSS32->gs = pVCpu->cpum.GstCtx.gs.Sel;
2425
2426 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
2427 if (rcStrict != VINF_SUCCESS)
2428 {
2429 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2430 VBOXSTRICTRC_VAL(rcStrict)));
2431 return rcStrict;
2432 }
2433 }
2434 else
2435 {
2436 /*
2437 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
2438 */
2439 void *pvCurTSS16;
2440 uint32_t const offCurTSS = RT_UOFFSETOF(X86TSS16, ip);
2441 uint32_t const cbCurTSS = RT_UOFFSETOF(X86TSS16, selLdt) - RT_UOFFSETOF(X86TSS16, ip);
2442 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
2443 rcStrict = iemMemMap(pVCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW, 0);
2444 if (rcStrict != VINF_SUCCESS)
2445 {
2446 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
2447 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
2448 return rcStrict;
2449 }
2450
2451 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS, offCurTSS + cbCurTSS). */
2452 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
2453 pCurTSS16->ip = uNextEip;
2454 pCurTSS16->flags = (uint16_t)fEFlags;
2455 pCurTSS16->ax = pVCpu->cpum.GstCtx.ax;
2456 pCurTSS16->cx = pVCpu->cpum.GstCtx.cx;
2457 pCurTSS16->dx = pVCpu->cpum.GstCtx.dx;
2458 pCurTSS16->bx = pVCpu->cpum.GstCtx.bx;
2459 pCurTSS16->sp = pVCpu->cpum.GstCtx.sp;
2460 pCurTSS16->bp = pVCpu->cpum.GstCtx.bp;
2461 pCurTSS16->si = pVCpu->cpum.GstCtx.si;
2462 pCurTSS16->di = pVCpu->cpum.GstCtx.di;
2463 pCurTSS16->es = pVCpu->cpum.GstCtx.es.Sel;
2464 pCurTSS16->cs = pVCpu->cpum.GstCtx.cs.Sel;
2465 pCurTSS16->ss = pVCpu->cpum.GstCtx.ss.Sel;
2466 pCurTSS16->ds = pVCpu->cpum.GstCtx.ds.Sel;
2467
2468 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
2469 if (rcStrict != VINF_SUCCESS)
2470 {
2471 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2472 VBOXSTRICTRC_VAL(rcStrict)));
2473 return rcStrict;
2474 }
2475 }
2476
2477 /*
2478 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
2479 */
2480 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2481 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2482 {
2483 /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */
2484 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
2485 pNewTSS->selPrev = pVCpu->cpum.GstCtx.tr.Sel;
2486 }
2487
2488 /*
2489 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky,
2490 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
2491 */
2492 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
2493 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
2494 bool fNewDebugTrap;
2495 if (fIsNewTSS386)
2496 {
2497 PCX86TSS32 pNewTSS32 = (PCX86TSS32)pvNewTSS;
2498 uNewCr3 = (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
2499 uNewEip = pNewTSS32->eip;
2500 uNewEflags = pNewTSS32->eflags;
2501 uNewEax = pNewTSS32->eax;
2502 uNewEcx = pNewTSS32->ecx;
2503 uNewEdx = pNewTSS32->edx;
2504 uNewEbx = pNewTSS32->ebx;
2505 uNewEsp = pNewTSS32->esp;
2506 uNewEbp = pNewTSS32->ebp;
2507 uNewEsi = pNewTSS32->esi;
2508 uNewEdi = pNewTSS32->edi;
2509 uNewES = pNewTSS32->es;
2510 uNewCS = pNewTSS32->cs;
2511 uNewSS = pNewTSS32->ss;
2512 uNewDS = pNewTSS32->ds;
2513 uNewFS = pNewTSS32->fs;
2514 uNewGS = pNewTSS32->gs;
2515 uNewLdt = pNewTSS32->selLdt;
2516 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
2517 }
2518 else
2519 {
2520 PCX86TSS16 pNewTSS16 = (PCX86TSS16)pvNewTSS;
2521 uNewCr3 = 0;
2522 uNewEip = pNewTSS16->ip;
2523 uNewEflags = pNewTSS16->flags;
2524 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax;
2525 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx;
2526 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx;
2527 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx;
2528 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp;
2529 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp;
2530 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si;
2531 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di;
2532 uNewES = pNewTSS16->es;
2533 uNewCS = pNewTSS16->cs;
2534 uNewSS = pNewTSS16->ss;
2535 uNewDS = pNewTSS16->ds;
2536 uNewFS = 0;
2537 uNewGS = 0;
2538 uNewLdt = pNewTSS16->selLdt;
2539 fNewDebugTrap = false;
2540 }
2541
2542 if (GCPtrNewTSS == GCPtrCurTSS)
2543 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
2544 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
2545
2546 /*
2547 * We're done accessing the new TSS.
2548 */
2549 rcStrict = iemMemCommitAndUnmap(pVCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
2550 if (rcStrict != VINF_SUCCESS)
2551 {
2552 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
2553 return rcStrict;
2554 }
2555
2556 /*
2557 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
2558 */
2559 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
2560 {
2561 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
2562 pVCpu->cpum.GstCtx.gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0);
2563 if (rcStrict != VINF_SUCCESS)
2564 {
2565 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2566 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2567 return rcStrict;
2568 }
2569
2570 /* Check that the descriptor indicates the new TSS is available (not busy). */
2571 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2572 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
2573 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
2574
2575 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2576 rcStrict = iemMemCommitAndUnmap(pVCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
2577 if (rcStrict != VINF_SUCCESS)
2578 {
2579 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2580 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2581 return rcStrict;
2582 }
2583 }
2584
2585 /*
2586 * From this point on, we're technically in the new task. Exceptions raised from here on are
2587 * reported as occurring after the task switch has completed but before any instruction in the new task executes.
2588 */
2589 pVCpu->cpum.GstCtx.tr.Sel = SelTSS;
2590 pVCpu->cpum.GstCtx.tr.ValidSel = SelTSS;
2591 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
2592 pVCpu->cpum.GstCtx.tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
2593 pVCpu->cpum.GstCtx.tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
2594 pVCpu->cpum.GstCtx.tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy);
2595 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
2596
2597 /* Set the busy bit in TR. */
2598 pVCpu->cpum.GstCtx.tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2599
2600 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
2601 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2602 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2603 {
2604 uNewEflags |= X86_EFL_NT;
2605 }
2606
2607 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
2608 pVCpu->cpum.GstCtx.cr0 |= X86_CR0_TS;
2609 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
2610
2611 pVCpu->cpum.GstCtx.eip = uNewEip;
2612 pVCpu->cpum.GstCtx.eax = uNewEax;
2613 pVCpu->cpum.GstCtx.ecx = uNewEcx;
2614 pVCpu->cpum.GstCtx.edx = uNewEdx;
2615 pVCpu->cpum.GstCtx.ebx = uNewEbx;
2616 pVCpu->cpum.GstCtx.esp = uNewEsp;
2617 pVCpu->cpum.GstCtx.ebp = uNewEbp;
2618 pVCpu->cpum.GstCtx.esi = uNewEsi;
2619 pVCpu->cpum.GstCtx.edi = uNewEdi;
2620
2621 uNewEflags &= X86_EFL_LIVE_MASK;
2622 uNewEflags |= X86_EFL_RA1_MASK;
2623 IEMMISC_SET_EFL(pVCpu, uNewEflags);
2624
2625 /*
2626 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
2627 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
2628 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
2629 */
2630 pVCpu->cpum.GstCtx.es.Sel = uNewES;
2631 pVCpu->cpum.GstCtx.es.Attr.u &= ~X86DESCATTR_P;
2632
2633 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
2634 pVCpu->cpum.GstCtx.cs.Attr.u &= ~X86DESCATTR_P;
2635
2636 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
2637 pVCpu->cpum.GstCtx.ss.Attr.u &= ~X86DESCATTR_P;
2638
2639 pVCpu->cpum.GstCtx.ds.Sel = uNewDS;
2640 pVCpu->cpum.GstCtx.ds.Attr.u &= ~X86DESCATTR_P;
2641
2642 pVCpu->cpum.GstCtx.fs.Sel = uNewFS;
2643 pVCpu->cpum.GstCtx.fs.Attr.u &= ~X86DESCATTR_P;
2644
2645 pVCpu->cpum.GstCtx.gs.Sel = uNewGS;
2646 pVCpu->cpum.GstCtx.gs.Attr.u &= ~X86DESCATTR_P;
2647 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2648
2649 pVCpu->cpum.GstCtx.ldtr.Sel = uNewLdt;
2650 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
2651 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_P;
2652 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
2653
2654 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2655 {
2656 pVCpu->cpum.GstCtx.es.Attr.u |= X86DESCATTR_UNUSABLE;
2657 pVCpu->cpum.GstCtx.cs.Attr.u |= X86DESCATTR_UNUSABLE;
2658 pVCpu->cpum.GstCtx.ss.Attr.u |= X86DESCATTR_UNUSABLE;
2659 pVCpu->cpum.GstCtx.ds.Attr.u |= X86DESCATTR_UNUSABLE;
2660 pVCpu->cpum.GstCtx.fs.Attr.u |= X86DESCATTR_UNUSABLE;
2661 pVCpu->cpum.GstCtx.gs.Attr.u |= X86DESCATTR_UNUSABLE;
2662 pVCpu->cpum.GstCtx.ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
2663 }
2664
2665 /*
2666 * Switch CR3 for the new task.
2667 */
2668 if ( fIsNewTSS386
2669 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG))
2670 {
2671 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
2672 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
2673 AssertRCSuccessReturn(rc, rc);
2674
2675 /* Inform PGM. */
2676 /** @todo Should we raise \#GP(0) here when PAE PDPEs are invalid? */
2677 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PGE));
2678 AssertRCReturn(rc, rc);
2679 /* ignore informational status codes */
2680
2681 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
2682 }
2683
2684 /*
2685 * Switch LDTR for the new task.
2686 */
2687 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
2688 iemHlpLoadNullDataSelectorProt(pVCpu, &pVCpu->cpum.GstCtx.ldtr, uNewLdt);
2689 else
2690 {
2691 Assert(!pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
2692
2693 IEMSELDESC DescNewLdt;
2694 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
2695 if (rcStrict != VINF_SUCCESS)
2696 {
2697 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
2698 uNewLdt, pVCpu->cpum.GstCtx.gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
2699 return rcStrict;
2700 }
2701 if ( !DescNewLdt.Legacy.Gen.u1Present
2702 || DescNewLdt.Legacy.Gen.u1DescType
2703 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
2704 {
2705 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
2706 uNewLdt, DescNewLdt.Legacy.u));
2707 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
2708 }
2709
2710 pVCpu->cpum.GstCtx.ldtr.ValidSel = uNewLdt;
2711 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
2712 pVCpu->cpum.GstCtx.ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
2713 pVCpu->cpum.GstCtx.ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
2714 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
2715 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2716 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
2717 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
2718 }
2719
2720 IEMSELDESC DescSS;
2721 if (IEM_IS_V86_MODE(pVCpu))
2722 {
2723 pVCpu->iem.s.uCpl = 3;
2724 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.es, uNewES);
2725 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.cs, uNewCS);
2726 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ss, uNewSS);
2727 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ds, uNewDS);
2728 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.fs, uNewFS);
2729 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.gs, uNewGS);
2730
2731 /* Quick fix: fake DescSS. */ /** @todo fix the code further down? */
2732 DescSS.Legacy.u = 0;
2733 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u32Limit;
2734 DescSS.Legacy.Gen.u4LimitHigh = pVCpu->cpum.GstCtx.ss.u32Limit >> 16;
2735 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u64Base;
2736 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 16);
2737 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 24);
2738 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
2739 DescSS.Legacy.Gen.u2Dpl = 3;
2740 }
2741 else
2742 {
2743 uint8_t const uNewCpl = (uNewCS & X86_SEL_RPL);
2744
2745 /*
2746 * Load the stack segment for the new task.
2747 */
2748 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
2749 {
2750 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
2751 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2752 }
2753
2754 /* Fetch the descriptor. */
2755 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
2756 if (rcStrict != VINF_SUCCESS)
2757 {
2758 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
2759 VBOXSTRICTRC_VAL(rcStrict)));
2760 return rcStrict;
2761 }
2762
2763 /* SS must be a data segment and writable. */
2764 if ( !DescSS.Legacy.Gen.u1DescType
2765 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
2766 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
2767 {
2768 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
2769 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
2770 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2771 }
2772
2773 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
2774 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
2775 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
2776 {
2777 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
2778 uNewCpl));
2779 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2780 }
2781
2782 /* Is it there? */
2783 if (!DescSS.Legacy.Gen.u1Present)
2784 {
2785 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
2786 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2787 }
2788
2789 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
2790 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
2791
2792 /* Set the accessed bit before committing the result into SS. */
2793 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2794 {
2795 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
2796 if (rcStrict != VINF_SUCCESS)
2797 return rcStrict;
2798 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2799 }
2800
2801 /* Commit SS. */
2802 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
2803 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSS;
2804 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
2805 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimit;
2806 pVCpu->cpum.GstCtx.ss.u64Base = u64Base;
2807 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
2808 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
2809
2810 /* CPL has changed, update IEM before loading rest of segments. */
2811 pVCpu->iem.s.uCpl = uNewCpl;
2812
2813 /*
2814 * Load the data segments for the new task.
2815 */
2816 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.es, uNewES);
2817 if (rcStrict != VINF_SUCCESS)
2818 return rcStrict;
2819 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.ds, uNewDS);
2820 if (rcStrict != VINF_SUCCESS)
2821 return rcStrict;
2822 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.fs, uNewFS);
2823 if (rcStrict != VINF_SUCCESS)
2824 return rcStrict;
2825 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.gs, uNewGS);
2826 if (rcStrict != VINF_SUCCESS)
2827 return rcStrict;
2828
2829 /*
2830 * Load the code segment for the new task.
2831 */
2832 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
2833 {
2834 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
2835 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2836 }
2837
2838 /* Fetch the descriptor. */
2839 IEMSELDESC DescCS;
2840 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
2841 if (rcStrict != VINF_SUCCESS)
2842 {
2843 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
2844 return rcStrict;
2845 }
2846
2847 /* CS must be a code segment. */
2848 if ( !DescCS.Legacy.Gen.u1DescType
2849 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
2850 {
2851 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
2852 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
2853 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2854 }
2855
2856 /* For conforming CS, DPL must be less than or equal to the RPL. */
2857 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2858 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
2859 {
2860 Log(("iemTaskSwitch: confirming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
2861 DescCS.Legacy.Gen.u2Dpl));
2862 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2863 }
2864
2865 /* For non-conforming CS, DPL must match RPL. */
2866 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2867 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
2868 {
2869 Log(("iemTaskSwitch: non-confirming CS DPL RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
2870 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
2871 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2872 }
2873
2874 /* Is it there? */
2875 if (!DescCS.Legacy.Gen.u1Present)
2876 {
2877 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
2878 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2879 }
2880
2881 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
2882 u64Base = X86DESC_BASE(&DescCS.Legacy);
2883
2884 /* Set the accessed bit before committing the result into CS. */
2885 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2886 {
2887 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
2888 if (rcStrict != VINF_SUCCESS)
2889 return rcStrict;
2890 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2891 }
2892
2893 /* Commit CS. */
2894 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
2895 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCS;
2896 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
2897 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
2898 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
2899 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
2900 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
2901 }
2902
2903 /** @todo Debug trap. */
2904 if (fIsNewTSS386 && fNewDebugTrap)
2905 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
2906
2907 /*
2908 * Construct the error code masks based on what caused this task switch.
2909 * See Intel Instruction reference for INT.
2910 */
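    /* uExt becomes the EXT bit (bit 0) of any error code pushed below: set when the event that caused
       the task switch was not a software interrupt, i.e. for hardware interrupts, exceptions and ICEBP. */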
2911 uint16_t uExt;
2912 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
2913 && ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
2914 || (fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)))
2915 {
2916 uExt = 1;
2917 }
2918 else
2919 uExt = 0;
2920
2921 /*
2922 * Push any error code on to the new stack.
2923 */
2924 if (fFlags & IEM_XCPT_FLAGS_ERR)
2925 {
2926 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
2927 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
2928 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
2929
2930 /* Check that there is sufficient space on the stack. */
2931 /** @todo Factor out segment limit checking for normal/expand down segments
2932 * into a separate function. */
2933 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
2934 {
2935 if ( pVCpu->cpum.GstCtx.esp - 1 > cbLimitSS
2936 || pVCpu->cpum.GstCtx.esp < cbStackFrame)
2937 {
2938 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
2939 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
2940 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
2941 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
2942 }
2943 }
2944 else
2945 {
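            /* Expand-down segment: valid offsets are (limit, upper bound], where the upper bound is
               0xffffffff for a big (D/B=1) segment and 0xffff otherwise. */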
2946 if ( pVCpu->cpum.GstCtx.esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
2947 || pVCpu->cpum.GstCtx.esp - cbStackFrame < cbLimitSS + UINT32_C(1))
2948 {
2949 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
2950 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
2951 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
2952 }
2953 }
2954
2955
2956 if (fIsNewTSS386)
2957 rcStrict = iemMemStackPushU32(pVCpu, uErr);
2958 else
2959 rcStrict = iemMemStackPushU16(pVCpu, uErr);
2960 if (rcStrict != VINF_SUCCESS)
2961 {
2962 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
2963 fIsNewTSS386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
2964 return rcStrict;
2965 }
2966 }
2967
2968 /* Check the new EIP against the new CS limit. */
2969 if (pVCpu->cpum.GstCtx.eip > pVCpu->cpum.GstCtx.cs.u32Limit)
2970 {
2971 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
2972 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.cs.u32Limit));
2973 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
2974 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
2975 }
2976
2977 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip,
2978 pVCpu->cpum.GstCtx.ss.Sel));
2979 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
2980}
2981
2982
2983/**
2984 * Implements exceptions and interrupts for protected mode.
2985 *
2986 * @returns VBox strict status code.
2987 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2988 * @param cbInstr The number of bytes to offset rIP by in the return
2989 * address.
2990 * @param u8Vector The interrupt / exception vector number.
2991 * @param fFlags The flags.
2992 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2993 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2994 */
2995static VBOXSTRICTRC
2996iemRaiseXcptOrIntInProtMode(PVMCPUCC pVCpu,
2997 uint8_t cbInstr,
2998 uint8_t u8Vector,
2999 uint32_t fFlags,
3000 uint16_t uErr,
3001 uint64_t uCr2) RT_NOEXCEPT
3002{
3003 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3004
3005 /*
3006 * Read the IDT entry.
3007 */
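/* Each protected-mode IDT entry is 8 bytes; make sure the whole descriptor lies within the IDT limit. */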
3008 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
3009 {
3010 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3011 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3012 }
3013 X86DESC Idte;
3014 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
3015 pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(8) * u8Vector);
3016 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3017 {
3018 Log(("iemRaiseXcptOrIntInProtMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3019 return rcStrict;
3020 }
3021 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
3022 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3023 Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
3024
3025 /*
3026 * Check the descriptor type, DPL and such.
3027 * ASSUMES this is done in the same order as described for call-gate calls.
3028 */
3029 if (Idte.Gate.u1DescType)
3030 {
3031 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3032 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3033 }
3034 bool fTaskGate = false;
3035 uint8_t f32BitGate = true;
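/* Note: besides selecting 16- vs 32-bit gate handling, f32BitGate is used below as a shift count when sizing the stack frames. */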
3036 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
3037 switch (Idte.Gate.u4Type)
3038 {
3039 case X86_SEL_TYPE_SYS_UNDEFINED:
3040 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
3041 case X86_SEL_TYPE_SYS_LDT:
3042 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3043 case X86_SEL_TYPE_SYS_286_CALL_GATE:
3044 case X86_SEL_TYPE_SYS_UNDEFINED2:
3045 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
3046 case X86_SEL_TYPE_SYS_UNDEFINED3:
3047 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3048 case X86_SEL_TYPE_SYS_386_CALL_GATE:
3049 case X86_SEL_TYPE_SYS_UNDEFINED4:
3050 {
3051 /** @todo check what actually happens when the type is wrong...
3052 * esp. call gates. */
3053 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3054 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3055 }
3056
3057 case X86_SEL_TYPE_SYS_286_INT_GATE:
3058 f32BitGate = false;
3059 RT_FALL_THRU();
3060 case X86_SEL_TYPE_SYS_386_INT_GATE:
3061 fEflToClear |= X86_EFL_IF;
3062 break;
3063
3064 case X86_SEL_TYPE_SYS_TASK_GATE:
3065 fTaskGate = true;
3066#ifndef IEM_IMPLEMENTS_TASKSWITCH
3067 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
3068#endif
3069 break;
3070
3071 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
3072 f32BitGate = false;
RT_FALL_THRU();
3073 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
3074 break;
3075
3076 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3077 }
3078
3079 /* Check DPL against CPL if applicable. */
3080 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
3081 {
3082 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
3083 {
3084 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
3085 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3086 }
3087 }
3088
3089 /* Is it there? */
3090 if (!Idte.Gate.u1Present)
3091 {
3092 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
3093 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3094 }
3095
3096 /* Is it a task-gate? */
3097 if (fTaskGate)
3098 {
3099 /*
3100 * Construct the error code masks based on what caused this task switch.
3101 * See Intel Instruction reference for INT.
3102 */
3103 uint16_t const uExt = ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3104 && !(fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)) ? 0 : 1;
3105 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
3106 RTSEL SelTSS = Idte.Gate.u16Sel;
3107
3108 /*
3109 * Fetch the TSS descriptor in the GDT.
3110 */
3111 IEMSELDESC DescTSS;
3112 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
3113 if (rcStrict != VINF_SUCCESS)
3114 {
3115 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
3116 VBOXSTRICTRC_VAL(rcStrict)));
3117 return rcStrict;
3118 }
3119
3120 /* The TSS descriptor must be a system segment and be available (not busy). */
3121 if ( DescTSS.Legacy.Gen.u1DescType
3122 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
3123 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
3124 {
3125 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
3126 u8Vector, SelTSS, DescTSS.Legacy.au64));
3127 return iemRaiseGeneralProtectionFault(pVCpu, (SelTSS & uSelMask) | uExt);
3128 }
3129
3130 /* The TSS must be present. */
3131 if (!DescTSS.Legacy.Gen.u1Present)
3132 {
3133 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
3134 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTSS & uSelMask) | uExt);
3135 }
3136
3137 /* Do the actual task switch. */
3138 return iemTaskSwitch(pVCpu, IEMTASKSWITCH_INT_XCPT,
3139 (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip,
3140 fFlags, uErr, uCr2, SelTSS, &DescTSS);
3141 }
3142
3143 /* A null CS is bad. */
3144 RTSEL NewCS = Idte.Gate.u16Sel;
3145 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3146 {
3147 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3148 return iemRaiseGeneralProtectionFault0(pVCpu);
3149 }
3150
3151 /* Fetch the descriptor for the new CS. */
3152 IEMSELDESC DescCS;
3153 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
3154 if (rcStrict != VINF_SUCCESS)
3155 {
3156 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3157 return rcStrict;
3158 }
3159
3160 /* Must be a code segment. */
3161 if (!DescCS.Legacy.Gen.u1DescType)
3162 {
3163 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3164 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3165 }
3166 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3167 {
3168 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3169 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3170 }
3171
3172 /* Don't allow lowering the privilege level. */
3173 /** @todo Does the lowering of privileges apply to software interrupts
3174 * only? This has bearings on the more-privileged or
3175 * same-privilege stack behavior further down. A testcase would
3176 * be nice. */
3177 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
3178 {
3179 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3180 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
3181 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3182 }
3183
3184 /* Make sure the selector is present. */
3185 if (!DescCS.Legacy.Gen.u1Present)
3186 {
3187 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3188 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
3189 }
3190
3191 /* Check the new EIP against the new CS limit. */
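/* 286 gates only carry a 16-bit offset; 386 gates combine the low and high offset words. */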
3192 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
3193 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
3194 ? Idte.Gate.u16OffsetLow
3195 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
3196 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
3197 if (uNewEip > cbLimitCS)
3198 {
3199 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
3200 u8Vector, uNewEip, cbLimitCS, NewCS));
3201 return iemRaiseGeneralProtectionFault(pVCpu, 0);
3202 }
3203 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
3204
3205 /* Calc the flag image to push. */
3206 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3207 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3208 fEfl &= ~X86_EFL_RF;
3209 else
3210 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3211
3212 /* From V8086 mode only go to CPL 0. */
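/* A conforming CS keeps the current CPL; a non-conforming CS runs the handler at its own DPL. */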
3213 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3214 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
3215 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
3216 {
3217 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
3218 return iemRaiseGeneralProtectionFault(pVCpu, 0);
3219 }
3220
3221 /*
3222 * If the privilege level changes, we need to get a new stack from the TSS.
3223 * This in turns means validating the new SS and ESP...
3224 */
3225 if (uNewCpl != pVCpu->iem.s.uCpl)
3226 {
3227 RTSEL NewSS;
3228 uint32_t uNewEsp;
3229 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, uNewCpl, &NewSS, &uNewEsp);
3230 if (rcStrict != VINF_SUCCESS)
3231 return rcStrict;
3232
3233 IEMSELDESC DescSS;
3234 rcStrict = iemMiscValidateNewSS(pVCpu, NewSS, uNewCpl, &DescSS);
3235 if (rcStrict != VINF_SUCCESS)
3236 return rcStrict;
3237 /* If the new SS is 16-bit, we are only going to use SP, not ESP. */
3238 if (!DescSS.Legacy.Gen.u1DefBig)
3239 {
3240 Log(("iemRaiseXcptOrIntInProtMode: Forcing ESP=%#x to 16 bits\n", uNewEsp));
3241 uNewEsp = (uint16_t)uNewEsp;
3242 }
3243
3244 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
3245
3246 /* Check that there is sufficient space for the stack frame. */
3247 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
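/* Frame sizes in bytes: IP, CS, FLAGS, old SP and old SS (5 entries) plus an optional error code; V8086 mode adds ES, DS, FS and GS. Entries are 2 bytes for 16-bit gates, and the shift by f32BitGate doubles that for 32-bit gates. */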
3248 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
3249 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
3250 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
3251
3252 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3253 {
3254 if ( uNewEsp - 1 > cbLimitSS
3255 || uNewEsp < cbStackFrame)
3256 {
3257 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
3258 u8Vector, NewSS, uNewEsp, cbStackFrame));
3259 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
3260 }
3261 }
3262 else
3263 {
3264 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT16_MAX)
3265 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
3266 {
3267 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
3268 u8Vector, NewSS, uNewEsp, cbStackFrame));
3269 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
3270 }
3271 }
3272
3273 /*
3274 * Start making changes.
3275 */
3276
3277 /* Set the new CPL so that stack accesses use it. */
3278 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
3279 pVCpu->iem.s.uCpl = uNewCpl;
3280
3281 /* Create the stack frame. */
3282 RTPTRUNION uStackFrame;
3283 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
3284 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy),
3285 IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */
3286 if (rcStrict != VINF_SUCCESS)
3287 return rcStrict;
3288 void * const pvStackFrame = uStackFrame.pv;
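/* Lay out the frame from its lowest address upwards: optional error code, return EIP/IP, CS, EFLAGS, old ESP/SP and SS, and in V8086 mode also ES, DS, FS and GS. */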
3289 if (f32BitGate)
3290 {
3291 if (fFlags & IEM_XCPT_FLAGS_ERR)
3292 *uStackFrame.pu32++ = uErr;
3293 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3294 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
3295 uStackFrame.pu32[2] = fEfl;
3296 uStackFrame.pu32[3] = pVCpu->cpum.GstCtx.esp;
3297 uStackFrame.pu32[4] = pVCpu->cpum.GstCtx.ss.Sel;
3298 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
3299 if (fEfl & X86_EFL_VM)
3300 {
3301 uStackFrame.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel;
3302 uStackFrame.pu32[5] = pVCpu->cpum.GstCtx.es.Sel;
3303 uStackFrame.pu32[6] = pVCpu->cpum.GstCtx.ds.Sel;
3304 uStackFrame.pu32[7] = pVCpu->cpum.GstCtx.fs.Sel;
3305 uStackFrame.pu32[8] = pVCpu->cpum.GstCtx.gs.Sel;
3306 }
3307 }
3308 else
3309 {
3310 if (fFlags & IEM_XCPT_FLAGS_ERR)
3311 *uStackFrame.pu16++ = uErr;
3312 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
3313 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
3314 uStackFrame.pu16[2] = fEfl;
3315 uStackFrame.pu16[3] = pVCpu->cpum.GstCtx.sp;
3316 uStackFrame.pu16[4] = pVCpu->cpum.GstCtx.ss.Sel;
3317 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.sp));
3318 if (fEfl & X86_EFL_VM)
3319 {
3320 uStackFrame.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
3321 uStackFrame.pu16[5] = pVCpu->cpum.GstCtx.es.Sel;
3322 uStackFrame.pu16[6] = pVCpu->cpum.GstCtx.ds.Sel;
3323 uStackFrame.pu16[7] = pVCpu->cpum.GstCtx.fs.Sel;
3324 uStackFrame.pu16[8] = pVCpu->cpum.GstCtx.gs.Sel;
3325 }
3326 }
3327 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
3328 if (rcStrict != VINF_SUCCESS)
3329 return rcStrict;
3330
3331 /* Mark the selectors 'accessed' (hope this is the correct time). */
3332 /** @todo testcase: exactly _when_ are the accessed bits set - before or
3333 * after pushing the stack frame? (Write protect the gdt + stack to
3334 * find out.) */
3335 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3336 {
3337 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3338 if (rcStrict != VINF_SUCCESS)
3339 return rcStrict;
3340 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3341 }
3342
3343 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3344 {
3345 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
3346 if (rcStrict != VINF_SUCCESS)
3347 return rcStrict;
3348 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3349 }
3350
3351 /*
3352 * Start committing the register changes (joins with the DPL=CPL branch).
3353 */
3354 pVCpu->cpum.GstCtx.ss.Sel = NewSS;
3355 pVCpu->cpum.GstCtx.ss.ValidSel = NewSS;
3356 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3357 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimitSS;
3358 pVCpu->cpum.GstCtx.ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
3359 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3360 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
3361 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
3362 * SP is loaded).
3363 * Need to check the other combinations too:
3364 * - 16-bit TSS, 32-bit handler
3365 * - 32-bit TSS, 16-bit handler */
3366 if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
3367 pVCpu->cpum.GstCtx.sp = (uint16_t)(uNewEsp - cbStackFrame);
3368 else
3369 pVCpu->cpum.GstCtx.rsp = uNewEsp - cbStackFrame;
3370
3371 if (fEfl & X86_EFL_VM)
3372 {
3373 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.gs);
3374 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.fs);
3375 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.es);
3376 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.ds);
3377 }
3378 }
3379 /*
3380 * Same privilege, no stack change and smaller stack frame.
3381 */
3382 else
3383 {
3384 uint64_t uNewRsp;
3385 RTPTRUNION uStackFrame;
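/* Same-privilege frame: IP, CS and FLAGS plus an optional error code; 2 bytes per entry for 16-bit gates, doubled for 32-bit gates. */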
3386 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
3387 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, f32BitGate ? 3 : 1, &uStackFrame.pv, &uNewRsp);
3388 if (rcStrict != VINF_SUCCESS)
3389 return rcStrict;
3390 void * const pvStackFrame = uStackFrame.pv;
3391
3392 if (f32BitGate)
3393 {
3394 if (fFlags & IEM_XCPT_FLAGS_ERR)
3395 *uStackFrame.pu32++ = uErr;
3396 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3397 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
3398 uStackFrame.pu32[2] = fEfl;
3399 }
3400 else
3401 {
3402 if (fFlags & IEM_XCPT_FLAGS_ERR)
3403 *uStackFrame.pu16++ = uErr;
3404 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3405 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
3406 uStackFrame.pu16[2] = fEfl;
3407 }
3408 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
3409 if (rcStrict != VINF_SUCCESS)
3410 return rcStrict;
3411
3412 /* Mark the CS selector as 'accessed'. */
3413 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3414 {
3415 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3416 if (rcStrict != VINF_SUCCESS)
3417 return rcStrict;
3418 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3419 }
3420
3421 /*
3422 * Start committing the register changes (joins with the other branch).
3423 */
3424 pVCpu->cpum.GstCtx.rsp = uNewRsp;
3425 }
3426
3427 /* ... register committing continues. */
3428 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3429 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3430 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3431 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCS;
3432 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3433 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3434
3435 pVCpu->cpum.GstCtx.rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
3436 fEfl &= ~fEflToClear;
3437 IEMMISC_SET_EFL(pVCpu, fEfl);
3438
3439 if (fFlags & IEM_XCPT_FLAGS_CR2)
3440 pVCpu->cpum.GstCtx.cr2 = uCr2;
3441
3442 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3443 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3444
3445 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3446}
3447
3448
3449/**
3450 * Implements exceptions and interrupts for long mode.
3451 *
3452 * @returns VBox strict status code.
3453 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3454 * @param cbInstr The number of bytes to offset rIP by in the return
3455 * address.
3456 * @param u8Vector The interrupt / exception vector number.
3457 * @param fFlags The flags.
3458 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3459 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3460 */
3461static VBOXSTRICTRC
3462iemRaiseXcptOrIntInLongMode(PVMCPUCC pVCpu,
3463 uint8_t cbInstr,
3464 uint8_t u8Vector,
3465 uint32_t fFlags,
3466 uint16_t uErr,
3467 uint64_t uCr2) RT_NOEXCEPT
3468{
3469 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3470
3471 /*
3472 * Read the IDT entry.
3473 */
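/* Long-mode IDT entries are 16 bytes, hence the two 8-byte fetches below. */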
3474 uint16_t offIdt = (uint16_t)u8Vector << 4;
3475 if (pVCpu->cpum.GstCtx.idtr.cbIdt < offIdt + 7)
3476 {
3477 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3478 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3479 }
3480 X86DESC64 Idte;
3481#ifdef _MSC_VER /* Shut up silly compiler warning. */
3482 Idte.au64[0] = 0;
3483 Idte.au64[1] = 0;
3484#endif
3485 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt);
3486 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3487 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt + 8);
3488 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3489 {
3490 Log(("iemRaiseXcptOrIntInLongMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3491 return rcStrict;
3492 }
3493 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
3494 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3495 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
3496
3497 /*
3498 * Check the descriptor type, DPL and such.
3499 * ASSUMES this is done in the same order as described for call-gate calls.
3500 */
3501 if (Idte.Gate.u1DescType)
3502 {
3503 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3504 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3505 }
3506 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
3507 switch (Idte.Gate.u4Type)
3508 {
3509 case AMD64_SEL_TYPE_SYS_INT_GATE:
3510 fEflToClear |= X86_EFL_IF;
3511 break;
3512 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
3513 break;
3514
3515 default:
3516 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3517 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3518 }
3519
3520 /* Check DPL against CPL if applicable. */
3521 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
3522 {
3523 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
3524 {
3525 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
3526 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3527 }
3528 }
3529
3530 /* Is it there? */
3531 if (!Idte.Gate.u1Present)
3532 {
3533 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
3534 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3535 }
3536
3537 /* A null CS is bad. */
3538 RTSEL NewCS = Idte.Gate.u16Sel;
3539 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3540 {
3541 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3542 return iemRaiseGeneralProtectionFault0(pVCpu);
3543 }
3544
3545 /* Fetch the descriptor for the new CS. */
3546 IEMSELDESC DescCS;
3547 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
3548 if (rcStrict != VINF_SUCCESS)
3549 {
3550 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3551 return rcStrict;
3552 }
3553
3554 /* Must be a 64-bit code segment. */
3555 if (!DescCS.Long.Gen.u1DescType)
3556 {
3557 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3558 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3559 }
3560 if ( !DescCS.Long.Gen.u1Long
3561 || DescCS.Long.Gen.u1DefBig
3562 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
3563 {
3564 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
3565 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
3566 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3567 }
3568
3569 /* Don't allow lowering the privilege level. For non-conforming CS
3570 selectors, the CS.DPL sets the privilege level the trap/interrupt
3571 handler runs at. For conforming CS selectors, the CPL remains
3572 unchanged, but the CS.DPL must be <= CPL. */
3573 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
3574 * when CPU in Ring-0. Result \#GP? */
3575 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
3576 {
3577 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3578 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
3579 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3580 }
3581
3582
3583 /* Make sure the selector is present. */
3584 if (!DescCS.Legacy.Gen.u1Present)
3585 {
3586 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3587 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
3588 }
3589
3590 /* Check that the new RIP is canonical. */
3591 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
3592 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
3593 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
3594 if (!IEM_IS_CANONICAL(uNewRip))
3595 {
3596 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
3597 return iemRaiseGeneralProtectionFault0(pVCpu);
3598 }
3599
3600 /*
3601 * If the privilege level changes or if the IST isn't zero, we need to get
3602 * a new stack from the TSS.
3603 */
3604 uint64_t uNewRsp;
3605 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3606 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
3607 if ( uNewCpl != pVCpu->iem.s.uCpl
3608 || Idte.Gate.u3IST != 0)
3609 {
3610 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
3611 if (rcStrict != VINF_SUCCESS)
3612 return rcStrict;
3613 }
3614 else
3615 uNewRsp = pVCpu->cpum.GstCtx.rsp;
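/* In long mode the stack is aligned down to a 16-byte boundary before the frame is pushed. */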
3616 uNewRsp &= ~(uint64_t)0xf;
3617
3618 /*
3619 * Calc the flag image to push.
3620 */
3621 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3622 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3623 fEfl &= ~X86_EFL_RF;
3624 else
3625 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3626
3627 /*
3628 * Start making changes.
3629 */
3630 /* Set the new CPL so that stack accesses use it. */
3631 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
3632 pVCpu->iem.s.uCpl = uNewCpl;
3633
3634 /* Create the stack frame. */
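/* Five quadwords - RIP, CS, RFLAGS, RSP and SS - plus an optional error code. */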
3635 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
3636 RTPTRUNION uStackFrame;
3637 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
3638 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */
3639 if (rcStrict != VINF_SUCCESS)
3640 return rcStrict;
3641 void * const pvStackFrame = uStackFrame.pv;
3642
3643 if (fFlags & IEM_XCPT_FLAGS_ERR)
3644 *uStackFrame.pu64++ = uErr;
3645 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.rip + cbInstr : pVCpu->cpum.GstCtx.rip;
3646 uStackFrame.pu64[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
3647 uStackFrame.pu64[2] = fEfl;
3648 uStackFrame.pu64[3] = pVCpu->cpum.GstCtx.rsp;
3649 uStackFrame.pu64[4] = pVCpu->cpum.GstCtx.ss.Sel;
3650 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
3651 if (rcStrict != VINF_SUCCESS)
3652 return rcStrict;
3653
3654 /* Mark the CS selector 'accessed' (hope this is the correct time). */
3655 /** @todo testcase: exactly _when_ are the accessed bits set - before or
3656 * after pushing the stack frame? (Write protect the gdt + stack to
3657 * find out.) */
3658 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3659 {
3660 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3661 if (rcStrict != VINF_SUCCESS)
3662 return rcStrict;
3663 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3664 }
3665
3666 /*
3667 * Start committing the register changes.
3668 */
3669 /** @todo research/testcase: Figure out what VT-x and AMD-V loads into the
3670 * hidden registers when interrupting 32-bit or 16-bit code! */
3671 if (uNewCpl != uOldCpl)
3672 {
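/* On a CPL change in long mode, SS is loaded with a NULL selector carrying the new CPL as RPL; the attributes mark it unusable. */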
3673 pVCpu->cpum.GstCtx.ss.Sel = 0 | uNewCpl;
3674 pVCpu->cpum.GstCtx.ss.ValidSel = 0 | uNewCpl;
3675 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3676 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
3677 pVCpu->cpum.GstCtx.ss.u64Base = 0;
3678 pVCpu->cpum.GstCtx.ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
3679 }
3680 pVCpu->cpum.GstCtx.rsp = uNewRsp - cbStackFrame;
3681 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3682 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3683 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3684 pVCpu->cpum.GstCtx.cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
3685 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3686 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3687 pVCpu->cpum.GstCtx.rip = uNewRip;
3688
3689 fEfl &= ~fEflToClear;
3690 IEMMISC_SET_EFL(pVCpu, fEfl);
3691
3692 if (fFlags & IEM_XCPT_FLAGS_CR2)
3693 pVCpu->cpum.GstCtx.cr2 = uCr2;
3694
3695 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3696 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3697
3698 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3699}
3700
3701
3702/**
3703 * Implements exceptions and interrupts.
3704 *
3705 * All exceptions and interrupts go through this function!
3706 *
3707 * @returns VBox strict status code.
3708 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3709 * @param cbInstr The number of bytes to offset rIP by in the return
3710 * address.
3711 * @param u8Vector The interrupt / exception vector number.
3712 * @param fFlags The flags.
3713 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3714 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3715 */
3716VBOXSTRICTRC
3717iemRaiseXcptOrInt(PVMCPUCC pVCpu,
3718 uint8_t cbInstr,
3719 uint8_t u8Vector,
3720 uint32_t fFlags,
3721 uint16_t uErr,
3722 uint64_t uCr2) RT_NOEXCEPT
3723{
3724 /*
3725 * Get all the state that we might need here.
3726 */
3727 IEM_CTX_IMPORT_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3728 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3729
3730#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
3731 /*
3732 * Flush prefetch buffer
3733 */
3734 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
3735#endif
3736
3737 /*
3738 * Perform the V8086 IOPL check and upgrade the fault without nesting.
3739 */
3740 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1VM
3741 && pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL != 3
3742 && (fFlags & ( IEM_XCPT_FLAGS_T_SOFT_INT
3743 | IEM_XCPT_FLAGS_BP_INSTR
3744 | IEM_XCPT_FLAGS_ICEBP_INSTR
3745 | IEM_XCPT_FLAGS_OF_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
3746 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) )
3747 {
3748 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
3749 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
3750 u8Vector = X86_XCPT_GP;
3751 uErr = 0;
3752 }
3753#ifdef DBGFTRACE_ENABLED
3754 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
3755 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
3756 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp);
3757#endif
3758
3759 /*
3760 * Evaluate whether NMI blocking should be in effect.
3761 * Normally, NMI blocking is in effect whenever we inject an NMI.
3762 */
3763 bool fBlockNmi = u8Vector == X86_XCPT_NMI
3764 && (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT);
3765
3766#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3767 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3768 {
3769 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEvent(pVCpu, u8Vector, fFlags, uErr, uCr2, cbInstr);
3770 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
3771 return rcStrict0;
3772
3773 /* If virtual-NMI blocking is in effect for the nested-guest, guest NMIs are not blocked. */
3774 if (pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking)
3775 {
3776 Assert(CPUMIsGuestVmxPinCtlsSet(&pVCpu->cpum.GstCtx, VMX_PIN_CTLS_VIRT_NMI));
3777 fBlockNmi = false;
3778 }
3779 }
3780#endif
3781
3782#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
3783 if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
3784 {
3785 /*
3786 * If the event is being injected as part of VMRUN, it isn't subject to event
3787 * intercepts in the nested-guest. However, secondary exceptions that occur
3788 * during injection of any event -are- subject to exception intercepts.
3789 *
3790 * See AMD spec. 15.20 "Event Injection".
3791 */
3792 if (!pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents)
3793 pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents = true;
3794 else
3795 {
3796 /*
3797 * Check and handle if the event being raised is intercepted.
3798 */
3799 VBOXSTRICTRC rcStrict0 = iemHandleSvmEventIntercept(pVCpu, u8Vector, fFlags, uErr, uCr2);
3800 if (rcStrict0 != VINF_SVM_INTERCEPT_NOT_ACTIVE)
3801 return rcStrict0;
3802 }
3803 }
3804#endif
3805
3806 /*
3807 * Set NMI blocking if necessary.
3808 */
3809 if (fBlockNmi)
3810 CPUMSetInterruptInhibitingByNmi(&pVCpu->cpum.GstCtx);
3811
3812 /*
3813 * Do recursion accounting.
3814 */
3815 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
3816 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
3817 if (pVCpu->iem.s.cXcptRecursions == 0)
3818 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
3819 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2));
3820 else
3821 {
3822 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
3823 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt,
3824 pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
3825
3826 if (pVCpu->iem.s.cXcptRecursions >= 4)
3827 {
3828#ifdef DEBUG_bird
3829 AssertFailed();
3830#endif
3831 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
3832 }
3833
3834 /*
3835 * Evaluate the sequence of recurring events.
3836 */
3837 IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevXcpt, uPrevXcpt, fFlags, u8Vector,
3838 NULL /* pXcptRaiseInfo */);
3839 if (enmRaise == IEMXCPTRAISE_CURRENT_XCPT)
3840 { /* likely */ }
3841 else if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
3842 {
3843 Log2(("iemRaiseXcptOrInt: Raising double fault. uPrevXcpt=%#x\n", uPrevXcpt));
3844 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
3845 u8Vector = X86_XCPT_DF;
3846 uErr = 0;
3847#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3848 /* VMX nested-guest #DF intercept needs to be checked here. */
3849 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3850 {
3851 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEventDoubleFault(pVCpu);
3852 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
3853 return rcStrict0;
3854 }
3855#endif
3856 /* SVM nested-guest #DF intercepts need to be checked now. See AMD spec. 15.12 "Exception Intercepts". */
3857 if (IEM_SVM_IS_XCPT_INTERCEPT_SET(pVCpu, X86_XCPT_DF))
3858 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
3859 }
3860 else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)
3861 {
3862 Log2(("iemRaiseXcptOrInt: Raising triple fault. uPrevXcpt=%#x\n", uPrevXcpt));
3863 return iemInitiateCpuShutdown(pVCpu);
3864 }
3865 else if (enmRaise == IEMXCPTRAISE_CPU_HANG)
3866 {
3867 /* If a nested-guest enters an endless CPU loop condition, we'll emulate it; otherwise guru. */
3868 Log2(("iemRaiseXcptOrInt: CPU hang condition detected\n"));
3869 if ( !CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu))
3870 && !CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
3871 return VERR_EM_GUEST_CPU_HANG;
3872 }
3873 else
3874 {
3875 AssertMsgFailed(("Unexpected condition! enmRaise=%#x uPrevXcpt=%#x fPrevXcpt=%#x, u8Vector=%#x fFlags=%#x\n",
3876 enmRaise, uPrevXcpt, fPrevXcpt, u8Vector, fFlags));
3877 return VERR_IEM_IPE_9;
3878 }
3879
3880 /*
3881 * The 'EXT' bit is set when an exception occurs during delivery of an external
3882 * event (such as an interrupt or an earlier exception)[1]. The privileged software
3883 * exception (INT1) also sets the EXT bit[2]. For exceptions generated by software
3884 * interrupts and the INTO and INT3 instructions, the 'EXT' bit is not set[3].
3885 *
3886 * [1] - Intel spec. 6.13 "Error Code"
3887 * [2] - Intel spec. 26.5.1.1 "Details of Vectored-Event Injection".
3888 * [3] - Intel Instruction reference for INT n.
3889 */
3890 if ( (fPrevXcpt & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR))
3891 && (fFlags & IEM_XCPT_FLAGS_ERR)
3892 && u8Vector != X86_XCPT_PF
3893 && u8Vector != X86_XCPT_DF)
3894 {
3895 uErr |= X86_TRAP_ERR_EXTERNAL;
3896 }
3897 }
3898
3899 pVCpu->iem.s.cXcptRecursions++;
3900 pVCpu->iem.s.uCurXcpt = u8Vector;
3901 pVCpu->iem.s.fCurXcpt = fFlags;
3902 pVCpu->iem.s.uCurXcptErr = uErr;
3903 pVCpu->iem.s.uCurXcptCr2 = uCr2;
3904
3905 /*
3906 * Extensive logging.
3907 */
3908#if defined(LOG_ENABLED) && defined(IN_RING3)
3909 if (LogIs3Enabled())
3910 {
3911 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR_MASK);
3912 PVM pVM = pVCpu->CTX_SUFF(pVM);
3913 char szRegs[4096];
3914 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
3915 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
3916 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
3917 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
3918 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
3919 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
3920 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
3921 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
3922 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
3923 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
3924 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
3925 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
3926 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
3927 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
3928 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
3929 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
3930 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
3931 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
3932 " efer=%016VR{efer}\n"
3933 " pat=%016VR{pat}\n"
3934 " sf_mask=%016VR{sf_mask}\n"
3935 "krnl_gs_base=%016VR{krnl_gs_base}\n"
3936 " lstar=%016VR{lstar}\n"
3937 " star=%016VR{star} cstar=%016VR{cstar}\n"
3938 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
3939 );
3940
3941 char szInstr[256];
3942 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
3943 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
3944 szInstr, sizeof(szInstr), NULL);
3945 Log3(("%s%s\n", szRegs, szInstr));
3946 }
3947#endif /* LOG_ENABLED */
3948
3949 /*
3950 * Stats.
3951 */
3952 if (!(fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT))
3953 STAM_REL_STATS({ pVCpu->iem.s.aStatInts[u8Vector] += 1; });
3954 else if (u8Vector <= X86_XCPT_LAST)
3955 {
3956 STAM_REL_COUNTER_INC(&pVCpu->iem.s.aStatXcpts[u8Vector]);
3957 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_XCPT, u8Vector),
3958 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base, ASMReadTSC());
3959 }
3960
3961 /*
3962 * A #PF implies an INVLPG of the CR2 value (see 4.10.1.1 in Intel SDM Vol 3),
3963 * ensuring that a stale TLB or paging-structure cache entry causes at most one
3964 * spurious #PF.
3965 */
3966 if ( u8Vector == X86_XCPT_PF
3967 && (fFlags & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_CR2)) == (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_CR2))
3968 IEMTlbInvalidatePage(pVCpu, uCr2);
3969
3970 /*
3971 * Call the mode specific worker function.
3972 */
3973 VBOXSTRICTRC rcStrict;
3974 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
3975 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
3976 else if (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LMA)
3977 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
3978 else
3979 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
3980
3981 /* Flush the prefetch buffer. */
3982#ifdef IEM_WITH_CODE_TLB
3983 pVCpu->iem.s.pbInstrBuf = NULL;
3984#else
3985 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
3986#endif
3987
3988 /*
3989 * Unwind.
3990 */
3991 pVCpu->iem.s.cXcptRecursions--;
3992 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
3993 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
3994 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u depth=%d\n",
3995 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, pVCpu->iem.s.uCpl,
3996 pVCpu->iem.s.cXcptRecursions + 1));
3997 return rcStrict;
3998}
3999
4000#ifdef IEM_WITH_SETJMP
4001/**
4002 * See iemRaiseXcptOrInt. Will not return.
4003 */
4004DECL_NO_RETURN(void)
4005iemRaiseXcptOrIntJmp(PVMCPUCC pVCpu,
4006 uint8_t cbInstr,
4007 uint8_t u8Vector,
4008 uint32_t fFlags,
4009 uint16_t uErr,
4010 uint64_t uCr2) IEM_NOEXCEPT_MAY_LONGJMP
4011{
4012 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4013 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
4014}
4015#endif
4016
4017
4018/** \#DE - 00. */
4019VBOXSTRICTRC iemRaiseDivideError(PVMCPUCC pVCpu) RT_NOEXCEPT
4020{
4021 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4022}
4023
4024
4025/** \#DB - 01.
4026 * @note This automatically clears DR7.GD. */
4027VBOXSTRICTRC iemRaiseDebugException(PVMCPUCC pVCpu) RT_NOEXCEPT
4028{
4029 /* This always clears RF (via IEM_XCPT_FLAGS_DRx_INSTR_BP). */
4030 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
4031 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_DRx_INSTR_BP, 0, 0);
4032}
4033
4034
4035/** \#BR - 05. */
4036VBOXSTRICTRC iemRaiseBoundRangeExceeded(PVMCPUCC pVCpu) RT_NOEXCEPT
4037{
4038 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_BR, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4039}
4040
4041
4042/** \#UD - 06. */
4043VBOXSTRICTRC iemRaiseUndefinedOpcode(PVMCPUCC pVCpu) RT_NOEXCEPT
4044{
4045 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4046}
4047
4048
4049/** \#NM - 07. */
4050VBOXSTRICTRC iemRaiseDeviceNotAvailable(PVMCPUCC pVCpu) RT_NOEXCEPT
4051{
4052 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4053}
4054
4055
4056/** \#TS(err) - 0a. */
4057VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4058{
4059 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4060}
4061
4062
4063/** \#TS(tr) - 0a. */
4064VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPUCC pVCpu) RT_NOEXCEPT
4065{
4066 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4067 pVCpu->cpum.GstCtx.tr.Sel, 0);
4068}
4069
4070
4071/** \#TS(0) - 0a. */
4072VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPUCC pVCpu) RT_NOEXCEPT
4073{
4074 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4075 0, 0);
4076}
4077
4078
4079 /** \#TS(sel) - 0a. */
4080VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4081{
4082 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4083 uSel & X86_SEL_MASK_OFF_RPL, 0);
4084}
4085
4086
4087/** \#NP(err) - 0b. */
4088VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4089{
4090 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4091}
4092
4093
4094/** \#NP(sel) - 0b. */
4095VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4096{
4097 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4098 uSel & ~X86_SEL_RPL, 0);
4099}
4100
4101
4102/** \#SS(seg) - 0c. */
4103VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4104{
4105 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4106 uSel & ~X86_SEL_RPL, 0);
4107}
4108
4109
4110/** \#SS(err) - 0c. */
4111VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4112{
4113 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4114}
4115
4116
4117/** \#GP(n) - 0d. */
4118VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4119{
4120 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4121}
4122
4123
4124/** \#GP(0) - 0d. */
4125VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPUCC pVCpu) RT_NOEXCEPT
4126{
4127 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4128}
4129
4130#ifdef IEM_WITH_SETJMP
4131/** \#GP(0) - 0d. */
4132DECL_NO_RETURN(void) iemRaiseGeneralProtectionFault0Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4133{
4134 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4135}
4136#endif
4137
4138
4139/** \#GP(sel) - 0d. */
4140VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
4141{
4142 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4143 Sel & ~X86_SEL_RPL, 0);
4144}
4145
4146
4147/** \#GP(0) - 0d. */
4148VBOXSTRICTRC iemRaiseNotCanonical(PVMCPUCC pVCpu) RT_NOEXCEPT
4149{
4150 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4151}
4152
4153
4154/** \#GP(sel) - 0d. */
4155VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4156{
4157 NOREF(iSegReg); NOREF(fAccess);
4158 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4159 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4160}
4161
4162#ifdef IEM_WITH_SETJMP
4163/** \#GP(sel) - 0d, longjmp. */
4164DECL_NO_RETURN(void) iemRaiseSelectorBoundsJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP
4165{
4166 NOREF(iSegReg); NOREF(fAccess);
4167 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4168 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4169}
4170#endif
4171
4172/** \#GP(sel) - 0d. */
4173VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
4174{
4175 NOREF(Sel);
4176 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4177}
4178
4179#ifdef IEM_WITH_SETJMP
4180/** \#GP(sel) - 0d, longjmp. */
4181DECL_NO_RETURN(void) iemRaiseSelectorBoundsBySelectorJmp(PVMCPUCC pVCpu, RTSEL Sel) IEM_NOEXCEPT_MAY_LONGJMP
4182{
4183 NOREF(Sel);
4184 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4185}
4186#endif
4187
4188
4189/** \#GP(sel) - 0d. */
4190VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4191{
4192 NOREF(iSegReg); NOREF(fAccess);
4193 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4194}
4195
4196#ifdef IEM_WITH_SETJMP
4197/** \#GP(sel) - 0d, longjmp. */
4198DECL_NO_RETURN(void) iemRaiseSelectorInvalidAccessJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP
4199{
4200 NOREF(iSegReg); NOREF(fAccess);
4201 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4202}
4203#endif
4204
4205
4206/** \#PF(n) - 0e. */
4207VBOXSTRICTRC iemRaisePageFault(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess, uint32_t fAccess, int rc) RT_NOEXCEPT
4208{
4209 uint16_t uErr;
4210 switch (rc)
4211 {
4212 case VERR_PAGE_NOT_PRESENT:
4213 case VERR_PAGE_TABLE_NOT_PRESENT:
4214 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
4215 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
4216 uErr = 0;
4217 break;
4218
4219 default:
4220 AssertMsgFailed(("%Rrc\n", rc));
4221 RT_FALL_THRU();
4222 case VERR_ACCESS_DENIED:
4223 uErr = X86_TRAP_PF_P;
4224 break;
4225
4226 /** @todo reserved */
4227 }
4228
4229 if (pVCpu->iem.s.uCpl == 3)
4230 uErr |= X86_TRAP_PF_US;
4231
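/* Only report the instruction-fetch (I/D) bit for code accesses when NX paging is in effect (CR4.PAE and EFER.NXE). */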
4232 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
4233 && ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
4234 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) ) )
4235 uErr |= X86_TRAP_PF_ID;
4236
4237#if 0 /* This is so much non-sense, really. Why was it done like that? */
4238 /* Note! RW access callers reporting a WRITE protection fault, will clear
4239 the READ flag before calling. So, read-modify-write accesses (RW)
4240 can safely be reported as READ faults. */
4241 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
4242 uErr |= X86_TRAP_PF_RW;
4243#else
4244 if (fAccess & IEM_ACCESS_TYPE_WRITE)
4245 {
4246 /// @todo r=bird: bs3-cpu-basic-2 wants X86_TRAP_PF_RW for xchg and cmpxchg
4247 /// (regardless of outcome of the comparison in the latter case).
4248 //if (!(fAccess & IEM_ACCESS_TYPE_READ))
4249 uErr |= X86_TRAP_PF_RW;
4250 }
4251#endif
4252
4253 /* For FXSAVE and FRSTOR the #PF is typically reported at the max address
4254 of the memory operand rather than at the start of it. (Not sure what
4255 happens if it crosses a page boundary.) The current heuristic is to
4256 report the #PF for the last byte if the access is larger than 64 bytes.
4257 This is probably not correct, but we can work that out later; the main
4258 objective now is to get FXSAVE to work like on real hardware and
4259 make bs3-cpu-basic2 work. */
4260 if (cbAccess <= 64)
4261 { /* likely */ }
4262 else
4263 GCPtrWhere += cbAccess - 1;
4264
4265 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
4266 uErr, GCPtrWhere);
4267}
4268
4269#ifdef IEM_WITH_SETJMP
4270/** \#PF(n) - 0e, longjmp. */
4271DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess,
4272 uint32_t fAccess, int rc) IEM_NOEXCEPT_MAY_LONGJMP
4273{
4274 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, cbAccess, fAccess, rc)));
4275}
4276#endif
4277
4278
4279/** \#MF(0) - 10. */
4280VBOXSTRICTRC iemRaiseMathFault(PVMCPUCC pVCpu) RT_NOEXCEPT
4281{
4282 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_NE)
4283 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4284
4285 /* Convert a #MF into a FERR -> IRQ 13. See @bugref{6117}. */
4286 PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13 /* u8Irq */, 1 /* u8Level */, 0 /* uTagSrc */);
4287 return iemRegUpdateRipAndFinishClearingRF(pVCpu);
4288}
4289
4290
4291/** \#AC(0) - 11. */
4292VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPUCC pVCpu) RT_NOEXCEPT
4293{
4294 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4295}
4296
4297#ifdef IEM_WITH_SETJMP
4298/** \#AC(0) - 11, longjmp. */
4299DECL_NO_RETURN(void) iemRaiseAlignmentCheckExceptionJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4300{
4301 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaiseAlignmentCheckException(pVCpu)));
4302}
4303#endif
4304
4305
4306/** \#XF(0)/\#XM(0) - 19. */
4307VBOXSTRICTRC iemRaiseSimdFpException(PVMCPUCC pVCpu) RT_NOEXCEPT
4308{
4309 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_XF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4310}
4311
4312
4313/** Accessed via IEMOP_RAISE_DIVIDE_ERROR. */
4314IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
4315{
4316 NOREF(cbInstr);
4317 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4318}
4319
4320
4321/** Accessed via IEMOP_RAISE_INVALID_LOCK_PREFIX. */
4322IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
4323{
4324 NOREF(cbInstr);
4325 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4326}
4327
4328
4329/** Accessed via IEMOP_RAISE_INVALID_OPCODE. */
4330IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
4331{
4332 NOREF(cbInstr);
4333 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4334}
4335
4336
4337/** @} */
4338
4339/** @name Common opcode decoders.
4340 * @{
4341 */
4342//#include <iprt/mem.h>
4343
4344/**
4345 * Used to add extra details about a stub case.
4346 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4347 */
4348void iemOpStubMsg2(PVMCPUCC pVCpu) RT_NOEXCEPT
4349{
4350#if defined(LOG_ENABLED) && defined(IN_RING3)
4351 PVM pVM = pVCpu->CTX_SUFF(pVM);
4352 char szRegs[4096];
4353 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
4354 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
4355 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
4356 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
4357 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
4358 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
4359 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
4360 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
4361 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
4362 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
4363 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
4364 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4365 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4366 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4367 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4368 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4369 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4370 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4371 " efer=%016VR{efer}\n"
4372 " pat=%016VR{pat}\n"
4373 " sf_mask=%016VR{sf_mask}\n"
4374 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4375 " lstar=%016VR{lstar}\n"
4376 " star=%016VR{star} cstar=%016VR{cstar}\n"
4377 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4378 );
4379
4380 char szInstr[256];
4381 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4382 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4383 szInstr, sizeof(szInstr), NULL);
4384
4385 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
4386#else
4387 RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip);
4388#endif
4389}
4390
4391/** @} */
4392
4393
4394
4395/** @name Register Access.
4396 * @{
4397 */
4398
4399/**
4400 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
4401 *
4402 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4403 * segment limit.
4404 *
4405 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4406 * @param cbInstr Instruction size.
4407 * @param offNextInstr The offset of the next instruction.
4408 * @param enmEffOpSize Effective operand size.
4409 */
4410VBOXSTRICTRC iemRegRipRelativeJumpS8AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int8_t offNextInstr,
4411 IEMMODE enmEffOpSize) RT_NOEXCEPT
4412{
4413 switch (enmEffOpSize)
4414 {
4415 case IEMMODE_16BIT:
4416 {
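/* The sum is truncated to 16 bits, so IP wraps at 64K before the limit check. */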
4417 uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + (int16_t)offNextInstr;
4418 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
4419 || pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT /* no CS limit checks in 64-bit mode */))
4420 pVCpu->cpum.GstCtx.rip = uNewIp;
4421 else
4422 return iemRaiseGeneralProtectionFault0(pVCpu);
4423 break;
4424 }
4425
4426 case IEMMODE_32BIT:
4427 {
4428 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
4429 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
4430
4431 uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + (int32_t)offNextInstr;
4432 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
4433 pVCpu->cpum.GstCtx.rip = uNewEip;
4434 else
4435 return iemRaiseGeneralProtectionFault0(pVCpu);
4436 break;
4437 }
4438
4439 case IEMMODE_64BIT:
4440 {
4441 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
4442
4443 uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
4444 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
4445 pVCpu->cpum.GstCtx.rip = uNewRip;
4446 else
4447 return iemRaiseGeneralProtectionFault0(pVCpu);
4448 break;
4449 }
4450
4451 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4452 }
4453
4454#ifndef IEM_WITH_CODE_TLB
4455 /* Flush the prefetch buffer. */
4456 pVCpu->iem.s.cbOpcode = cbInstr;
4457#endif
4458
4459 /*
4460 * Clear RF and finish the instruction (maybe raise #DB).
4461 */
4462 return iemRegFinishClearingRF(pVCpu);
4463}
4464
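/*
 * Illustrative sketch, not referenced by the emulation code above: the 16-bit
 * operand size case adds the sign-extended 8-bit displacement to IP, lets the
 * result wrap at 64 KiB and only then compares it against the CS limit.  The
 * helper name and signature below are hypothetical.
 */
static bool iemExampleCalcRelJmpS8Ip16(uint16_t uIp, uint8_t cbInstr, int8_t offNextInstr,
                                       uint32_t uCsLimit, uint16_t *puNewIp)
{
    uint16_t const uNewIp = (uint16_t)(uIp + cbInstr + (int16_t)offNextInstr); /* wraps at 0xffff */
    *puNewIp = uNewIp;
    return uNewIp <= uCsLimit; /* false means #GP(0) outside 64-bit mode */
}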
4465
4466/**
4467 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
4468 *
4469 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4470 * segment limit.
4471 *
4472 * @returns Strict VBox status code.
4473 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4474 * @param cbInstr Instruction size.
4475 * @param offNextInstr The offset of the next instruction.
4476 */
4477VBOXSTRICTRC iemRegRipRelativeJumpS16AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int16_t offNextInstr) RT_NOEXCEPT
4478{
4479 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
4480
4481 uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + offNextInstr;
4482 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
4483 || pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT /* no limit checking in 64-bit mode */))
4484 pVCpu->cpum.GstCtx.rip = uNewIp;
4485 else
4486 return iemRaiseGeneralProtectionFault0(pVCpu);
4487
4488#ifndef IEM_WITH_CODE_TLB
4489 /* Flush the prefetch buffer. */
4490 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4491#endif
4492
4493 /*
4494 * Clear RF and finish the instruction (maybe raise #DB).
4495 */
4496 return iemRegFinishClearingRF(pVCpu);
4497}
4498
4499
4500/**
4501 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
4502 *
4503 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4504 * segment limit.
4505 *
4506 * @returns Strict VBox status code.
4507 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4508 * @param cbInstr Instruction size.
4509 * @param offNextInstr The offset of the next instruction.
4510 * @param enmEffOpSize Effective operand size.
4511 */
4512VBOXSTRICTRC iemRegRipRelativeJumpS32AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int32_t offNextInstr,
4513 IEMMODE enmEffOpSize) RT_NOEXCEPT
4514{
4515 if (enmEffOpSize == IEMMODE_32BIT)
4516 {
4517 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX); Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
4518
4519 uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + offNextInstr;
4520 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
4521 pVCpu->cpum.GstCtx.rip = uNewEip;
4522 else
4523 return iemRaiseGeneralProtectionFault0(pVCpu);
4524 }
4525 else
4526 {
4527 Assert(enmEffOpSize == IEMMODE_64BIT);
4528
4529 uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
4530 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
4531 pVCpu->cpum.GstCtx.rip = uNewRip;
4532 else
4533 return iemRaiseGeneralProtectionFault0(pVCpu);
4534 }
4535
4536#ifndef IEM_WITH_CODE_TLB
4537 /* Flush the prefetch buffer. */
4538 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4539#endif
4540
4541 /*
4542 * Clear RF and finish the instruction (maybe raise #DB).
4543 */
4544 return iemRegFinishClearingRF(pVCpu);
4545}
4546
4547
4548/**
4549 * Performs a near jump to the specified address.
4550 *
4551 * May raise a \#GP(0) if the new IP is outside the code segment limit.
4552 *
4553 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4554 * @param uNewIp The new IP value.
4555 */
4556VBOXSTRICTRC iemRegRipJumpU16AndFinishClearningRF(PVMCPUCC pVCpu, uint16_t uNewIp) RT_NOEXCEPT
4557{
4558 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
4559 || pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT /* no limit checks in 64-bit mode */))
4560 pVCpu->cpum.GstCtx.rip = uNewIp;
4561 else
4562 return iemRaiseGeneralProtectionFault0(pVCpu);
4563 /** @todo Test 16-bit jump in 64-bit mode. */
4564
4565#ifndef IEM_WITH_CODE_TLB
4566 /* Flush the prefetch buffer. */
4567 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4568#endif
4569
4570 /*
4571 * Clear RF and finish the instruction (maybe raise #DB).
4572 */
4573 return iemRegFinishClearingRF(pVCpu);
4574}
4575
4576
4577/**
4578 * Performs a near jump to the specified address.
4579 *
4580 * May raise a \#GP(0) if the new RIP is outside the code segment limit.
4581 *
4582 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4583 * @param uNewEip The new EIP value.
4584 */
4585VBOXSTRICTRC iemRegRipJumpU32AndFinishClearningRF(PVMCPUCC pVCpu, uint32_t uNewEip) RT_NOEXCEPT
4586{
4587 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
4588 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
4589
4590 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
4591 pVCpu->cpum.GstCtx.rip = uNewEip;
4592 else
4593 return iemRaiseGeneralProtectionFault0(pVCpu);
4594
4595#ifndef IEM_WITH_CODE_TLB
4596 /* Flush the prefetch buffer. */
4597 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4598#endif
4599
4600 /*
4601 * Clear RF and finish the instruction (maybe raise #DB).
4602 */
4603 return iemRegFinishClearingRF(pVCpu);
4604}
4605
4606
4607/**
4608 * Performs a near jump to the specified address.
4609 *
4610 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4611 * segment limit.
4612 *
4613 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4614 * @param uNewRip The new RIP value.
4615 */
4616VBOXSTRICTRC iemRegRipJumpU64AndFinishClearningRF(PVMCPUCC pVCpu, uint64_t uNewRip) RT_NOEXCEPT
4617{
4618 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
4619
4620 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
4621 pVCpu->cpum.GstCtx.rip = uNewRip;
4622 else
4623 return iemRaiseGeneralProtectionFault0(pVCpu);
4624
4625#ifndef IEM_WITH_CODE_TLB
4626 /* Flush the prefetch buffer. */
4627 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4628#endif
4629
4630 /*
4631 * Clear RF and finish the instruction (maybe raise #DB).
4632 */
4633 return iemRegFinishClearingRF(pVCpu);
4634}
4635
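/*
 * Illustrative sketch, not referenced by the code above: with 48-bit linear
 * addresses, IEM_IS_CANONICAL amounts to checking that bits 63:47 are a sign
 * extension of bit 47.  Hypothetical helper; the shift-based form below is one
 * common way of expressing that.
 */
static bool iemExampleIsCanonical48(uint64_t uAddr)
{
    /* Shift bit 47 up to bit 63 and sign-extend back down; canonical iff unchanged. */
    return (uint64_t)((int64_t)(uAddr << 16) >> 16) == uAddr;
}
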
4636/** @} */
4637
4638
4639/** @name FPU access and helpers.
4640 *
4641 * @{
4642 */
4643
4644/**
4645 * Updates the x87.DS and FPUDP registers.
4646 *
4647 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4648 * @param pFpuCtx The FPU context.
4649 * @param iEffSeg The effective segment register.
4650 * @param GCPtrEff The effective address relative to @a iEffSeg.
4651 */
4652DECLINLINE(void) iemFpuUpdateDP(PVMCPUCC pVCpu, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
4653{
4654 RTSEL sel;
4655 switch (iEffSeg)
4656 {
4657 case X86_SREG_DS: sel = pVCpu->cpum.GstCtx.ds.Sel; break;
4658 case X86_SREG_SS: sel = pVCpu->cpum.GstCtx.ss.Sel; break;
4659 case X86_SREG_CS: sel = pVCpu->cpum.GstCtx.cs.Sel; break;
4660 case X86_SREG_ES: sel = pVCpu->cpum.GstCtx.es.Sel; break;
4661 case X86_SREG_FS: sel = pVCpu->cpum.GstCtx.fs.Sel; break;
4662 case X86_SREG_GS: sel = pVCpu->cpum.GstCtx.gs.Sel; break;
4663 default:
4664 AssertMsgFailed(("%d\n", iEffSeg));
4665 sel = pVCpu->cpum.GstCtx.ds.Sel;
4666 }
4667 /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
4668 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
4669 {
4670 pFpuCtx->DS = 0;
4671 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
4672 }
4673 else if (!IEM_IS_LONG_MODE(pVCpu))
4674 {
4675 pFpuCtx->DS = sel;
4676 pFpuCtx->FPUDP = GCPtrEff;
4677 }
4678 else
4679 *(uint64_t *)&pFpuCtx->FPUDP = GCPtrEff;
4680}
4681
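/*
 * Illustrative sketch, not referenced by the code above: in real and V86 mode
 * iemFpuUpdateDP stores segment * 16 + offset as FPUDP and zeroes DS, i.e. a
 * real-mode style linear address.  Hypothetical helper mirroring just that
 * calculation.
 */
static uint32_t iemExampleRealModeFpuDp(uint16_t uSel, uint32_t offEff)
{
    return offEff + ((uint32_t)uSel << 4); /* e.g. 1234h:0010h -> 12350h */
}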
4682
4683/**
4684 * Rotates the stack registers in the push direction.
4685 *
4686 * @param pFpuCtx The FPU context.
4687 * @remarks This is a complete waste of time, but fxsave stores the registers in
4688 * stack order.
4689 */
4690DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
4691{
4692 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
4693 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
4694 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
4695 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
4696 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
4697 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
4698 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
4699 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
4700 pFpuCtx->aRegs[0].r80 = r80Tmp;
4701}
4702
4703
4704/**
4705 * Rotates the stack registers in the pop direction.
4706 *
4707 * @param pFpuCtx The FPU context.
4708 * @remarks This is a complete waste of time, but fxsave stores the registers in
4709 * stack order.
4710 */
4711DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
4712{
4713 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
4714 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
4715 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
4716 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
4717 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
4718 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
4719 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
4720 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
4721 pFpuCtx->aRegs[7].r80 = r80Tmp;
4722}
4723
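/*
 * Illustrative sketch, not referenced by the code above: fxsave keeps aRegs[]
 * in stack order, so aRegs[0] is always ST(0).  A TOP-relative register file
 * would instead locate ST(i) at physical register (TOP + i) & 7, which is why
 * the rotate helpers shuffle the array whenever TOP moves.  Hypothetical helper.
 */
static unsigned iemExampleStRegToPhysReg(unsigned iTop, unsigned iStReg)
{
    return (iTop + iStReg) & 7; /* e.g. TOP=5, ST(2) -> physical register 7 */
}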
4724
4725/**
4726 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
4727 * exception prevents it.
4728 *
4729 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4730 * @param pResult The FPU operation result to push.
4731 * @param pFpuCtx The FPU context.
4732 */
4733static void iemFpuMaybePushResult(PVMCPU pVCpu, PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx) RT_NOEXCEPT
4734{
4735 /* Update FSW and bail if there are pending exceptions afterwards. */
4736 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
4737 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
4738 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4739 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4740 {
4741 if ((fFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4742 Log11(("iemFpuMaybePushResult: %04x:%08RX64: FSW %#x -> %#x\n",
4743 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fFsw));
4744 pFpuCtx->FSW = fFsw;
4745 return;
4746 }
4747
4748 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
4749 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
4750 {
4751 /* All is fine, push the actual value. */
4752 pFpuCtx->FTW |= RT_BIT(iNewTop);
4753 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
4754 }
4755 else if (pFpuCtx->FCW & X86_FCW_IM)
4756 {
4757 /* Masked stack overflow, push QNaN. */
4758 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
4759 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
4760 }
4761 else
4762 {
4763 /* Raise stack overflow, don't push anything. */
4764 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
4765 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
4766 Log11(("iemFpuMaybePushResult: %04x:%08RX64: stack overflow (FSW=%#x)\n",
4767 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
4768 return;
4769 }
4770
4771 fFsw &= ~X86_FSW_TOP_MASK;
4772 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
4773 pFpuCtx->FSW = fFsw;
4774
4775 iemFpuRotateStackPush(pFpuCtx);
4776 RT_NOREF(pVCpu);
4777}
4778
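/*
 * Illustrative sketch, not referenced by the code above: a push decrements TOP
 * modulo 8, which iemFpuMaybePushResult writes as (TOP + 7) & 7 to stay within
 * unsigned arithmetic.  Hypothetical helper showing the same calculation.
 */
static uint16_t iemExampleTopAfterPush(uint16_t iTop)
{
    return (iTop + 7) & 7; /* TOP=0 -> 7, TOP=3 -> 2 */
}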
4779
4780/**
4781 * Stores a result in a FPU register and updates the FSW and FTW.
4782 *
4783 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4784 * @param pFpuCtx The FPU context.
4785 * @param pResult The result to store.
4786 * @param iStReg Which FPU register to store it in.
4787 */
4788static void iemFpuStoreResultOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg) RT_NOEXCEPT
4789{
4790 Assert(iStReg < 8);
4791 uint16_t fNewFsw = pFpuCtx->FSW;
4792 uint16_t const iReg = (X86_FSW_TOP_GET(fNewFsw) + iStReg) & X86_FSW_TOP_SMASK;
4793 fNewFsw &= ~X86_FSW_C_MASK;
4794 fNewFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
4795 if ((fNewFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4796 Log11(("iemFpuStoreResultOnly: %04x:%08RX64: FSW %#x -> %#x\n",
4797 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fNewFsw));
4798 pFpuCtx->FSW = fNewFsw;
4799 pFpuCtx->FTW |= RT_BIT(iReg);
4800 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
4801 RT_NOREF(pVCpu);
4802}
4803
4804
4805/**
4806 * Only updates the FPU status word (FSW) with the result of the current
4807 * instruction.
4808 *
4809 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4810 * @param pFpuCtx The FPU context.
4811 * @param u16FSW The FSW output of the current instruction.
4812 */
4813static void iemFpuUpdateFSWOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint16_t u16FSW) RT_NOEXCEPT
4814{
4815 uint16_t fNewFsw = pFpuCtx->FSW;
4816 fNewFsw &= ~X86_FSW_C_MASK;
4817 fNewFsw |= u16FSW & ~X86_FSW_TOP_MASK;
4818 if ((fNewFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4819 Log11(("iemFpuUpdateFSWOnly: %04x:%08RX64: FSW %#x -> %#x\n",
4820 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fNewFsw));
4821 pFpuCtx->FSW = fNewFsw;
4822 RT_NOREF(pVCpu);
4823}
4824
4825
4826/**
4827 * Pops one item off the FPU stack if no pending exception prevents it.
4828 *
4829 * @param pFpuCtx The FPU context.
4830 */
4831static void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx) RT_NOEXCEPT
4832{
4833 /* Check pending exceptions. */
4834 uint16_t uFSW = pFpuCtx->FSW;
4835 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4836 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4837 return;
4838
4839 /* TOP--. */
4840 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
4841 uFSW &= ~X86_FSW_TOP_MASK;
4842 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
4843 pFpuCtx->FSW = uFSW;
4844
4845 /* Mark the previous ST0 as empty. */
4846 iOldTop >>= X86_FSW_TOP_SHIFT;
4847 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
4848
4849 /* Rotate the registers. */
4850 iemFpuRotateStackPop(pFpuCtx);
4851}
4852
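/*
 * Illustrative sketch, not referenced by the code above: popping advances TOP
 * by one modulo 8 (the inverse of the (TOP + 7) & 7 used when pushing).  Adding
 * 9 << 11 to the in-place TOP field and masking achieves exactly that, because
 * (TOP + 9) & 7 == (TOP + 1) & 7.  Hypothetical helper on a raw FSW value;
 * 0x3800 is the value of X86_FSW_TOP_MASK.
 */
static uint16_t iemExampleFswAfterPop(uint16_t uFsw)
{
    uint16_t const uTopBits = uFsw & UINT16_C(0x3800);
    return (uint16_t)((uFsw & ~UINT16_C(0x3800)) | ((uTopBits + (UINT16_C(9) << 11)) & UINT16_C(0x3800)));
}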
4853
4854/**
4855 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
4856 *
4857 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4858 * @param pResult The FPU operation result to push.
4859 */
4860void iemFpuPushResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult) RT_NOEXCEPT
4861{
4862 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4863 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4864 iemFpuMaybePushResult(pVCpu, pResult, pFpuCtx);
4865}
4866
4867
4868/**
4869 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
4870 * and sets FPUDP and FPUDS.
4871 *
4872 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4873 * @param pResult The FPU operation result to push.
4874 * @param iEffSeg The effective segment register.
4875 * @param GCPtrEff The effective address relative to @a iEffSeg.
4876 */
4877void iemFpuPushResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
4878{
4879 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4880 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
4881 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4882 iemFpuMaybePushResult(pVCpu, pResult, pFpuCtx);
4883}
4884
4885
4886/**
4887 * Replaces ST0 with the first value and pushes the second onto the FPU stack,
4888 * unless a pending exception prevents it.
4889 *
4890 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4891 * @param pResult The FPU operation result to store and push.
4892 */
4893void iemFpuPushResultTwo(PVMCPUCC pVCpu, PIEMFPURESULTTWO pResult) RT_NOEXCEPT
4894{
4895 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4896 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4897
4898 /* Update FSW and bail if there are pending exceptions afterwards. */
4899 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
4900 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
4901 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4902 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4903 {
4904 if ((fFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4905 Log11(("iemFpuPushResultTwo: %04x:%08RX64: FSW %#x -> %#x\n",
4906 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fFsw));
4907 pFpuCtx->FSW = fFsw;
4908 return;
4909 }
4910
4911 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
4912 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
4913 {
4914 /* All is fine, push the actual value. */
4915 pFpuCtx->FTW |= RT_BIT(iNewTop);
4916 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
4917 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
4918 }
4919 else if (pFpuCtx->FCW & X86_FCW_IM)
4920 {
4921 /* Masked stack overflow, push QNaN. */
4922 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
4923 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
4924 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
4925 }
4926 else
4927 {
4928 /* Raise stack overflow, don't push anything. */
4929 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
4930 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
4931 Log11(("iemFpuPushResultTwo: %04x:%08RX64: stack overflow (FSW=%#x)\n",
4932 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
4933 return;
4934 }
4935
4936 fFsw &= ~X86_FSW_TOP_MASK;
4937 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
4938 pFpuCtx->FSW = fFsw;
4939
4940 iemFpuRotateStackPush(pFpuCtx);
4941}
4942
4943
4944/**
4945 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
4946 * FOP.
4947 *
4948 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4949 * @param pResult The result to store.
4950 * @param iStReg Which FPU register to store it in.
4951 */
4952void iemFpuStoreResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg) RT_NOEXCEPT
4953{
4954 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4955 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4956 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
4957}
4958
4959
4960/**
4961 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
4962 * FOP, and then pops the stack.
4963 *
4964 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4965 * @param pResult The result to store.
4966 * @param iStReg Which FPU register to store it in.
4967 */
4968void iemFpuStoreResultThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg) RT_NOEXCEPT
4969{
4970 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4971 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4972 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
4973 iemFpuMaybePopOne(pFpuCtx);
4974}
4975
4976
4977/**
4978 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
4979 * FPUDP, and FPUDS.
4980 *
4981 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4982 * @param pResult The result to store.
4983 * @param iStReg Which FPU register to store it in.
4984 * @param iEffSeg The effective memory operand selector register.
4985 * @param GCPtrEff The effective memory operand offset.
4986 */
4987void iemFpuStoreResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
4988 uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
4989{
4990 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4991 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
4992 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4993 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
4994}
4995
4996
4997/**
4998 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
4999 * FPUDP, and FPUDS, and then pops the stack.
5000 *
5001 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5002 * @param pResult The result to store.
5003 * @param iStReg Which FPU register to store it in.
5004 * @param iEffSeg The effective memory operand selector register.
5005 * @param GCPtrEff The effective memory operand offset.
5006 */
5007void iemFpuStoreResultWithMemOpThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult,
5008 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
5009{
5010 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5011 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5012 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5013 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5014 iemFpuMaybePopOne(pFpuCtx);
5015}
5016
5017
5018/**
5019 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
5020 *
5021 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5022 */
5023void iemFpuUpdateOpcodeAndIp(PVMCPUCC pVCpu) RT_NOEXCEPT
5024{
5025 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5026 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5027}
5028
5029
5030/**
5031 * Updates the FSW, FOP, FPUIP, and FPUCS.
5032 *
5033 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5034 * @param u16FSW The FSW from the current instruction.
5035 */
5036void iemFpuUpdateFSW(PVMCPUCC pVCpu, uint16_t u16FSW) RT_NOEXCEPT
5037{
5038 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5039 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5040 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5041}
5042
5043
5044/**
5045 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
5046 *
5047 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5048 * @param u16FSW The FSW from the current instruction.
5049 */
5050void iemFpuUpdateFSWThenPop(PVMCPUCC pVCpu, uint16_t u16FSW) RT_NOEXCEPT
5051{
5052 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5053 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5054 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5055 iemFpuMaybePopOne(pFpuCtx);
5056}
5057
5058
5059/**
5060 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
5061 *
5062 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5063 * @param u16FSW The FSW from the current instruction.
5064 * @param iEffSeg The effective memory operand selector register.
5065 * @param GCPtrEff The effective memory operand offset.
5066 */
5067void iemFpuUpdateFSWWithMemOp(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
5068{
5069 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5070 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5071 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5072 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5073}
5074
5075
5076/**
5077 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
5078 *
5079 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5080 * @param u16FSW The FSW from the current instruction.
5081 */
5082void iemFpuUpdateFSWThenPopPop(PVMCPUCC pVCpu, uint16_t u16FSW) RT_NOEXCEPT
5083{
5084 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5085 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5086 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5087 iemFpuMaybePopOne(pFpuCtx);
5088 iemFpuMaybePopOne(pFpuCtx);
5089}
5090
5091
5092/**
5093 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
5094 *
5095 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5096 * @param u16FSW The FSW from the current instruction.
5097 * @param iEffSeg The effective memory operand selector register.
5098 * @param GCPtrEff The effective memory operand offset.
5099 */
5100void iemFpuUpdateFSWWithMemOpThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
5101{
5102 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5103 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5104 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5105 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5106 iemFpuMaybePopOne(pFpuCtx);
5107}
5108
5109
5110/**
5111 * Worker routine for raising an FPU stack underflow exception.
5112 *
5113 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5114 * @param pFpuCtx The FPU context.
5115 * @param iStReg The stack register being accessed.
5116 */
5117static void iemFpuStackUnderflowOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint8_t iStReg)
5118{
5119 Assert(iStReg < 8 || iStReg == UINT8_MAX);
5120 if (pFpuCtx->FCW & X86_FCW_IM)
5121 {
5122 /* Masked underflow. */
5123 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5124 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5125 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5126 if (iStReg != UINT8_MAX)
5127 {
5128 pFpuCtx->FTW |= RT_BIT(iReg);
5129 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
5130 }
5131 }
5132 else
5133 {
5134 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5135 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5136 Log11(("iemFpuStackUnderflowOnly: %04x:%08RX64: underflow (FSW=%#x)\n",
5137 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5138 }
5139 RT_NOREF(pVCpu);
5140}
5141
5142
5143/**
5144 * Raises a FPU stack underflow exception.
5145 *
5146 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5147 * @param iStReg The destination register that should be loaded
5148 * with QNaN if \#IS is not masked. Specify
5149 * UINT8_MAX if none (like for fcom).
5150 */
5151void iemFpuStackUnderflow(PVMCPUCC pVCpu, uint8_t iStReg) RT_NOEXCEPT
5152{
5153 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5154 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5155 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5156}
5157
5158
5159void iemFpuStackUnderflowWithMemOp(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
5160{
5161 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5162 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5163 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5164 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5165}
5166
5167
5168void iemFpuStackUnderflowThenPop(PVMCPUCC pVCpu, uint8_t iStReg) RT_NOEXCEPT
5169{
5170 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5171 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5172 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5173 iemFpuMaybePopOne(pFpuCtx);
5174}
5175
5176
5177void iemFpuStackUnderflowWithMemOpThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
5178{
5179 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5180 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5181 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5182 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5183 iemFpuMaybePopOne(pFpuCtx);
5184}
5185
5186
5187void iemFpuStackUnderflowThenPopPop(PVMCPUCC pVCpu) RT_NOEXCEPT
5188{
5189 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5190 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5191 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, UINT8_MAX);
5192 iemFpuMaybePopOne(pFpuCtx);
5193 iemFpuMaybePopOne(pFpuCtx);
5194}
5195
5196
5197void iemFpuStackPushUnderflow(PVMCPUCC pVCpu) RT_NOEXCEPT
5198{
5199 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5200 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5201
5202 if (pFpuCtx->FCW & X86_FCW_IM)
5203 {
5204 /* Masked underflow - Push QNaN. */
5205 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5206 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5207 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5208 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5209 pFpuCtx->FTW |= RT_BIT(iNewTop);
5210 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5211 iemFpuRotateStackPush(pFpuCtx);
5212 }
5213 else
5214 {
5215 /* Exception pending - don't change TOP or the register stack. */
5216 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5217 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5218 Log11(("iemFpuStackPushUnderflow: %04x:%08RX64: underflow (FSW=%#x)\n",
5219 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5220 }
5221}
5222
5223
5224void iemFpuStackPushUnderflowTwo(PVMCPUCC pVCpu) RT_NOEXCEPT
5225{
5226 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5227 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5228
5229 if (pFpuCtx->FCW & X86_FCW_IM)
5230 {
5231 /* Masked underflow - Push QNaN. */
5232 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5233 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5234 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5235 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5236 pFpuCtx->FTW |= RT_BIT(iNewTop);
5237 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
5238 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5239 iemFpuRotateStackPush(pFpuCtx);
5240 }
5241 else
5242 {
5243 /* Exception pending - don't change TOP or the register stack. */
5244 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5245 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5246 Log11(("iemFpuStackPushUnderflowTwo: %04x:%08RX64: underflow (FSW=%#x)\n",
5247 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5248 }
5249}
5250
5251
5252/**
5253 * Worker routine for raising an FPU stack overflow exception on a push.
5254 *
5255 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5256 * @param pFpuCtx The FPU context.
5257 */
5258static void iemFpuStackPushOverflowOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx) RT_NOEXCEPT
5259{
5260 if (pFpuCtx->FCW & X86_FCW_IM)
5261 {
5262 /* Masked overflow. */
5263 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5264 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5265 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
5266 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5267 pFpuCtx->FTW |= RT_BIT(iNewTop);
5268 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5269 iemFpuRotateStackPush(pFpuCtx);
5270 }
5271 else
5272 {
5273 /* Exception pending - don't change TOP or the register stack. */
5274 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5275 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5276 Log11(("iemFpuStackPushOverflowOnly: %04x:%08RX64: overflow (FSW=%#x)\n",
5277 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5278 }
5279 RT_NOREF(pVCpu);
5280}
5281
5282
5283/**
5284 * Raises a FPU stack overflow exception on a push.
5285 *
5286 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5287 */
5288void iemFpuStackPushOverflow(PVMCPUCC pVCpu) RT_NOEXCEPT
5289{
5290 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5291 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5292 iemFpuStackPushOverflowOnly(pVCpu, pFpuCtx);
5293}
5294
5295
5296/**
5297 * Raises a FPU stack overflow exception on a push with a memory operand.
5298 *
5299 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5300 * @param iEffSeg The effective memory operand selector register.
5301 * @param GCPtrEff The effective memory operand offset.
5302 */
5303void iemFpuStackPushOverflowWithMemOp(PVMCPUCC pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
5304{
5305 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5306 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5307 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5308 iemFpuStackPushOverflowOnly(pVCpu, pFpuCtx);
5309}
5310
5311/** @} */
5312
5313
5314/** @name SSE+AVX SIMD access and helpers.
5315 *
5316 * @{
5317 */
5318/**
5319 * Stores a result in a SIMD XMM register, updates the MXCSR.
5320 *
5321 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5322 * @param pResult The result to store.
5323 * @param iXmmReg Which SIMD XMM register to store the result in.
5324 */
5325void iemSseStoreResult(PVMCPUCC pVCpu, PCIEMSSERESULT pResult, uint8_t iXmmReg) RT_NOEXCEPT
5326{
5327 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5328 pFpuCtx->MXCSR |= pResult->MXCSR & X86_MXCSR_XCPT_FLAGS;
5329
5330 /* The result is only updated if there is no unmasked exception pending. */
5331 if (( ~((pFpuCtx->MXCSR & X86_MXCSR_XCPT_MASK) >> X86_MXCSR_XCPT_MASK_SHIFT)
5332 & (pFpuCtx->MXCSR & X86_MXCSR_XCPT_FLAGS)) == 0)
5333 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXmmReg] = pResult->uResult;
5334}
5335
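/*
 * Illustrative sketch, not referenced by the code above: the guard in
 * iemSseStoreResult skips the register update when any exception flag (MXCSR
 * bits 0..5) is set whose corresponding mask bit (bits 7..12) is clear.
 * Hypothetical predicate with the constants written out; 0x3f and the shift of
 * 7 correspond to X86_MXCSR_XCPT_FLAGS and X86_MXCSR_XCPT_MASK_SHIFT.
 */
static bool iemExampleMxcsrHasUnmaskedXcpt(uint32_t fMxcsr)
{
    uint32_t const fFlags = fMxcsr & UINT32_C(0x003f);        /* IE, DE, ZE, OE, UE, PE */
    uint32_t const fMasks = (fMxcsr >> 7) & UINT32_C(0x003f); /* IM, DM, ZM, OM, UM, PM */
    return (fFlags & ~fMasks) != 0;
}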
5336
5337/**
5338 * Updates the MXCSR.
5339 *
5340 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5341 * @param fMxcsr The new MXCSR value.
5342 */
5343void iemSseUpdateMxcsr(PVMCPUCC pVCpu, uint32_t fMxcsr) RT_NOEXCEPT
5344{
5345 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5346 pFpuCtx->MXCSR |= fMxcsr & X86_MXCSR_XCPT_FLAGS;
5347}
5348/** @} */
5349
5350
5351/** @name Memory access.
5352 *
5353 * @{
5354 */
5355
5356
5357/**
5358 * Updates the IEMCPU::cbWritten counter if applicable.
5359 *
5360 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5361 * @param fAccess The access being accounted for.
5362 * @param cbMem The access size.
5363 */
5364DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPUCC pVCpu, uint32_t fAccess, size_t cbMem)
5365{
5366 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
5367 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
5368 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
5369}
5370
5371
5372/**
5373 * Applies the segment limit, base and attributes.
5374 *
5375 * This may raise a \#GP or \#SS.
5376 *
5377 * @returns VBox strict status code.
5378 *
5379 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5380 * @param fAccess The kind of access which is being performed.
5381 * @param iSegReg The index of the segment register to apply.
5382 * This is UINT8_MAX if none (for IDT, GDT, LDT,
5383 * TSS, ++).
5384 * @param cbMem The access size.
5385 * @param pGCPtrMem Pointer to the guest memory address to apply
5386 * segmentation to. Input and output parameter.
5387 */
5388VBOXSTRICTRC iemMemApplySegment(PVMCPUCC pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem) RT_NOEXCEPT
5389{
5390 if (iSegReg == UINT8_MAX)
5391 return VINF_SUCCESS;
5392
5393 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
5394 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
5395 switch (pVCpu->iem.s.enmCpuMode)
5396 {
5397 case IEMMODE_16BIT:
5398 case IEMMODE_32BIT:
5399 {
5400 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
5401 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
5402
5403 if ( pSel->Attr.n.u1Present
5404 && !pSel->Attr.n.u1Unusable)
5405 {
5406 Assert(pSel->Attr.n.u1DescType);
5407 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
5408 {
5409 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5410 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
5411 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
5412
5413 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
5414 {
5415 /** @todo CPL check. */
5416 }
5417
5418 /*
5419 * There are two kinds of data selectors, normal and expand down.
5420 */
5421 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
5422 {
5423 if ( GCPtrFirst32 > pSel->u32Limit
5424 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
5425 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5426 }
5427 else
5428 {
5429 /*
5430 * The upper boundary is defined by the B bit, not the G bit!
5431 */
5432 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
5433 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
5434 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5435 }
5436 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
5437 }
5438 else
5439 {
5440 /*
5441 * A code selector can usually be used to read through it; writing is
5442 * only permitted in real and V8086 mode.
5443 */
5444 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5445 || ( (fAccess & IEM_ACCESS_TYPE_READ)
5446 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
5447 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
5448 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
5449
5450 if ( GCPtrFirst32 > pSel->u32Limit
5451 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
5452 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5453
5454 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
5455 {
5456 /** @todo CPL check. */
5457 }
5458
5459 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
5460 }
5461 }
5462 else
5463 return iemRaiseGeneralProtectionFault0(pVCpu);
5464 return VINF_SUCCESS;
5465 }
5466
5467 case IEMMODE_64BIT:
5468 {
5469 RTGCPTR GCPtrMem = *pGCPtrMem;
5470 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
5471 *pGCPtrMem = GCPtrMem + pSel->u64Base;
5472
5473 Assert(cbMem >= 1);
5474 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
5475 return VINF_SUCCESS;
5476 /** @todo We should probably raise \#SS(0) here if segment is SS; see AMD spec.
5477 * 4.12.2 "Data Limit Checks in 64-bit Mode". */
5478 return iemRaiseGeneralProtectionFault0(pVCpu);
5479 }
5480
5481 default:
5482 AssertFailedReturn(VERR_IEM_IPE_7);
5483 }
5484}
5485
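/*
 * Illustrative sketch, not referenced by the code above: condensed form of the
 * 16/32-bit data segment limit checks.  A normal segment allows offsets
 * [0, limit]; an expand-down segment allows (limit, upper] where the upper
 * bound is 0xffffffff with the B/D bit set and 0xffff otherwise.  Hypothetical
 * helper; the real code also handles present/unusable bits and code segments.
 */
static bool iemExampleDataSegLimitOk(uint32_t offFirst, uint32_t offLast, uint32_t uLimit,
                                     bool fExpandDown, bool fDefBig)
{
    if (!fExpandDown)
        return offFirst <= uLimit && offLast <= uLimit;
    uint32_t const offUpper = fDefBig ? UINT32_MAX : UINT32_C(0xffff);
    return offFirst > uLimit && offLast <= offUpper;
}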
5486
5487/**
5488 * Translates a virtual address to a physical address and checks if we
5489 * can access the page as specified.
5490 *
5491 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5492 * @param GCPtrMem The virtual address.
5493 * @param cbAccess The access size, for raising \#PF correctly for
5494 * FXSAVE and such.
5495 * @param fAccess The intended access.
5496 * @param pGCPhysMem Where to return the physical address.
5497 */
5498VBOXSTRICTRC iemMemPageTranslateAndCheckAccess(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint32_t cbAccess,
5499 uint32_t fAccess, PRTGCPHYS pGCPhysMem) RT_NOEXCEPT
5500{
5501 /** @todo Need a different PGM interface here. We're currently using
5502 * generic / REM interfaces. This won't cut it for R0. */
5503 /** @todo If/when PGM handles paged real-mode, we can remove the hack in
5504 * iemSvmWorldSwitch/iemVmxWorldSwitch to work around raising a page-fault
5505 * here. */
5506 PGMPTWALK Walk;
5507 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
5508 if (RT_FAILURE(rc))
5509 {
5510 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
5511 /** @todo Check unassigned memory in unpaged mode. */
5512 /** @todo Reserved bits in page tables. Requires new PGM interface. */
5513#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5514 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5515 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
5516#endif
5517 *pGCPhysMem = NIL_RTGCPHYS;
5518 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess, rc);
5519 }
5520
5521 /* If the page is writable, user accessible and does not have the no-exec
5522 bit set, all access is allowed. Otherwise we'll have to check more carefully... */
5523 if ((Walk.fEffective & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
5524 {
5525 /* Write to read only memory? */
5526 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5527 && !(Walk.fEffective & X86_PTE_RW)
5528 && ( ( pVCpu->iem.s.uCpl == 3
5529 && !(fAccess & IEM_ACCESS_WHAT_SYS))
5530 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
5531 {
5532 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
5533 *pGCPhysMem = NIL_RTGCPHYS;
5534#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5535 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5536 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5537#endif
5538 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
5539 }
5540
5541 /* Kernel memory accessed by userland? */
5542 if ( !(Walk.fEffective & X86_PTE_US)
5543 && pVCpu->iem.s.uCpl == 3
5544 && !(fAccess & IEM_ACCESS_WHAT_SYS))
5545 {
5546 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
5547 *pGCPhysMem = NIL_RTGCPHYS;
5548#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5549 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5550 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5551#endif
5552 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess, VERR_ACCESS_DENIED);
5553 }
5554
5555 /* Executing non-executable memory? */
5556 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
5557 && (Walk.fEffective & X86_PTE_PAE_NX)
5558 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) )
5559 {
5560 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
5561 *pGCPhysMem = NIL_RTGCPHYS;
5562#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5563 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5564 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5565#endif
5566 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
5567 VERR_ACCESS_DENIED);
5568 }
5569 }
5570
5571 /*
5572 * Set the dirty / access flags.
5573 * ASSUMES this is set when the address is translated rather than on commit...
5574 */
5575 /** @todo testcase: check when A and D bits are actually set by the CPU. */
5576 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
5577 if ((Walk.fEffective & fAccessedDirty) != fAccessedDirty)
5578 {
5579 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
5580 AssertRC(rc2);
5581 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
5582 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
5583 }
5584
5585 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
5586 *pGCPhysMem = GCPhys;
5587 return VINF_SUCCESS;
5588}
5589
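/*
 * Illustrative sketch, not referenced by the code above: condensed form of the
 * write permission part of iemMemPageTranslateAndCheckAccess.  A write to a
 * read-only page faults for CPL 3 non-system accesses, and for supervisor
 * accesses too once CR0.WP is set.  Hypothetical predicate; the real code also
 * checks the user/supervisor and no-execute bits.
 */
static bool iemExampleWriteAllowed(bool fPteWritable, unsigned uCpl, bool fSysAccess, bool fCr0Wp)
{
    if (fPteWritable)
        return true;
    bool const fUserAccess = uCpl == 3 && !fSysAccess;
    return !fUserAccess && !fCr0Wp; /* supervisor writes bypass R/O only while CR0.WP=0 */
}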
5590
5591/**
5592 * Looks up a memory mapping entry.
5593 *
5594 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
5595 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5596 * @param pvMem The memory address.
5597 * @param fAccess The access type to match.
5598 */
5599DECLINLINE(int) iemMapLookup(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess)
5600{
5601 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
5602 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
5603 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
5604 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5605 return 0;
5606 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
5607 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5608 return 1;
5609 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
5610 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5611 return 2;
5612 return VERR_NOT_FOUND;
5613}
5614
5615
5616/**
5617 * Finds a free memmap entry when using iNextMapping doesn't work.
5618 *
5619 * @returns Memory mapping index, 1024 on failure.
5620 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5621 */
5622static unsigned iemMemMapFindFree(PVMCPUCC pVCpu)
5623{
5624 /*
5625 * The easy case.
5626 */
5627 if (pVCpu->iem.s.cActiveMappings == 0)
5628 {
5629 pVCpu->iem.s.iNextMapping = 1;
5630 return 0;
5631 }
5632
5633 /* There should be enough mappings for all instructions. */
5634 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
5635
5636 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
5637 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
5638 return i;
5639
5640 AssertFailedReturn(1024);
5641}
5642
5643
5644/**
5645 * Commits a bounce buffer that needs writing back and unmaps it.
5646 *
5647 * @returns Strict VBox status code.
5648 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5649 * @param iMemMap The index of the buffer to commit.
5650 * @param fPostponeFail Whether we can postpone writer failures to ring-3.
5651 * Always false in ring-3, obviously.
5652 */
5653static VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPUCC pVCpu, unsigned iMemMap, bool fPostponeFail)
5654{
5655 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
5656 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
5657#ifdef IN_RING3
5658 Assert(!fPostponeFail);
5659 RT_NOREF_PV(fPostponeFail);
5660#endif
5661
5662 /*
5663 * Do the writing.
5664 */
5665 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
5666 if (!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned)
5667 {
5668 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
5669 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
5670 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
5671 if (!pVCpu->iem.s.fBypassHandlers)
5672 {
5673 /*
5674 * Carefully and efficiently dealing with access handler return
5675 * codes make this a little bloated.
5676 */
5677 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
5678 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
5679 pbBuf,
5680 cbFirst,
5681 PGMACCESSORIGIN_IEM);
5682 if (rcStrict == VINF_SUCCESS)
5683 {
5684 if (cbSecond)
5685 {
5686 rcStrict = PGMPhysWrite(pVM,
5687 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5688 pbBuf + cbFirst,
5689 cbSecond,
5690 PGMACCESSORIGIN_IEM);
5691 if (rcStrict == VINF_SUCCESS)
5692 { /* nothing */ }
5693 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5694 {
5695 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
5696 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5697 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5698 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5699 }
5700#ifndef IN_RING3
5701 else if (fPostponeFail)
5702 {
5703 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5704 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5705 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5706 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
5707 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5708 return iemSetPassUpStatus(pVCpu, rcStrict);
5709 }
5710#endif
5711 else
5712 {
5713 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5714 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5715 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5716 return rcStrict;
5717 }
5718 }
5719 }
5720 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5721 {
5722 if (!cbSecond)
5723 {
5724 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
5725 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
5726 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5727 }
5728 else
5729 {
5730 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
5731 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5732 pbBuf + cbFirst,
5733 cbSecond,
5734 PGMACCESSORIGIN_IEM);
5735 if (rcStrict2 == VINF_SUCCESS)
5736 {
5737 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
5738 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5739 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5740 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5741 }
5742 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
5743 {
5744 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
5745 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5746 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
5747 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
5748 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5749 }
5750#ifndef IN_RING3
5751 else if (fPostponeFail)
5752 {
5753 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5754 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5755 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5756 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
5757 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5758 return iemSetPassUpStatus(pVCpu, rcStrict);
5759 }
5760#endif
5761 else
5762 {
5763 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5764 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5765 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
5766 return rcStrict2;
5767 }
5768 }
5769 }
5770#ifndef IN_RING3
5771 else if (fPostponeFail)
5772 {
5773 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5774 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5775 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5776 if (!cbSecond)
5777 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
5778 else
5779 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
5780 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5781 return iemSetPassUpStatus(pVCpu, rcStrict);
5782 }
5783#endif
5784 else
5785 {
5786 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
5787 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5788 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5789 return rcStrict;
5790 }
5791 }
5792 else
5793 {
5794 /*
5795 * No access handlers, much simpler.
5796 */
5797 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
5798 if (RT_SUCCESS(rc))
5799 {
5800 if (cbSecond)
5801 {
5802 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
5803 if (RT_SUCCESS(rc))
5804 { /* likely */ }
5805 else
5806 {
5807 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5808 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5809 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
5810 return rc;
5811 }
5812 }
5813 }
5814 else
5815 {
5816 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
5817 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
5818 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5819 return rc;
5820 }
5821 }
5822 }
5823
5824#if defined(IEM_LOG_MEMORY_WRITES)
5825 Log(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
5826 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
5827 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
5828 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5829 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
5830 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
5831
5832 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
5833 g_cbIemWrote = cbWrote;
5834 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
5835#endif
5836
5837 /*
5838 * Free the mapping entry.
5839 */
5840 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
5841 Assert(pVCpu->iem.s.cActiveMappings != 0);
5842 pVCpu->iem.s.cActiveMappings--;
5843 return VINF_SUCCESS;
5844}
5845
5846
5847/**
5848 * iemMemMap worker that deals with a request crossing pages.
5849 */
5850static VBOXSTRICTRC
5851iemMemBounceBufferMapCrossPage(PVMCPUCC pVCpu, int iMemMap, void **ppvMem, size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
5852{
5853 Assert(cbMem <= GUEST_PAGE_SIZE);
5854
5855 /*
5856 * Do the address translations.
5857 */
5858 uint32_t const cbFirstPage = GUEST_PAGE_SIZE - (uint32_t)(GCPtrFirst & GUEST_PAGE_OFFSET_MASK);
5859 RTGCPHYS GCPhysFirst;
5860 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, cbFirstPage, fAccess, &GCPhysFirst);
5861 if (rcStrict != VINF_SUCCESS)
5862 return rcStrict;
5863 Assert((GCPhysFirst & GUEST_PAGE_OFFSET_MASK) == (GCPtrFirst & GUEST_PAGE_OFFSET_MASK));
5864
5865 uint32_t const cbSecondPage = (uint32_t)cbMem - cbFirstPage;
5866 RTGCPHYS GCPhysSecond;
5867 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK,
5868 cbSecondPage, fAccess, &GCPhysSecond);
5869 if (rcStrict != VINF_SUCCESS)
5870 return rcStrict;
5871 Assert((GCPhysSecond & GUEST_PAGE_OFFSET_MASK) == 0);
5872 GCPhysSecond &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK; /** @todo why? */
5873
5874 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
5875
5876 /*
5877 * Read in the current memory content if it's a read, execute or partial
5878 * write access.
5879 */
5880 uint8_t * const pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
5881
5882 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
5883 {
5884 if (!pVCpu->iem.s.fBypassHandlers)
5885 {
5886 /*
5887 * Must carefully deal with access handler status codes here,
5888 * which makes the code a bit bloated.
5889 */
5890 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
5891 if (rcStrict == VINF_SUCCESS)
5892 {
5893 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
5894 if (rcStrict == VINF_SUCCESS)
5895 { /*likely */ }
5896 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5897 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5898 else
5899 {
5900 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (!!)\n",
5901 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5902 return rcStrict;
5903 }
5904 }
5905 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5906 {
5907 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
5908 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
5909 {
5910 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
5911 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5912 }
5913 else
5914 {
5915 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
5916 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
5917 return rcStrict2;
5918 }
5919 }
5920 else
5921 {
5922 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
5923 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
5924 return rcStrict;
5925 }
5926 }
5927 else
5928 {
5929 /*
5930 * No informational status codes here, much more straightforward.
5931 */
5932 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
5933 if (RT_SUCCESS(rc))
5934 {
5935 Assert(rc == VINF_SUCCESS);
5936 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
5937 if (RT_SUCCESS(rc))
5938 Assert(rc == VINF_SUCCESS);
5939 else
5940 {
5941 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
5942 return rc;
5943 }
5944 }
5945 else
5946 {
5947 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
5948 return rc;
5949 }
5950 }
5951 }
5952#ifdef VBOX_STRICT
5953 else
5954 memset(pbBuf, 0xcc, cbMem);
5955 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
5956 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
5957#endif
5958 AssertCompileMemberAlignment(VMCPU, iem.s.aBounceBuffers, 64);
5959
5960 /*
5961 * Commit the bounce buffer entry.
5962 */
5963 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
5964 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
5965 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
5966 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
5967 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
5968 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
5969 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
5970 pVCpu->iem.s.iNextMapping = iMemMap + 1;
5971 pVCpu->iem.s.cActiveMappings++;
5972
5973 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
5974 *ppvMem = pbBuf;
5975 return VINF_SUCCESS;
5976}
5977
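/*
 * Illustrative sketch, not referenced by the code above: how a page-crossing
 * access is split before being bounce buffered.  Hypothetical helper; 0x1000
 * stands in for GUEST_PAGE_SIZE and the caller is assumed to have verified
 * that the access really crosses a page boundary (cbFirst < cbMem).
 */
static void iemExampleSplitAtPageBoundary(uint64_t GCPtrFirst, uint32_t cbMem,
                                          uint32_t *pcbFirstPage, uint32_t *pcbSecondPage)
{
    uint32_t const cbFirstPage = UINT32_C(0x1000) - (uint32_t)(GCPtrFirst & UINT32_C(0xfff));
    *pcbFirstPage  = cbFirstPage;          /* e.g. GCPtrFirst=...0ffeh, cbMem=8 -> 2 */
    *pcbSecondPage = cbMem - cbFirstPage;  /*                                   -> 6 */
}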
5978
5979/**
5980 * iemMemMap worker that deals with iemMemPageMap failures.
5981 */
5982static VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPUCC pVCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
5983 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
5984{
5985 /*
5986 * Filter out conditions we can handle and the ones which shouldn't happen.
5987 */
5988 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
5989 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
5990 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
5991 {
5992 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
5993 return rcMap;
5994 }
5995 pVCpu->iem.s.cPotentialExits++;
5996
5997 /*
5998 * Read in the current memory content if it's a read, execute or partial
5999 * write access.
6000 */
6001 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
6002 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
6003 {
6004 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
6005 memset(pbBuf, 0xff, cbMem);
6006 else
6007 {
6008 int rc;
6009 if (!pVCpu->iem.s.fBypassHandlers)
6010 {
6011 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
6012 if (rcStrict == VINF_SUCCESS)
6013 { /* nothing */ }
6014 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6015 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6016 else
6017 {
6018 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6019 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6020 return rcStrict;
6021 }
6022 }
6023 else
6024 {
6025 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
6026 if (RT_SUCCESS(rc))
6027 { /* likely */ }
6028 else
6029 {
6030 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6031 GCPhysFirst, rc));
6032 return rc;
6033 }
6034 }
6035 }
6036 }
6037#ifdef VBOX_STRICT
6038 else
6039 memset(pbBuf, 0xcc, cbMem);
6040#endif
6041#ifdef VBOX_STRICT
6042 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
6043 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
6044#endif
6045
6046 /*
6047 * Commit the bounce buffer entry.
6048 */
6049 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
6050 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
6051 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
6052 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
6053 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
6054 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
6055 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
6056 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6057 pVCpu->iem.s.cActiveMappings++;
6058
6059 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6060 *ppvMem = pbBuf;
6061 return VINF_SUCCESS;
6062}
6063
6064
6065
6066/**
6067 * Maps the specified guest memory for the given kind of access.
6068 *
6069 * This may be using bounce buffering of the memory if it's crossing a page
6070 * boundary or if there is an access handler installed for any of it. Because
6071 * of lock prefix guarantees, we're in for some extra clutter when this
6072 * happens.
6073 *
6074 * This may raise a \#GP, \#SS, \#PF or \#AC.
6075 *
6076 * @returns VBox strict status code.
6077 *
6078 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6079 * @param ppvMem Where to return the pointer to the mapped memory.
6080 * @param cbMem The number of bytes to map. This is usually 1, 2, 4, 6,
6081 * 8, 12, 16, 32 or 512. When used by string operations
6082 * it can be up to a page.
6083 * @param iSegReg The index of the segment register to use for this
6084 * access. The base and limits are checked. Use UINT8_MAX
6085 * to indicate that no segmentation is required (for IDT,
6086 * GDT and LDT accesses).
6087 * @param GCPtrMem The address of the guest memory.
6088 * @param fAccess How the memory is being accessed. The
6089 * IEM_ACCESS_TYPE_XXX bit is used to figure out how to map
6090 * the memory, while the IEM_ACCESS_WHAT_XXX bit is used
6091 * when raising exceptions.
6092 * @param uAlignCtl Alignment control:
6093 * - Bits 15:0 is the alignment mask.
6094 * - Bits 31:16 for flags like IEM_MEMMAP_F_ALIGN_GP,
6095 * IEM_MEMMAP_F_ALIGN_SSE, and
6096 * IEM_MEMMAP_F_ALIGN_GP_OR_AC.
6097 * Pass zero to skip alignment.
6098 */
6099VBOXSTRICTRC iemMemMap(PVMCPUCC pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem,
6100 uint32_t fAccess, uint32_t uAlignCtl) RT_NOEXCEPT
6101{
6102 /*
6103 * Check the input and figure out which mapping entry to use.
6104 */
6105 Assert(cbMem <= sizeof(pVCpu->iem.s.aBounceBuffers[0]));
6106 Assert( cbMem <= 64 || cbMem == 512 || cbMem == 256 || cbMem == 108 || cbMem == 104 || cbMem == 102 || cbMem == 94
6107 || (iSegReg == UINT8_MAX && uAlignCtl == 0 && fAccess == IEM_ACCESS_DATA_R /* for the CPUID logging interface */) );
6108 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
6109 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
6110
6111 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
6112 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6113 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
6114 {
6115 iMemMap = iemMemMapFindFree(pVCpu);
6116 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
6117 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
6118 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
6119 pVCpu->iem.s.aMemMappings[2].fAccess),
6120 VERR_IEM_IPE_9);
6121 }
6122
6123 /*
6124 * Map the memory, checking that we can actually access it. If something
6125 * slightly complicated happens, fall back on bounce buffering.
6126 */
6127 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
6128 if (rcStrict == VINF_SUCCESS)
6129 { /* likely */ }
6130 else
6131 return rcStrict;
6132
6133 if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE) /* Crossing a page boundary? */
6134 { /* likely */ }
6135 else
6136 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
6137
6138 /*
6139 * Alignment check.
6140 */
6141 if ( (GCPtrMem & (uAlignCtl & UINT16_MAX)) == 0 )
6142 { /* likelyish */ }
6143 else
6144 {
6145 /* Misaligned access. */
6146 if ((fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6147 {
6148 if ( !(uAlignCtl & IEM_MEMMAP_F_ALIGN_GP)
6149 || ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_SSE)
6150 && (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) )
6151 {
6152 AssertCompile(X86_CR0_AM == X86_EFL_AC);
6153
6154 if (iemMemAreAlignmentChecksEnabled(pVCpu))
6155 return iemRaiseAlignmentCheckException(pVCpu);
6156 }
6157 else if ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_GP_OR_AC)
6158 && (GCPtrMem & 3) /* The value 4 matches 10980xe's FXSAVE and helps make bs3-cpu-basic2 work. */
6159 /** @todo may only apply to 2, 4 or 8 byte misalignments depending on the CPU
6160 * implementation. See FXSAVE/FRSTOR/XSAVE/XRSTOR/++. Using 4 for now as
6161 * that's what FXSAVE does on a 10980xe. */
6162 && iemMemAreAlignmentChecksEnabled(pVCpu))
6163 return iemRaiseAlignmentCheckException(pVCpu);
6164 else
6165 return iemRaiseGeneralProtectionFault0(pVCpu);
6166 }
6167 }
6168
6169#ifdef IEM_WITH_DATA_TLB
6170 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
6171
6172 /*
6173 * Get the TLB entry for this page.
6174 */
6175 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
6176 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
6177 if (pTlbe->uTag == uTag)
6178 {
6179# ifdef VBOX_WITH_STATISTICS
6180 pVCpu->iem.s.DataTlb.cTlbHits++;
6181# endif
6182 }
6183 else
6184 {
6185 pVCpu->iem.s.DataTlb.cTlbMisses++;
6186 PGMPTWALK Walk;
6187 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
6188 if (RT_FAILURE(rc))
6189 {
6190 Log(("iemMemMap: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
6191# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6192 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6193 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
6194# endif
6195 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, rc);
6196 }
6197
6198 Assert(Walk.fSucceeded);
6199 pTlbe->uTag = uTag;
6200 pTlbe->fFlagsAndPhysRev = ~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A); /* skipping NX */
6201 pTlbe->GCPhys = Walk.GCPhys;
6202 pTlbe->pbMappingR3 = NULL;
6203 }
6204
6205 /*
6206 * Check TLB page table level access flags.
6207 */
6208 /* If the page is either supervisor only or non-writable, we need to do
6209 more careful access checks. */
6210 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_WRITE))
6211 {
6212 /* Write to read only memory? */
6213 if ( (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_WRITE)
6214 && (fAccess & IEM_ACCESS_TYPE_WRITE)
6215 && ( ( pVCpu->iem.s.uCpl == 3
6216 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6217 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
6218 {
6219 Log(("iemMemMap: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
6220# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6221 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6222 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6223# endif
6224 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
6225 }
6226
6227 /* Kernel memory accessed by userland? */
6228 if ( (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER)
6229 && pVCpu->iem.s.uCpl == 3
6230 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6231 {
6232 Log(("iemMemMap: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
6233# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6234 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6235 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6236# endif
6237 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, VERR_ACCESS_DENIED);
6238 }
6239 }
6240
6241 /*
6242 * Set the dirty / access flags.
6243 * ASSUMES this is set when the address is translated rather than on commit...
6244 */
6245 /** @todo testcase: check when A and D bits are actually set by the CPU. */
6246 uint64_t const fTlbAccessedDirty = (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PT_NO_DIRTY : 0) | IEMTLBE_F_PT_NO_ACCESSED;
6247 if (pTlbe->fFlagsAndPhysRev & fTlbAccessedDirty)
6248 {
6249 uint32_t const fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
6250 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
6251 AssertRC(rc2);
6252 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
6253 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
6254 pTlbe->fFlagsAndPhysRev &= ~fTlbAccessedDirty;
6255 }
6256
6257 /*
6258 * Look up the physical page info if necessary.
6259 */
6260 uint8_t *pbMem = NULL;
6261 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6262# ifdef IN_RING3
6263 pbMem = pTlbe->pbMappingR3;
6264# else
6265 pbMem = NULL;
6266# endif
6267 else
6268 {
6269 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
6270 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
6271 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
6272 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
6273        if (RT_LIKELY(pVCpu->iem.s.DataTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
6274 { /* likely */ }
6275 else
6276 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
6277 pTlbe->pbMappingR3 = NULL;
6278 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
6279 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_UNASSIGNED);
6280 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
6281 &pbMem, &pTlbe->fFlagsAndPhysRev);
6282 AssertRCReturn(rc, rc);
6283# ifdef IN_RING3
6284 pTlbe->pbMappingR3 = pbMem;
6285# endif
6286 }
6287
6288 /*
6289 * Check the physical page level access and mapping.
6290 */
6291 if ( !(pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ))
6292 || !(pTlbe->fFlagsAndPhysRev & ( (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PG_NO_WRITE : 0)
6293 | (fAccess & IEM_ACCESS_TYPE_READ ? IEMTLBE_F_PG_NO_READ : 0))) )
6294 { /* probably likely */ }
6295 else
6296 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem,
6297 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,
6298 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED
6299 : pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? VERR_PGM_PHYS_TLB_CATCH_ALL
6300 : VERR_PGM_PHYS_TLB_CATCH_WRITE);
6301 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3)); /* ASSUMPTIONS about PGMPhysIemGCPhys2PtrNoLock behaviour. */
6302
6303 if (pbMem)
6304 {
6305 Assert(!((uintptr_t)pbMem & GUEST_PAGE_OFFSET_MASK));
6306 pbMem = pbMem + (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6307 fAccess |= IEM_ACCESS_NOT_LOCKED;
6308 }
6309 else
6310 {
6311 Assert(!(fAccess & IEM_ACCESS_NOT_LOCKED));
6312 RTGCPHYS const GCPhysFirst = pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6313 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6314 if (rcStrict != VINF_SUCCESS)
6315 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
6316 }
6317
6318 void * const pvMem = pbMem;
6319
6320 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6321 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6322 if (fAccess & IEM_ACCESS_TYPE_READ)
6323 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6324
6325#else /* !IEM_WITH_DATA_TLB */
6326
6327 RTGCPHYS GCPhysFirst;
6328 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, &GCPhysFirst);
6329 if (rcStrict != VINF_SUCCESS)
6330 return rcStrict;
6331
6332 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6333 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6334 if (fAccess & IEM_ACCESS_TYPE_READ)
6335 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6336
6337 void *pvMem;
6338 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6339 if (rcStrict != VINF_SUCCESS)
6340 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
6341
6342#endif /* !IEM_WITH_DATA_TLB */
6343
6344 /*
6345 * Fill in the mapping table entry.
6346 */
6347 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
6348 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
6349 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6350 pVCpu->iem.s.cActiveMappings += 1;
6351
6352 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6353 *ppvMem = pvMem;
6354
6355 return VINF_SUCCESS;
6356}
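
/*
 * Illustrative usage sketch for iemMemMap above and iemMemCommitAndUnmap below;
 * not part of any build.  Instruction emulations normally reach these through
 * the IEM_MC_* microcode wrappers, and GCPtrEff and u16Imm below are assumed to
 * come from the instruction decoder:
 *
 *      uint16_t    *pu16Dst;
 *      VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_DS, GCPtrEff,
 *                                        IEM_ACCESS_DATA_RW, sizeof(*pu16Dst) - 1 /*uAlignCtl*/);
 *      if (rcStrict == VINF_SUCCESS)
 *      {
 *          *pu16Dst |= u16Imm;     // works the same whether pu16Dst is a direct mapping or a bounce buffer
 *          rcStrict  = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_RW);
 *      }
 *      return rcStrict;
 */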
6357
6358
6359/**
6360 * Commits the guest memory if bounce buffered and unmaps it.
6361 *
6362 * @returns Strict VBox status code.
6363 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6364 * @param pvMem The mapping.
6365 * @param fAccess The kind of access.
6366 */
6367VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess) RT_NOEXCEPT
6368{
6369 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
6370 AssertReturn(iMemMap >= 0, iMemMap);
6371
6372 /* If it's bounce buffered, we may need to write back the buffer. */
6373 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6374 {
6375 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6376 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
6377 }
6378 /* Otherwise unlock it. */
6379 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6380 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6381
6382 /* Free the entry. */
6383 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6384 Assert(pVCpu->iem.s.cActiveMappings != 0);
6385 pVCpu->iem.s.cActiveMappings--;
6386 return VINF_SUCCESS;
6387}
6388
6389#ifdef IEM_WITH_SETJMP
6390
6391/**
6392 * Maps the specified guest memory for the given kind of access, longjmp on
6393 * error.
6394 *
6395 * This may be using bounce buffering of the memory if it's crossing a page
6396 * boundary or if there is an access handler installed for any of it. Because
6397 * of lock prefix guarantees, we're in for some extra clutter when this
6398 * happens.
6399 *
6400 * This may raise a \#GP, \#SS, \#PF or \#AC.
6401 *
6402 * @returns Pointer to the mapped memory.
6403 *
6404 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6405 * @param cbMem The number of bytes to map. This is usually 1,
6406 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
6407 * string operations it can be up to a page.
6408 * @param iSegReg The index of the segment register to use for
6409 * this access. The base and limits are checked.
6410 * Use UINT8_MAX to indicate that no segmentation
6411 * is required (for IDT, GDT and LDT accesses).
6412 * @param GCPtrMem The address of the guest memory.
6413 * @param fAccess How the memory is being accessed. The
6414 * IEM_ACCESS_TYPE_XXX bit is used to figure out
6415 * how to map the memory, while the
6416 * IEM_ACCESS_WHAT_XXX bit is used when raising
6417 * exceptions.
6418 * @param uAlignCtl Alignment control:
6419 * - Bits 15:0 is the alignment mask.
6420 * - Bits 31:16 for flags like IEM_MEMMAP_F_ALIGN_GP,
6421 * IEM_MEMMAP_F_ALIGN_SSE, and
6422 * IEM_MEMMAP_F_ALIGN_GP_OR_AC.
6423 * Pass zero to skip alignment.
6424 */
6425void *iemMemMapJmp(PVMCPUCC pVCpu, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess,
6426 uint32_t uAlignCtl) IEM_NOEXCEPT_MAY_LONGJMP
6427{
6428 /*
6429 * Check the input, check segment access and adjust address
6430 * with segment base.
6431 */
6432 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
6433 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
6434 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
6435
6436 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
6437 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
6438 else IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6439
6440 /*
6441 * Alignment check.
6442 */
6443 if ( (GCPtrMem & (uAlignCtl & UINT16_MAX)) == 0 )
6444 { /* likelyish */ }
6445 else
6446 {
6447 /* Misaligned access. */
6448 if ((fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6449 {
6450 if ( !(uAlignCtl & IEM_MEMMAP_F_ALIGN_GP)
6451 || ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_SSE)
6452 && (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) )
6453 {
6454 AssertCompile(X86_CR0_AM == X86_EFL_AC);
6455
6456 if (iemMemAreAlignmentChecksEnabled(pVCpu))
6457 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
6458 }
6459 else if ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_GP_OR_AC)
6460 && (GCPtrMem & 3) /* The value 4 matches 10980xe's FXSAVE and helps make bs3-cpu-basic2 work. */
6461 /** @todo may only apply to 2, 4 or 8 byte misalignments depending on the CPU
6462 * implementation. See FXSAVE/FRSTOR/XSAVE/XRSTOR/++. Using 4 for now as
6463 * that's what FXSAVE does on a 10980xe. */
6464 && iemMemAreAlignmentChecksEnabled(pVCpu))
6465 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
6466 else
6467 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
6468 }
6469 }
6470
6471 /*
6472 * Figure out which mapping entry to use.
6473 */
6474 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
6475 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6476 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
6477 {
6478 iMemMap = iemMemMapFindFree(pVCpu);
6479 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
6480 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
6481 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
6482 pVCpu->iem.s.aMemMappings[2].fAccess),
6483 IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_9));
6484 }
6485
6486 /*
6487 * Crossing a page boundary?
6488 */
6489 if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE)
6490 { /* No (likely). */ }
6491 else
6492 {
6493 void *pvMem;
6494 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, cbMem, GCPtrMem, fAccess);
6495 if (rcStrict == VINF_SUCCESS)
6496 return pvMem;
6497 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6498 }
6499
6500#ifdef IEM_WITH_DATA_TLB
6501 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
6502
6503 /*
6504 * Get the TLB entry for this page.
6505 */
6506 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
6507 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
6508 if (pTlbe->uTag == uTag)
6509 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
6510 else
6511 {
6512 pVCpu->iem.s.DataTlb.cTlbMisses++;
6513 PGMPTWALK Walk;
6514 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
6515 if (RT_FAILURE(rc))
6516 {
6517            Log(("iemMemMapJmp: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
6518# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6519 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6520 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
6521# endif
6522 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, rc);
6523 }
6524
6525 Assert(Walk.fSucceeded);
6526 pTlbe->uTag = uTag;
6527 pTlbe->fFlagsAndPhysRev = ~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A); /* skipping NX */
6528 pTlbe->GCPhys = Walk.GCPhys;
6529 pTlbe->pbMappingR3 = NULL;
6530 }
6531
6532 /*
6533 * Check the flags and physical revision.
6534 */
6535 /** @todo make the caller pass these in with fAccess. */
6536 uint64_t const fNoUser = (fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS && pVCpu->iem.s.uCpl == 3
6537 ? IEMTLBE_F_PT_NO_USER : 0;
6538 uint64_t const fNoWriteNoDirty = fAccess & IEM_ACCESS_TYPE_WRITE
6539 ? IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PT_NO_DIRTY
6540 | ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)
6541 || (pVCpu->iem.s.uCpl == 3 && (fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6542 ? IEMTLBE_F_PT_NO_WRITE : 0)
6543 : 0;
6544 uint64_t const fNoRead = fAccess & IEM_ACCESS_TYPE_READ ? IEMTLBE_F_PG_NO_READ : 0;
6545 uint8_t *pbMem = NULL;
6546 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PT_NO_ACCESSED | fNoRead | fNoWriteNoDirty | fNoUser))
6547 == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6548# ifdef IN_RING3
6549 pbMem = pTlbe->pbMappingR3;
6550# else
6551 pbMem = NULL;
6552# endif
6553 else
6554 {
6555 /*
6556 * Okay, something isn't quite right or needs refreshing.
6557 */
6558 /* Write to read only memory? */
6559 if (pTlbe->fFlagsAndPhysRev & fNoWriteNoDirty & IEMTLBE_F_PT_NO_WRITE)
6560 {
6561 Log(("iemMemMapJmp: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
6562# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6563 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6564 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6565# endif
6566 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
6567 }
6568
6569 /* Kernel memory accessed by userland? */
6570 if (pTlbe->fFlagsAndPhysRev & fNoUser & IEMTLBE_F_PT_NO_USER)
6571 {
6572 Log(("iemMemMapJmp: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
6573# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6574 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6575 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6576# endif
6577 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, VERR_ACCESS_DENIED);
6578 }
6579
6580 /* Set the dirty / access flags.
6581 ASSUMES this is set when the address is translated rather than on commit... */
6582 /** @todo testcase: check when A and D bits are actually set by the CPU. */
6583 if (pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY) | IEMTLBE_F_PT_NO_ACCESSED))
6584 {
6585 uint32_t const fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
6586 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
6587 AssertRC(rc2);
6588 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
6589 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
6590 pTlbe->fFlagsAndPhysRev &= ~((fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY) | IEMTLBE_F_PT_NO_ACCESSED);
6591 }
6592
6593 /*
6594 * Check if the physical page info needs updating.
6595 */
6596 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6597# ifdef IN_RING3
6598 pbMem = pTlbe->pbMappingR3;
6599# else
6600 pbMem = NULL;
6601# endif
6602 else
6603 {
6604 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
6605 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
6606 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
6607 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
6608 pTlbe->pbMappingR3 = NULL;
6609 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
6610 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_UNASSIGNED);
6611 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
6612 &pbMem, &pTlbe->fFlagsAndPhysRev);
6613 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
6614# ifdef IN_RING3
6615 pTlbe->pbMappingR3 = pbMem;
6616# endif
6617 }
6618
6619 /*
6620 * Check the physical page level access and mapping.
6621 */
6622 if (!(pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty | fNoRead) & (IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ))))
6623 { /* probably likely */ }
6624 else
6625 {
6626 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, (void **)&pbMem, cbMem,
6627 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,
6628 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED
6629 : pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? VERR_PGM_PHYS_TLB_CATCH_ALL
6630 : VERR_PGM_PHYS_TLB_CATCH_WRITE);
6631 if (rcStrict == VINF_SUCCESS)
6632 return pbMem;
6633 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6634 }
6635 }
6636 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3)); /* ASSUMPTIONS about PGMPhysIemGCPhys2PtrNoLock behaviour. */
6637
6638 if (pbMem)
6639 {
6640 Assert(!((uintptr_t)pbMem & GUEST_PAGE_OFFSET_MASK));
6641 pbMem = pbMem + (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6642 fAccess |= IEM_ACCESS_NOT_LOCKED;
6643 }
6644 else
6645 {
6646 Assert(!(fAccess & IEM_ACCESS_NOT_LOCKED));
6647 RTGCPHYS const GCPhysFirst = pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6648 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6649 if (rcStrict == VINF_SUCCESS)
6650 return pbMem;
6651 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6652 }
6653
6654 void * const pvMem = pbMem;
6655
6656 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6657 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6658 if (fAccess & IEM_ACCESS_TYPE_READ)
6659 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6660
6661#else /* !IEM_WITH_DATA_TLB */
6662
6663
6664 RTGCPHYS GCPhysFirst;
6665 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, &GCPhysFirst);
6666 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
6667 else IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6668
6669 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6670 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6671 if (fAccess & IEM_ACCESS_TYPE_READ)
6672 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6673
6674 void *pvMem;
6675 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6676 if (rcStrict == VINF_SUCCESS)
6677 { /* likely */ }
6678 else
6679 {
6680 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
6681 if (rcStrict == VINF_SUCCESS)
6682 return pvMem;
6683 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6684 }
6685
6686#endif /* !IEM_WITH_DATA_TLB */
6687
6688 /*
6689 * Fill in the mapping table entry.
6690 */
6691 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
6692 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
6693 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6694 pVCpu->iem.s.cActiveMappings++;
6695
6696 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6697 return pvMem;
6698}
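
/*
 * Illustrative sketch of the longjmp flavour above (not built): there is no
 * status code plumbing, as failures longjmp out of the helpers via
 * IEM_DO_LONGJMP.  GCPtrEff and u32Imm are assumed decoder outputs:
 *
 *      uint32_t *pu32Dst = (uint32_t *)iemMemMapJmp(pVCpu, sizeof(*pu32Dst), X86_SREG_DS, GCPtrEff,
 *                                                   IEM_ACCESS_DATA_RW, sizeof(*pu32Dst) - 1 /*uAlignCtl*/);
 *      *pu32Dst ^= u32Imm;
 *      iemMemCommitAndUnmapJmp(pVCpu, pu32Dst, IEM_ACCESS_DATA_RW);
 */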
6699
6700
6701/**
6702 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
6703 *
6704 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6705 * @param pvMem The mapping.
6706 * @param fAccess The kind of access.
6707 */
6708void iemMemCommitAndUnmapJmp(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP
6709{
6710 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
6711 AssertStmt(iMemMap >= 0, IEM_DO_LONGJMP(pVCpu, iMemMap));
6712
6713 /* If it's bounce buffered, we may need to write back the buffer. */
6714 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6715 {
6716 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6717 {
6718 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
6719 if (rcStrict == VINF_SUCCESS)
6720 return;
6721 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6722 }
6723 }
6724 /* Otherwise unlock it. */
6725 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6726 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6727
6728 /* Free the entry. */
6729 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6730 Assert(pVCpu->iem.s.cActiveMappings != 0);
6731 pVCpu->iem.s.cActiveMappings--;
6732}
6733
6734#endif /* IEM_WITH_SETJMP */
6735
6736#ifndef IN_RING3
6737/**
6738 * Commits the guest memory if bounce buffered and unmaps it, if any bounce
6739 * buffer part shows trouble it will be postponed to ring-3 (sets FF and stuff).
6740 *
6741 * Allows the instruction to be completed and retired, while the IEM user will
6742 * return to ring-3 immediately afterwards and do the postponed writes there.
6743 *
6744 * @returns VBox status code (no strict statuses). Caller must check
6745 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
6746 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6747 * @param pvMem The mapping.
6748 * @param fAccess The kind of access.
6749 */
6750VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess) RT_NOEXCEPT
6751{
6752 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
6753 AssertReturn(iMemMap >= 0, iMemMap);
6754
6755 /* If it's bounce buffered, we may need to write back the buffer. */
6756 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6757 {
6758 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6759 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
6760 }
6761 /* Otherwise unlock it. */
6762 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6763 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6764
6765 /* Free the entry. */
6766 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6767 Assert(pVCpu->iem.s.cActiveMappings != 0);
6768 pVCpu->iem.s.cActiveMappings--;
6769 return VINF_SUCCESS;
6770}
6771#endif
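
/*
 * Sketch of the calling pattern intended for the postpone variant above; this
 * is an assumption distilled from its doxygen remarks rather than a copy of an
 * actual caller (the string instruction helpers are the real users), and
 * pu8Dst is a hypothetical mapping:
 *
 *      rcStrict = iemMemCommitAndUnmapPostponeTroubleToR3(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      // The instruction is retired at this point.  Before looping for the next
 *      // string iteration, honour the force flag so the postponed writes get
 *      // flushed in ring-3 first:
 *      if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
 *          return VINF_SUCCESS;
 */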
6772
6773
6774/**
6775 * Rollbacks mappings, releasing page locks and such.
6776 *
6777 * The caller shall only call this after checking cActiveMappings.
6778 *
6779 * @returns Strict VBox status code to pass up.
6780 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6781 */
6782void iemMemRollback(PVMCPUCC pVCpu) RT_NOEXCEPT
6783{
6784 Assert(pVCpu->iem.s.cActiveMappings > 0);
6785
6786 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
6787 while (iMemMap-- > 0)
6788 {
6789 uint32_t const fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
6790 if (fAccess != IEM_ACCESS_INVALID)
6791 {
6792 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
6793 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6794 if (!(fAccess & (IEM_ACCESS_BOUNCE_BUFFERED | IEM_ACCESS_NOT_LOCKED)))
6795 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6796 AssertMsg(pVCpu->iem.s.cActiveMappings > 0,
6797 ("iMemMap=%u fAccess=%#x pv=%p GCPhysFirst=%RGp GCPhysSecond=%RGp\n",
6798 iMemMap, fAccess, pVCpu->iem.s.aMemMappings[iMemMap].pv,
6799 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond));
6800 pVCpu->iem.s.cActiveMappings--;
6801 }
6802 }
6803}
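
/*
 * Sketch of how the execution loops are expected to use the rollback (an
 * assumption based on the remark about checking cActiveMappings first;
 * iemExecOneWorker is a hypothetical stand-in for the actual dispatcher):
 *
 *      VBOXSTRICTRC rcStrict = iemExecOneWorker(pVCpu);
 *      if (rcStrict != VINF_SUCCESS && pVCpu->iem.s.cActiveMappings > 0)
 *          iemMemRollback(pVCpu);   // releases page locks and discards bounce buffers of the failed instruction
 */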
6804
6805
6806/**
6807 * Fetches a data byte.
6808 *
6809 * @returns Strict VBox status code.
6810 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6811 * @param pu8Dst Where to return the byte.
6812 * @param iSegReg The index of the segment register to use for
6813 * this access. The base and limits are checked.
6814 * @param GCPtrMem The address of the guest memory.
6815 */
6816VBOXSTRICTRC iemMemFetchDataU8(PVMCPUCC pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6817{
6818 /* The lazy approach for now... */
6819 uint8_t const *pu8Src;
6820 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R, 0);
6821 if (rc == VINF_SUCCESS)
6822 {
6823 *pu8Dst = *pu8Src;
6824 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
6825 }
6826 return rc;
6827}
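
/*
 * All the data fetch helpers below follow the same calling pattern; a minimal
 * sketch (not built), with iSegReg/GCPtrEff assumed to come from the decoded
 * instruction:
 *
 *      uint8_t      bValue;
 *      VBOXSTRICTRC rcStrict = iemMemFetchDataU8(pVCpu, &bValue, X86_SREG_DS, GCPtrEff);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;    // any #PF/#GP/#SS/#AC condition was already turned into a status by the mapping code
 */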
6828
6829
6830#ifdef IEM_WITH_SETJMP
6831/**
6832 * Fetches a data byte, longjmp on error.
6833 *
6834 * @returns The byte.
6835 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6836 * @param iSegReg The index of the segment register to use for
6837 * this access. The base and limits are checked.
6838 * @param GCPtrMem The address of the guest memory.
6839 */
6840uint8_t iemMemFetchDataU8Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
6841{
6842 /* The lazy approach for now... */
6843 uint8_t const *pu8Src = (uint8_t const *)iemMemMapJmp(pVCpu, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R, 0);
6844 uint8_t const bRet = *pu8Src;
6845 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
6846 return bRet;
6847}
6848#endif /* IEM_WITH_SETJMP */
6849
6850
6851/**
6852 * Fetches a data word.
6853 *
6854 * @returns Strict VBox status code.
6855 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6856 * @param pu16Dst Where to return the word.
6857 * @param iSegReg The index of the segment register to use for
6858 * this access. The base and limits are checked.
6859 * @param GCPtrMem The address of the guest memory.
6860 */
6861VBOXSTRICTRC iemMemFetchDataU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6862{
6863 /* The lazy approach for now... */
6864 uint16_t const *pu16Src;
6865 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem,
6866 IEM_ACCESS_DATA_R, sizeof(*pu16Src) - 1);
6867 if (rc == VINF_SUCCESS)
6868 {
6869 *pu16Dst = *pu16Src;
6870 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
6871 }
6872 return rc;
6873}
6874
6875
6876#ifdef IEM_WITH_SETJMP
6877/**
6878 * Fetches a data word, longjmp on error.
6879 *
6880 * @returns The word
6881 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6882 * @param iSegReg The index of the segment register to use for
6883 * this access. The base and limits are checked.
6884 * @param GCPtrMem The address of the guest memory.
6885 */
6886uint16_t iemMemFetchDataU16Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
6887{
6888 /* The lazy approach for now... */
6889 uint16_t const *pu16Src = (uint16_t const *)iemMemMapJmp(pVCpu, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
6890 sizeof(*pu16Src) - 1);
6891 uint16_t const u16Ret = *pu16Src;
6892 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
6893 return u16Ret;
6894}
6895#endif
6896
6897
6898/**
6899 * Fetches a data dword.
6900 *
6901 * @returns Strict VBox status code.
6902 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6903 * @param pu32Dst Where to return the dword.
6904 * @param iSegReg The index of the segment register to use for
6905 * this access. The base and limits are checked.
6906 * @param GCPtrMem The address of the guest memory.
6907 */
6908VBOXSTRICTRC iemMemFetchDataU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6909{
6910 /* The lazy approach for now... */
6911 uint32_t const *pu32Src;
6912 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem,
6913 IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
6914 if (rc == VINF_SUCCESS)
6915 {
6916 *pu32Dst = *pu32Src;
6917 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
6918 }
6919 return rc;
6920}
6921
6922
6923/**
6924 * Fetches a data dword and zero extends it to a qword.
6925 *
6926 * @returns Strict VBox status code.
6927 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6928 * @param pu64Dst Where to return the qword.
6929 * @param iSegReg The index of the segment register to use for
6930 * this access. The base and limits are checked.
6931 * @param GCPtrMem The address of the guest memory.
6932 */
6933VBOXSTRICTRC iemMemFetchDataU32_ZX_U64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6934{
6935 /* The lazy approach for now... */
6936 uint32_t const *pu32Src;
6937 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem,
6938 IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
6939 if (rc == VINF_SUCCESS)
6940 {
6941 *pu64Dst = *pu32Src;
6942 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
6943 }
6944 return rc;
6945}
6946
6947
6948#ifdef IEM_WITH_SETJMP
6949
6950/**
6951 * Fetches a data dword, longjmp on error, fallback/safe version.
6952 *
6953 * @returns The dword
6954 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6955 * @param iSegReg The index of the segment register to use for
6956 * this access. The base and limits are checked.
6957 * @param GCPtrMem The address of the guest memory.
6958 */
6959uint32_t iemMemFetchDataU32SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
6960{
6961 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
6962 sizeof(*pu32Src) - 1);
6963 uint32_t const u32Ret = *pu32Src;
6964 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
6965 return u32Ret;
6966}
6967
6968
6969/**
6970 * Fetches a data dword, longjmp on error.
6971 *
6972 * @returns The dword
6973 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6974 * @param iSegReg The index of the segment register to use for
6975 * this access. The base and limits are checked.
6976 * @param GCPtrMem The address of the guest memory.
6977 */
6978uint32_t iemMemFetchDataU32Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
6979{
6980# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
6981 /*
6982     * Convert from segmented to flat address and check that it doesn't cross a page boundary.
6983 */
6984 RTGCPTR GCPtrEff = iemMemApplySegmentToReadJmp(pVCpu, iSegReg, sizeof(uint32_t), GCPtrMem);
6985 if (RT_LIKELY((GCPtrEff & GUEST_PAGE_OFFSET_MASK) <= GUEST_PAGE_SIZE - sizeof(uint32_t)))
6986 {
6987 /*
6988 * TLB lookup.
6989 */
6990 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrEff);
6991 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
6992 if (pTlbe->uTag == uTag)
6993 {
6994 /*
6995 * Check TLB page table level access flags.
6996 */
6997 uint64_t const fNoUser = pVCpu->iem.s.uCpl == 3 ? IEMTLBE_F_PT_NO_USER : 0;
6998 if ( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_READ
6999 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_NO_MAPPINGR3 | fNoUser))
7000 == pVCpu->iem.s.DataTlb.uTlbPhysRev)
7001 {
7002 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
7003
7004 /*
7005 * Alignment check:
7006 */
7007 /** @todo check priority \#AC vs \#PF */
7008 if ( !(GCPtrEff & (sizeof(uint32_t) - 1))
7009 || !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_AM)
7010 || !pVCpu->cpum.GstCtx.eflags.Bits.u1AC
7011 || pVCpu->iem.s.uCpl != 3)
7012 {
7013 /*
7014 * Fetch and return the dword
7015 */
7016 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
7017 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
7018 return *(uint32_t const *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK];
7019 }
7020 Log10(("iemMemFetchDataU32Jmp: Raising #AC for %RGv\n", GCPtrEff));
7021 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
7022 }
7023 }
7024 }
7025
7026    /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
7027       outdated page pointer, or other troubles. */
7028 Log10(("iemMemFetchDataU32Jmp: %u:%RGv fallback\n", iSegReg, GCPtrMem));
7029 return iemMemFetchDataU32SafeJmp(pVCpu, iSegReg, GCPtrMem);
7030
7031# else
7032 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem,
7033 IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
7034 uint32_t const u32Ret = *pu32Src;
7035 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
7036 return u32Ret;
7037# endif
7038}
7039#endif
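
/*
 * Note on the inlined alignment test in iemMemFetchDataU32Jmp above: the TLB
 * fast path only needs to raise \#AC when all of the following hold, i.e. the
 * misalignment test combined with the conditions iemMemAreAlignmentChecksEnabled()
 * covers on the slow path (sketch only):
 *
 *      bool const fRaiseAc = (GCPtrEff & 3)                            // misaligned dword
 *                         && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_AM)     // CR0.AM enabled
 *                         && pVCpu->cpum.GstCtx.eflags.Bits.u1AC       // EFLAGS.AC set
 *                         && pVCpu->iem.s.uCpl == 3;                   // and only at CPL 3
 */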
7040
7041
7042#ifdef SOME_UNUSED_FUNCTION
7043/**
7044 * Fetches a data dword and sign extends it to a qword.
7045 *
7046 * @returns Strict VBox status code.
7047 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7048 * @param pu64Dst Where to return the sign extended value.
7049 * @param iSegReg The index of the segment register to use for
7050 * this access. The base and limits are checked.
7051 * @param GCPtrMem The address of the guest memory.
7052 */
7053VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7054{
7055 /* The lazy approach for now... */
7056 int32_t const *pi32Src;
7057 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem,
7058 IEM_ACCESS_DATA_R, sizeof(*pi32Src) - 1);
7059 if (rc == VINF_SUCCESS)
7060 {
7061 *pu64Dst = *pi32Src;
7062 rc = iemMemCommitAndUnmap(pVCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
7063 }
7064#ifdef __GNUC__ /* warning: GCC may be a royal pain */
7065 else
7066 *pu64Dst = 0;
7067#endif
7068 return rc;
7069}
7070#endif
7071
7072
7073/**
7074 * Fetches a data qword.
7075 *
7076 * @returns Strict VBox status code.
7077 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7078 * @param pu64Dst Where to return the qword.
7079 * @param iSegReg The index of the segment register to use for
7080 * this access. The base and limits are checked.
7081 * @param GCPtrMem The address of the guest memory.
7082 */
7083VBOXSTRICTRC iemMemFetchDataU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7084{
7085 /* The lazy approach for now... */
7086 uint64_t const *pu64Src;
7087 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem,
7088 IEM_ACCESS_DATA_R, sizeof(*pu64Src) - 1);
7089 if (rc == VINF_SUCCESS)
7090 {
7091 *pu64Dst = *pu64Src;
7092 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
7093 }
7094 return rc;
7095}
7096
7097
7098#ifdef IEM_WITH_SETJMP
7099/**
7100 * Fetches a data qword, longjmp on error.
7101 *
7102 * @returns The qword.
7103 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7104 * @param iSegReg The index of the segment register to use for
7105 * this access. The base and limits are checked.
7106 * @param GCPtrMem The address of the guest memory.
7107 */
7108uint64_t iemMemFetchDataU64Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7109{
7110 /* The lazy approach for now... */
7111 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem,
7112 IEM_ACCESS_DATA_R, sizeof(*pu64Src) - 1);
7113 uint64_t const u64Ret = *pu64Src;
7114 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
7115 return u64Ret;
7116}
7117#endif
7118
7119
7120/**
7121 * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
7122 *
7123 * @returns Strict VBox status code.
7124 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7125 * @param pu64Dst Where to return the qword.
7126 * @param iSegReg The index of the segment register to use for
7127 * this access. The base and limits are checked.
7128 * @param GCPtrMem The address of the guest memory.
7129 */
7130VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7131{
7132 /* The lazy approach for now... */
7133 uint64_t const *pu64Src;
7134 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem,
7135 IEM_ACCESS_DATA_R, 15 | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7136 if (rc == VINF_SUCCESS)
7137 {
7138 *pu64Dst = *pu64Src;
7139 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
7140 }
7141 return rc;
7142}
7143
7144
7145#ifdef IEM_WITH_SETJMP
7146/**
7147 * Fetches a data qword, aligned at a 16 byte boundary (for SSE), longjmp on error.
7148 *
7149 * @returns The qword.
7150 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7151 * @param iSegReg The index of the segment register to use for
7152 * this access. The base and limits are checked.
7153 * @param GCPtrMem The address of the guest memory.
7154 */
7155uint64_t iemMemFetchDataU64AlignedU128Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7156{
7157 /* The lazy approach for now... */
7158 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
7159 15 | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7160 uint64_t const u64Ret = *pu64Src;
7161 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
7162 return u64Ret;
7163}
7164#endif
7165
7166
7167/**
7168 * Fetches a data tword.
7169 *
7170 * @returns Strict VBox status code.
7171 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7172 * @param pr80Dst Where to return the tword.
7173 * @param iSegReg The index of the segment register to use for
7174 * this access. The base and limits are checked.
7175 * @param GCPtrMem The address of the guest memory.
7176 */
7177VBOXSTRICTRC iemMemFetchDataR80(PVMCPUCC pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7178{
7179 /* The lazy approach for now... */
7180 PCRTFLOAT80U pr80Src;
7181 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R, 7);
7182 if (rc == VINF_SUCCESS)
7183 {
7184 *pr80Dst = *pr80Src;
7185 rc = iemMemCommitAndUnmap(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
7186 }
7187 return rc;
7188}
7189
7190
7191#ifdef IEM_WITH_SETJMP
7192/**
7193 * Fetches a data tword, longjmp on error.
7194 *
7195 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7196 * @param pr80Dst Where to return the tword.
7197 * @param iSegReg The index of the segment register to use for
7198 * this access. The base and limits are checked.
7199 * @param GCPtrMem The address of the guest memory.
7200 */
7201void iemMemFetchDataR80Jmp(PVMCPUCC pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7202{
7203 /* The lazy approach for now... */
7204 PCRTFLOAT80U pr80Src = (PCRTFLOAT80U)iemMemMapJmp(pVCpu, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R, 7);
7205 *pr80Dst = *pr80Src;
7206 iemMemCommitAndUnmapJmp(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
7207}
7208#endif
7209
7210
7211/**
7212 * Fetches a data decimal tword.
7213 *
7214 * @returns Strict VBox status code.
7215 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7216 * @param pd80Dst Where to return the tword.
7217 * @param iSegReg The index of the segment register to use for
7218 * this access. The base and limits are checked.
7219 * @param GCPtrMem The address of the guest memory.
7220 */
7221VBOXSTRICTRC iemMemFetchDataD80(PVMCPUCC pVCpu, PRTPBCD80U pd80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7222{
7223 /* The lazy approach for now... */
7224 PCRTPBCD80U pd80Src;
7225 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pd80Src, sizeof(*pd80Src), iSegReg, GCPtrMem,
7226 IEM_ACCESS_DATA_R, 7 /** @todo FBLD alignment check */);
7227 if (rc == VINF_SUCCESS)
7228 {
7229 *pd80Dst = *pd80Src;
7230 rc = iemMemCommitAndUnmap(pVCpu, (void *)pd80Src, IEM_ACCESS_DATA_R);
7231 }
7232 return rc;
7233}
7234
7235
7236#ifdef IEM_WITH_SETJMP
7237/**
7238 * Fetches a data decimal tword, longjmp on error.
7239 *
7240 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7241 * @param pd80Dst Where to return the tword.
7242 * @param iSegReg The index of the segment register to use for
7243 * this access. The base and limits are checked.
7244 * @param GCPtrMem The address of the guest memory.
7245 */
7246void iemMemFetchDataD80Jmp(PVMCPUCC pVCpu, PRTPBCD80U pd80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7247{
7248 /* The lazy approach for now... */
7249 PCRTPBCD80U pd80Src = (PCRTPBCD80U)iemMemMapJmp(pVCpu, sizeof(*pd80Src), iSegReg, GCPtrMem,
7250 IEM_ACCESS_DATA_R, 7 /** @todo FBSTP alignment check */);
7251 *pd80Dst = *pd80Src;
7252 iemMemCommitAndUnmapJmp(pVCpu, (void *)pd80Src, IEM_ACCESS_DATA_R);
7253}
7254#endif
7255
7256
7257/**
7258 * Fetches a data dqword (double qword), generally SSE related.
7259 *
7260 * @returns Strict VBox status code.
7261 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7262 * @param   pu128Dst            Where to return the dqword.
7263 * @param iSegReg The index of the segment register to use for
7264 * this access. The base and limits are checked.
7265 * @param GCPtrMem The address of the guest memory.
7266 */
7267VBOXSTRICTRC iemMemFetchDataU128(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7268{
7269 /* The lazy approach for now... */
7270 PCRTUINT128U pu128Src;
7271 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem,
7272 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7273 if (rc == VINF_SUCCESS)
7274 {
7275 pu128Dst->au64[0] = pu128Src->au64[0];
7276 pu128Dst->au64[1] = pu128Src->au64[1];
7277 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7278 }
7279 return rc;
7280}
7281
7282
7283#ifdef IEM_WITH_SETJMP
7284/**
7285 * Fetches a data dqword (double qword), generally SSE related.
7286 *
7287 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7288 * @param   pu128Dst            Where to return the dqword.
7289 * @param iSegReg The index of the segment register to use for
7290 * this access. The base and limits are checked.
7291 * @param GCPtrMem The address of the guest memory.
7292 */
7293void iemMemFetchDataU128Jmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7294{
7295 /* The lazy approach for now... */
7296 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem,
7297 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7298 pu128Dst->au64[0] = pu128Src->au64[0];
7299 pu128Dst->au64[1] = pu128Src->au64[1];
7300 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7301}
7302#endif
7303
7304
7305/**
7306 * Fetches a data dqword (double qword) at an aligned address, generally SSE
7307 * related.
7308 *
7309 * Raises \#GP(0) if not aligned.
7310 *
7311 * @returns Strict VBox status code.
7312 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7313 * @param   pu128Dst            Where to return the dqword.
7314 * @param iSegReg The index of the segment register to use for
7315 * this access. The base and limits are checked.
7316 * @param GCPtrMem The address of the guest memory.
7317 */
7318VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7319{
7320 /* The lazy approach for now... */
7321 PCRTUINT128U pu128Src;
7322 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem,
7323 IEM_ACCESS_DATA_R, (sizeof(*pu128Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7324 if (rc == VINF_SUCCESS)
7325 {
7326 pu128Dst->au64[0] = pu128Src->au64[0];
7327 pu128Dst->au64[1] = pu128Src->au64[1];
7328 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7329 }
7330 return rc;
7331}
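
/*
 * Illustrative contrast with iemMemFetchDataU128 above (not built): the aligned
 * variant adds a 16 byte alignment check which, per the alignment handling in
 * iemMemMap, raises \#GP(0) on a misaligned access (or goes down the \#AC path
 * when MXCSR.MM is set), as MOVAPS-style accesses require.  GCPtrEff is an
 * assumed decoder output:
 *
 *      RTUINT128U   uSrc;
 *      VBOXSTRICTRC rcStrict = iemMemFetchDataU128AlignedSse(pVCpu, &uSrc, X86_SREG_DS, GCPtrEff);
 */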
7332
7333
7334#ifdef IEM_WITH_SETJMP
7335/**
7336 * Fetches a data dqword (double qword) at an aligned address, generally SSE
7337 * related, longjmp on error.
7338 *
7339 * Raises \#GP(0) if not aligned.
7340 *
7341 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7342 * @param   pu128Dst            Where to return the dqword.
7343 * @param iSegReg The index of the segment register to use for
7344 * this access. The base and limits are checked.
7345 * @param GCPtrMem The address of the guest memory.
7346 */
7347void iemMemFetchDataU128AlignedSseJmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg,
7348 RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7349{
7350 /* The lazy approach for now... */
7351 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
7352 (sizeof(*pu128Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7353 pu128Dst->au64[0] = pu128Src->au64[0];
7354 pu128Dst->au64[1] = pu128Src->au64[1];
7355 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7356}
7357#endif
7358
7359
7360/**
7361 * Fetches a data oword (octo word), generally AVX related.
7362 *
7363 * @returns Strict VBox status code.
7364 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7365 * @param   pu256Dst            Where to return the oword.
7366 * @param iSegReg The index of the segment register to use for
7367 * this access. The base and limits are checked.
7368 * @param GCPtrMem The address of the guest memory.
7369 */
7370VBOXSTRICTRC iemMemFetchDataU256(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7371{
7372 /* The lazy approach for now... */
7373 PCRTUINT256U pu256Src;
7374 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem,
7375 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7376 if (rc == VINF_SUCCESS)
7377 {
7378 pu256Dst->au64[0] = pu256Src->au64[0];
7379 pu256Dst->au64[1] = pu256Src->au64[1];
7380 pu256Dst->au64[2] = pu256Src->au64[2];
7381 pu256Dst->au64[3] = pu256Src->au64[3];
7382 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
7383 }
7384 return rc;
7385}
7386
7387
7388#ifdef IEM_WITH_SETJMP
7389/**
7390 * Fetches a data oword (octo word), generally AVX related.
7391 *
7392 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7393 * @param   pu256Dst            Where to return the oword.
7394 * @param iSegReg The index of the segment register to use for
7395 * this access. The base and limits are checked.
7396 * @param GCPtrMem The address of the guest memory.
7397 */
7398void iemMemFetchDataU256Jmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7399{
7400 /* The lazy approach for now... */
7401 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem,
7402 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7403 pu256Dst->au64[0] = pu256Src->au64[0];
7404 pu256Dst->au64[1] = pu256Src->au64[1];
7405 pu256Dst->au64[2] = pu256Src->au64[2];
7406 pu256Dst->au64[3] = pu256Src->au64[3];
7407 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
7408}
7409#endif
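
/*
 * Note (an observation, not used by the code): the plain U256 fetch helpers
 * above pass an alignment argument of 0, annotated "NO_AC variant", i.e. no
 * alignment restriction is requested from iemMemMap for these unaligned
 * accesses.
 */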
7410
7411
7412/**
7413 * Fetches a data oword (octo word) at an aligned address, generally AVX
7414 * related.
7415 *
7416 * Raises \#GP(0) if not aligned.
7417 *
7418 * @returns Strict VBox status code.
7419 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7420 * @param pu256Dst Where to return the oword.
7421 * @param iSegReg The index of the segment register to use for
7422 * this access. The base and limits are checked.
7423 * @param GCPtrMem The address of the guest memory.
7424 */
7425VBOXSTRICTRC iemMemFetchDataU256AlignedSse(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7426{
7427 /* The lazy approach for now... */
7428 PCRTUINT256U pu256Src;
7429 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem,
7430 IEM_ACCESS_DATA_R, (sizeof(*pu256Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7431 if (rc == VINF_SUCCESS)
7432 {
7433 pu256Dst->au64[0] = pu256Src->au64[0];
7434 pu256Dst->au64[1] = pu256Src->au64[1];
7435 pu256Dst->au64[2] = pu256Src->au64[2];
7436 pu256Dst->au64[3] = pu256Src->au64[3];
7437 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
7438 }
7439 return rc;
7440}
7441
7442
7443#ifdef IEM_WITH_SETJMP
7444/**
7445 * Fetches a data oword (octo word) at an aligned address, generally AVX
7446 * related, longjmp on error.
7447 *
7448 * Raises \#GP(0) if not aligned.
7449 *
7450 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7451 * @param pu256Dst Where to return the oword.
7452 * @param iSegReg The index of the segment register to use for
7453 * this access. The base and limits are checked.
7454 * @param GCPtrMem The address of the guest memory.
7455 */
7456void iemMemFetchDataU256AlignedSseJmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg,
7457 RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7458{
7459 /* The lazy approach for now... */
7460 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
7461 (sizeof(*pu256Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7462 pu256Dst->au64[0] = pu256Src->au64[0];
7463 pu256Dst->au64[1] = pu256Src->au64[1];
7464 pu256Dst->au64[2] = pu256Src->au64[2];
7465 pu256Dst->au64[3] = pu256Src->au64[3];
7466 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
7467}
7468#endif
7469
7470
7471
7472/**
7473 * Fetches a descriptor register (lgdt, lidt).
7474 *
7475 * @returns Strict VBox status code.
7476 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7477 * @param pcbLimit Where to return the limit.
7478 * @param pGCPtrBase Where to return the base.
7479 * @param iSegReg The index of the segment register to use for
7480 * this access. The base and limits are checked.
7481 * @param GCPtrMem The address of the guest memory.
7482 * @param enmOpSize The effective operand size.
7483 */
7484VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPUCC pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
7485 RTGCPTR GCPtrMem, IEMMODE enmOpSize) RT_NOEXCEPT
7486{
7487 /*
7488 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
7489 * little special:
7490 * - The two reads are done separately.
7491 * - Operand size override works in 16-bit and 32-bit code, but not in 64-bit code.
7492 * - We suspect that the 386 actually commits the limit before the base in
7493 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
7494 * don't try to emulate this eccentric behavior, because it's not well
7495 * enough understood and rather hard to trigger.
7496 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
7497 */
7498 VBOXSTRICTRC rcStrict;
7499 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7500 {
7501 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7502 if (rcStrict == VINF_SUCCESS)
7503 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
7504 }
7505 else
7506 {
7507 uint32_t uTmp = 0; /* (Otherwise Visual C++ may warn about it being used uninitialized.) */
7508 if (enmOpSize == IEMMODE_32BIT)
7509 {
7510 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
7511 {
7512 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7513 if (rcStrict == VINF_SUCCESS)
7514 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7515 }
7516 else
7517 {
7518 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
7519 if (rcStrict == VINF_SUCCESS)
7520 {
7521 *pcbLimit = (uint16_t)uTmp;
7522 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7523 }
7524 }
7525 if (rcStrict == VINF_SUCCESS)
7526 *pGCPtrBase = uTmp;
7527 }
7528 else
7529 {
7530 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7531 if (rcStrict == VINF_SUCCESS)
7532 {
7533 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7534 if (rcStrict == VINF_SUCCESS)
7535 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
7536 }
7537 }
7538 }
7539 return rcStrict;
7540}
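
/*
 * Illustration (not used by the code): the memory operand read by LGDT/LIDT
 * via iemMemFetchDataXdtr is a 16-bit limit followed by the base address:
 *
 *      offset 0: limit (16 bits)
 *      offset 2: base  (masked to 24 bits with a 16-bit operand size,
 *                       32 bits with a 32-bit operand size, and a full
 *                       64 bits in 64-bit mode)
 */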
7541
7542
7543
7544/**
7545 * Stores a data byte.
7546 *
7547 * @returns Strict VBox status code.
7548 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7549 * @param iSegReg The index of the segment register to use for
7550 * this access. The base and limits are checked.
7551 * @param GCPtrMem The address of the guest memory.
7552 * @param u8Value The value to store.
7553 */
7554VBOXSTRICTRC iemMemStoreDataU8(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value) RT_NOEXCEPT
7555{
7556 /* The lazy approach for now... */
7557 uint8_t *pu8Dst;
7558 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W, 0);
7559 if (rc == VINF_SUCCESS)
7560 {
7561 *pu8Dst = u8Value;
7562 rc = iemMemCommitAndUnmap(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
7563 }
7564 return rc;
7565}
7566
7567
7568#ifdef IEM_WITH_SETJMP
7569/**
7570 * Stores a data byte, longjmp on error.
7571 *
7572 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7573 * @param iSegReg The index of the segment register to use for
7574 * this access. The base and limits are checked.
7575 * @param GCPtrMem The address of the guest memory.
7576 * @param u8Value The value to store.
7577 */
7578void iemMemStoreDataU8Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value) IEM_NOEXCEPT_MAY_LONGJMP
7579{
7580 /* The lazy approach for now... */
7581 uint8_t *pu8Dst = (uint8_t *)iemMemMapJmp(pVCpu, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W, 0);
7582 *pu8Dst = u8Value;
7583 iemMemCommitAndUnmapJmp(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
7584}
7585#endif
7586
7587
7588/**
7589 * Stores a data word.
7590 *
7591 * @returns Strict VBox status code.
7592 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7593 * @param iSegReg The index of the segment register to use for
7594 * this access. The base and limits are checked.
7595 * @param GCPtrMem The address of the guest memory.
7596 * @param u16Value The value to store.
7597 */
7598VBOXSTRICTRC iemMemStoreDataU16(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value) RT_NOEXCEPT
7599{
7600 /* The lazy approach for now... */
7601 uint16_t *pu16Dst;
7602 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem,
7603 IEM_ACCESS_DATA_W, sizeof(*pu16Dst) - 1);
7604 if (rc == VINF_SUCCESS)
7605 {
7606 *pu16Dst = u16Value;
7607 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
7608 }
7609 return rc;
7610}
7611
7612
7613#ifdef IEM_WITH_SETJMP
7614/**
7615 * Stores a data word, longjmp on error.
7616 *
7617 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7618 * @param iSegReg The index of the segment register to use for
7619 * this access. The base and limits are checked.
7620 * @param GCPtrMem The address of the guest memory.
7621 * @param u16Value The value to store.
7622 */
7623void iemMemStoreDataU16Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value) IEM_NOEXCEPT_MAY_LONGJMP
7624{
7625 /* The lazy approach for now... */
7626 uint16_t *pu16Dst = (uint16_t *)iemMemMapJmp(pVCpu, sizeof(*pu16Dst), iSegReg, GCPtrMem,
7627 IEM_ACCESS_DATA_W, sizeof(*pu16Dst) - 1);
7628 *pu16Dst = u16Value;
7629 iemMemCommitAndUnmapJmp(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
7630}
7631#endif
7632
7633
7634/**
7635 * Stores a data dword.
7636 *
7637 * @returns Strict VBox status code.
7638 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7639 * @param iSegReg The index of the segment register to use for
7640 * this access. The base and limits are checked.
7641 * @param GCPtrMem The address of the guest memory.
7642 * @param u32Value The value to store.
7643 */
7644VBOXSTRICTRC iemMemStoreDataU32(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value) RT_NOEXCEPT
7645{
7646 /* The lazy approach for now... */
7647 uint32_t *pu32Dst;
7648 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem,
7649 IEM_ACCESS_DATA_W, sizeof(*pu32Dst) - 1);
7650 if (rc == VINF_SUCCESS)
7651 {
7652 *pu32Dst = u32Value;
7653 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
7654 }
7655 return rc;
7656}
7657
7658
7659#ifdef IEM_WITH_SETJMP
7660/**
7661 * Stores a data dword, longjmp on error.
7662 *
7664 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7665 * @param iSegReg The index of the segment register to use for
7666 * this access. The base and limits are checked.
7667 * @param GCPtrMem The address of the guest memory.
7668 * @param u32Value The value to store.
7669 */
7670void iemMemStoreDataU32Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value) IEM_NOEXCEPT_MAY_LONGJMP
7671{
7672 /* The lazy approach for now... */
7673 uint32_t *pu32Dst = (uint32_t *)iemMemMapJmp(pVCpu, sizeof(*pu32Dst), iSegReg, GCPtrMem,
7674 IEM_ACCESS_DATA_W, sizeof(*pu32Dst) - 1);
7675 *pu32Dst = u32Value;
7676 iemMemCommitAndUnmapJmp(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
7677}
7678#endif
7679
7680
7681/**
7682 * Stores a data qword.
7683 *
7684 * @returns Strict VBox status code.
7685 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7686 * @param iSegReg The index of the segment register to use for
7687 * this access. The base and limits are checked.
7688 * @param GCPtrMem The address of the guest memory.
7689 * @param u64Value The value to store.
7690 */
7691VBOXSTRICTRC iemMemStoreDataU64(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value) RT_NOEXCEPT
7692{
7693 /* The lazy approach for now... */
7694 uint64_t *pu64Dst;
7695 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem,
7696 IEM_ACCESS_DATA_W, sizeof(*pu64Dst) - 1);
7697 if (rc == VINF_SUCCESS)
7698 {
7699 *pu64Dst = u64Value;
7700 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
7701 }
7702 return rc;
7703}
7704
7705
7706#ifdef IEM_WITH_SETJMP
7707/**
7708 * Stores a data qword, longjmp on error.
7709 *
7710 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7711 * @param iSegReg The index of the segment register to use for
7712 * this access. The base and limits are checked.
7713 * @param GCPtrMem The address of the guest memory.
7714 * @param u64Value The value to store.
7715 */
7716void iemMemStoreDataU64Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value) IEM_NOEXCEPT_MAY_LONGJMP
7717{
7718 /* The lazy approach for now... */
7719 uint64_t *pu64Dst = (uint64_t *)iemMemMapJmp(pVCpu, sizeof(*pu64Dst), iSegReg, GCPtrMem,
7720 IEM_ACCESS_DATA_W, sizeof(*pu64Dst) - 1);
7721 *pu64Dst = u64Value;
7722 iemMemCommitAndUnmapJmp(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
7723}
7724#endif
7725
7726
7727/**
7728 * Stores a data dqword.
7729 *
7730 * @returns Strict VBox status code.
7731 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7732 * @param iSegReg The index of the segment register to use for
7733 * this access. The base and limits are checked.
7734 * @param GCPtrMem The address of the guest memory.
7735 * @param u128Value The value to store.
7736 */
7737VBOXSTRICTRC iemMemStoreDataU128(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT
7738{
7739 /* The lazy approach for now... */
7740 PRTUINT128U pu128Dst;
7741 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem,
7742 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7743 if (rc == VINF_SUCCESS)
7744 {
7745 pu128Dst->au64[0] = u128Value.au64[0];
7746 pu128Dst->au64[1] = u128Value.au64[1];
7747 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
7748 }
7749 return rc;
7750}
7751
7752
7753#ifdef IEM_WITH_SETJMP
7754/**
7755 * Stores a data dqword, longjmp on error.
7756 *
7757 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7758 * @param iSegReg The index of the segment register to use for
7759 * this access. The base and limits are checked.
7760 * @param GCPtrMem The address of the guest memory.
7761 * @param u128Value The value to store.
7762 */
7763void iemMemStoreDataU128Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) IEM_NOEXCEPT_MAY_LONGJMP
7764{
7765 /* The lazy approach for now... */
7766 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem,
7767 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7768 pu128Dst->au64[0] = u128Value.au64[0];
7769 pu128Dst->au64[1] = u128Value.au64[1];
7770 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
7771}
7772#endif
7773
7774
7775/**
7776 * Stores a data dqword, SSE aligned.
7777 *
7778 * @returns Strict VBox status code.
7779 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7780 * @param iSegReg The index of the segment register to use for
7781 * this access. The base and limits are checked.
7782 * @param GCPtrMem The address of the guest memory.
7783 * @param u128Value The value to store.
7784 */
7785VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT
7786{
7787 /* The lazy approach for now... */
7788 PRTUINT128U pu128Dst;
7789 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W,
7790 (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7791 if (rc == VINF_SUCCESS)
7792 {
7793 pu128Dst->au64[0] = u128Value.au64[0];
7794 pu128Dst->au64[1] = u128Value.au64[1];
7795 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
7796 }
7797 return rc;
7798}
7799
7800
7801#ifdef IEM_WITH_SETJMP
7802/**
7803 * Stores a data dqword, SSE aligned, longjmp on error.
7804 *
7806 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7807 * @param iSegReg The index of the segment register to use for
7808 * this access. The base and limits are checked.
7809 * @param GCPtrMem The address of the guest memory.
7810 * @param u128Value The value to store.
7811 */
7812void iemMemStoreDataU128AlignedSseJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem,
7813 RTUINT128U u128Value) IEM_NOEXCEPT_MAY_LONGJMP
7814{
7815 /* The lazy approach for now... */
7816 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W,
7817 (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7818 pu128Dst->au64[0] = u128Value.au64[0];
7819 pu128Dst->au64[1] = u128Value.au64[1];
7820 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
7821}
7822#endif
7823
7824
7825/**
7826 * Stores a data oword (octo word).
7827 *
7828 * @returns Strict VBox status code.
7829 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7830 * @param iSegReg The index of the segment register to use for
7831 * this access. The base and limits are checked.
7832 * @param GCPtrMem The address of the guest memory.
7833 * @param pu256Value Pointer to the value to store.
7834 */
7835VBOXSTRICTRC iemMemStoreDataU256(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
7836{
7837 /* The lazy approach for now... */
7838 PRTUINT256U pu256Dst;
7839 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7840 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7841 if (rc == VINF_SUCCESS)
7842 {
7843 pu256Dst->au64[0] = pu256Value->au64[0];
7844 pu256Dst->au64[1] = pu256Value->au64[1];
7845 pu256Dst->au64[2] = pu256Value->au64[2];
7846 pu256Dst->au64[3] = pu256Value->au64[3];
7847 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
7848 }
7849 return rc;
7850}
7851
7852
7853#ifdef IEM_WITH_SETJMP
7854/**
7855 * Stores a data oword (octo word), longjmp on error.
7856 *
7857 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7858 * @param iSegReg The index of the segment register to use for
7859 * this access. The base and limits are checked.
7860 * @param GCPtrMem The address of the guest memory.
7861 * @param pu256Value Pointer to the value to store.
7862 */
7863void iemMemStoreDataU256Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP
7864{
7865 /* The lazy approach for now... */
7866 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7867 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7868 pu256Dst->au64[0] = pu256Value->au64[0];
7869 pu256Dst->au64[1] = pu256Value->au64[1];
7870 pu256Dst->au64[2] = pu256Value->au64[2];
7871 pu256Dst->au64[3] = pu256Value->au64[3];
7872 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
7873}
7874#endif
7875
7876
7877/**
7878 * Stores a data oword (octo word), AVX \#GP(0) aligned.
7879 *
7880 * @returns Strict VBox status code.
7881 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7882 * @param iSegReg The index of the segment register to use for
7883 * this access. The base and limits are checked.
7884 * @param GCPtrMem The address of the guest memory.
7885 * @param pu256Value Pointer to the value to store.
7886 */
7887VBOXSTRICTRC iemMemStoreDataU256AlignedAvx(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
7888{
7889 /* The lazy approach for now... */
7890 PRTUINT256U pu256Dst;
7891 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7892 IEM_ACCESS_DATA_W, (sizeof(*pu256Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP);
7893 if (rc == VINF_SUCCESS)
7894 {
7895 pu256Dst->au64[0] = pu256Value->au64[0];
7896 pu256Dst->au64[1] = pu256Value->au64[1];
7897 pu256Dst->au64[2] = pu256Value->au64[2];
7898 pu256Dst->au64[3] = pu256Value->au64[3];
7899 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
7900 }
7901 return rc;
7902}
7903
7904
7905#ifdef IEM_WITH_SETJMP
7906/**
7907 * Stores a data oword (octo word), AVX aligned, longjmp on error.
7908 *
7910 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7911 * @param iSegReg The index of the segment register to use for
7912 * this access. The base and limits are checked.
7913 * @param GCPtrMem The address of the guest memory.
7914 * @param pu256Value Pointer to the value to store.
7915 */
7916void iemMemStoreDataU256AlignedAvxJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem,
7917 PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP
7918{
7919 /* The lazy approach for now... */
7920 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7921 IEM_ACCESS_DATA_W, (sizeof(*pu256Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP);
7922 pu256Dst->au64[0] = pu256Value->au64[0];
7923 pu256Dst->au64[1] = pu256Value->au64[1];
7924 pu256Dst->au64[2] = pu256Value->au64[2];
7925 pu256Dst->au64[3] = pu256Value->au64[3];
7926 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
7927}
7928#endif
7929
7930
7931/**
7932 * Stores a descriptor register (sgdt, sidt).
7933 *
7934 * @returns Strict VBox status code.
7935 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7936 * @param cbLimit The limit.
7937 * @param GCPtrBase The base address.
7938 * @param iSegReg The index of the segment register to use for
7939 * this access. The base and limits are checked.
7940 * @param GCPtrMem The address of the guest memory.
7941 */
7942VBOXSTRICTRC iemMemStoreDataXdtr(PVMCPUCC pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7943{
7944 /*
7945 * The SIDT and SGDT instructions actually store the data using two
7946 * independent writes (see bs3CpuBasic2_sidt_sgdt_One). The instructions
7947 * do not respond to operand size prefixes.
7948 */
7949 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
7950 if (rcStrict == VINF_SUCCESS)
7951 {
7952 if (pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT)
7953 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
7954 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
7955 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
7956 else if (pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT)
7957 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
7958 else
7959 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
7960 }
7961 return rcStrict;
7962}
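
/*
 * Illustration (not used by the code): for 16-bit code on a 286-or-older
 * target, the base dword stored by iemMemStoreDataXdtr has its top byte
 * forced to 0xff.  For example, a limit of 0x07ff and a base of 0x00123456
 * would be written as the bytes ff 07 56 34 12 ff (little endian limit word,
 * then the base dword with 0xff000000 OR'ed in).
 */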
7963
7964
7965/**
7966 * Pushes a word onto the stack.
7967 *
7968 * @returns Strict VBox status code.
7969 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7970 * @param u16Value The value to push.
7971 */
7972VBOXSTRICTRC iemMemStackPushU16(PVMCPUCC pVCpu, uint16_t u16Value) RT_NOEXCEPT
7973{
7974 /* Decrement the stack pointer. */
7975 uint64_t uNewRsp;
7976 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 2, &uNewRsp);
7977
7978 /* Write the word the lazy way. */
7979 uint16_t *pu16Dst;
7980 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop,
7981 IEM_ACCESS_STACK_W, sizeof(*pu16Dst) - 1);
7982 if (rc == VINF_SUCCESS)
7983 {
7984 *pu16Dst = u16Value;
7985 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
7986 }
7987
7988 /* Commit the new RSP value unless an access handler made trouble. */
7989 if (rc == VINF_SUCCESS)
7990 pVCpu->cpum.GstCtx.rsp = uNewRsp;
7991
7992 return rc;
7993}
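
/*
 * Worked example (a sketch assuming a flat SS in 64-bit mode): with
 * RSP=0x1000, iemMemStackPushU16 gets GCPtrTop=0x0ffe and uNewRsp=0x0ffe
 * from iemRegGetRspForPush, writes the word at 0x0ffe, and only commits
 * RSP=0x0ffe after the mapped write has been committed successfully.
 */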
7994
7995
7996/**
7997 * Pushes a dword onto the stack.
7998 *
7999 * @returns Strict VBox status code.
8000 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8001 * @param u32Value The value to push.
8002 */
8003VBOXSTRICTRC iemMemStackPushU32(PVMCPUCC pVCpu, uint32_t u32Value) RT_NOEXCEPT
8004{
8005 /* Decrement the stack pointer. */
8006 uint64_t uNewRsp;
8007 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
8008
8009 /* Write the dword the lazy way. */
8010 uint32_t *pu32Dst;
8011 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop,
8012 IEM_ACCESS_STACK_W, sizeof(*pu32Dst) - 1);
8013 if (rc == VINF_SUCCESS)
8014 {
8015 *pu32Dst = u32Value;
8016 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
8017 }
8018
8019 /* Commit the new RSP value unless an access handler made trouble. */
8020 if (rc == VINF_SUCCESS)
8021 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8022
8023 return rc;
8024}
8025
8026
8027/**
8028 * Pushes a dword segment register value onto the stack.
8029 *
8030 * @returns Strict VBox status code.
8031 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8032 * @param u32Value The value to push.
8033 */
8034VBOXSTRICTRC iemMemStackPushU32SReg(PVMCPUCC pVCpu, uint32_t u32Value) RT_NOEXCEPT
8035{
8036 /* Decrement the stack pointer. */
8037 uint64_t uNewRsp;
8038 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
8039
8040 /* The Intel docs talk about zero extending the selector register
8041 value. My actual Intel CPU here may well be zero extending the value,
8042 but it still only writes the lower word... */
8043 /** @todo Test this on new HW and on AMD and in 64-bit mode. Also test what
8044 * happens when crossing a page boundary: is the high word checked for
8045 * write accessibility or not? Probably it is. What about segment limits?
8046 * It appears this behavior is also shared with trap error codes.
8047 *
8048 * Docs indicate the behavior changed maybe in the Pentium or Pentium Pro. Check
8049 * ancient hardware to see when it actually did change. */
8050 uint16_t *pu16Dst;
8051 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop,
8052 IEM_ACCESS_STACK_RW, sizeof(*pu16Dst) - 1); /** @todo 2 or 4 alignment check for PUSH SS? */
8053 if (rc == VINF_SUCCESS)
8054 {
8055 *pu16Dst = (uint16_t)u32Value;
8056 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_RW);
8057 }
8058
8059 /* Commit the new RSP value unless an access handler made trouble. */
8060 if (rc == VINF_SUCCESS)
8061 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8062
8063 return rc;
8064}
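
/*
 * Illustration of the quirk handled above (a sketch): a "push es" with a
 * 32-bit operand size reserves 4 bytes of stack (the stack pointer drops by
 * 4), but only the low word of the new top-of-stack slot is written; the
 * upper word keeps whatever was in memory before, which is presumably why
 * the slot is mapped read-write rather than write-only.
 */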
8065
8066
8067/**
8068 * Pushes a qword onto the stack.
8069 *
8070 * @returns Strict VBox status code.
8071 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8072 * @param u64Value The value to push.
8073 */
8074VBOXSTRICTRC iemMemStackPushU64(PVMCPUCC pVCpu, uint64_t u64Value) RT_NOEXCEPT
8075{
8076 /* Decrement the stack pointer. */
8077 uint64_t uNewRsp;
8078 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 8, &uNewRsp);
8079
8080 /* Write the qword the lazy way. */
8081 uint64_t *pu64Dst;
8082 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop,
8083 IEM_ACCESS_STACK_W, sizeof(*pu64Dst) - 1);
8084 if (rc == VINF_SUCCESS)
8085 {
8086 *pu64Dst = u64Value;
8087 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
8088 }
8089
8090 /* Commit the new RSP value unless an access handler made trouble. */
8091 if (rc == VINF_SUCCESS)
8092 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8093
8094 return rc;
8095}
8096
8097
8098/**
8099 * Pops a word from the stack.
8100 *
8101 * @returns Strict VBox status code.
8102 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8103 * @param pu16Value Where to store the popped value.
8104 */
8105VBOXSTRICTRC iemMemStackPopU16(PVMCPUCC pVCpu, uint16_t *pu16Value) RT_NOEXCEPT
8106{
8107 /* Increment the stack pointer. */
8108 uint64_t uNewRsp;
8109 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 2, &uNewRsp);
8110
8111 /* Read the word the lazy way. */
8112 uint16_t const *pu16Src;
8113 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop,
8114 IEM_ACCESS_STACK_R, sizeof(*pu16Src) - 1);
8115 if (rc == VINF_SUCCESS)
8116 {
8117 *pu16Value = *pu16Src;
8118 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
8119
8120 /* Commit the new RSP value. */
8121 if (rc == VINF_SUCCESS)
8122 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8123 }
8124
8125 return rc;
8126}
8127
8128
8129/**
8130 * Pops a dword from the stack.
8131 *
8132 * @returns Strict VBox status code.
8133 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8134 * @param pu32Value Where to store the popped value.
8135 */
8136VBOXSTRICTRC iemMemStackPopU32(PVMCPUCC pVCpu, uint32_t *pu32Value) RT_NOEXCEPT
8137{
8138 /* Increment the stack pointer. */
8139 uint64_t uNewRsp;
8140 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 4, &uNewRsp);
8141
8142 /* Read the dword the lazy way. */
8143 uint32_t const *pu32Src;
8144 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop,
8145 IEM_ACCESS_STACK_R, sizeof(*pu32Src) - 1);
8146 if (rc == VINF_SUCCESS)
8147 {
8148 *pu32Value = *pu32Src;
8149 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
8150
8151 /* Commit the new RSP value. */
8152 if (rc == VINF_SUCCESS)
8153 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8154 }
8155
8156 return rc;
8157}
8158
8159
8160/**
8161 * Pops a qword from the stack.
8162 *
8163 * @returns Strict VBox status code.
8164 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8165 * @param pu64Value Where to store the popped value.
8166 */
8167VBOXSTRICTRC iemMemStackPopU64(PVMCPUCC pVCpu, uint64_t *pu64Value) RT_NOEXCEPT
8168{
8169 /* Increment the stack pointer. */
8170 uint64_t uNewRsp;
8171 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 8, &uNewRsp);
8172
8173 /* Read the qword the lazy way. */
8174 uint64_t const *pu64Src;
8175 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop,
8176 IEM_ACCESS_STACK_R, sizeof(*pu64Src) - 1);
8177 if (rc == VINF_SUCCESS)
8178 {
8179 *pu64Value = *pu64Src;
8180 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
8181
8182 /* Commit the new RSP value. */
8183 if (rc == VINF_SUCCESS)
8184 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8185 }
8186
8187 return rc;
8188}
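
/*
 * Worked example (a sketch assuming a flat SS in 64-bit mode): with
 * RSP=0x0ff8, iemMemStackPopU64 reads the qword at GCPtrTop=0x0ff8 and, if
 * both the read and the unmap succeed, commits uNewRsp=0x1000.
 */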
8189
8190
8191/**
8192 * Pushes a word onto the stack, using a temporary stack pointer.
8193 *
8194 * @returns Strict VBox status code.
8195 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8196 * @param u16Value The value to push.
8197 * @param pTmpRsp Pointer to the temporary stack pointer.
8198 */
8199VBOXSTRICTRC iemMemStackPushU16Ex(PVMCPUCC pVCpu, uint16_t u16Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8200{
8201 /* Decrement the stack pointer. */
8202 RTUINT64U NewRsp = *pTmpRsp;
8203 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 2);
8204
8205 /* Write the word the lazy way. */
8206 uint16_t *pu16Dst;
8207 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop,
8208 IEM_ACCESS_STACK_W, sizeof(*pu16Dst) - 1);
8209 if (rc == VINF_SUCCESS)
8210 {
8211 *pu16Dst = u16Value;
8212 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
8213 }
8214
8215 /* Commit the new RSP value unless an access handler made trouble. */
8216 if (rc == VINF_SUCCESS)
8217 *pTmpRsp = NewRsp;
8218
8219 return rc;
8220}
8221
8222
8223/**
8224 * Pushes a dword onto the stack, using a temporary stack pointer.
8225 *
8226 * @returns Strict VBox status code.
8227 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8228 * @param u32Value The value to push.
8229 * @param pTmpRsp Pointer to the temporary stack pointer.
8230 */
8231VBOXSTRICTRC iemMemStackPushU32Ex(PVMCPUCC pVCpu, uint32_t u32Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8232{
8233 /* Decrement the stack pointer. */
8234 RTUINT64U NewRsp = *pTmpRsp;
8235 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 4);
8236
8237 /* Write the dword the lazy way. */
8238 uint32_t *pu32Dst;
8239 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop,
8240 IEM_ACCESS_STACK_W, sizeof(*pu32Dst) - 1);
8241 if (rc == VINF_SUCCESS)
8242 {
8243 *pu32Dst = u32Value;
8244 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
8245 }
8246
8247 /* Commit the new RSP value unless an access handler made trouble. */
8248 if (rc == VINF_SUCCESS)
8249 *pTmpRsp = NewRsp;
8250
8251 return rc;
8252}
8253
8254
8255/**
8256 * Pushes a qword onto the stack, using a temporary stack pointer.
8257 *
8258 * @returns Strict VBox status code.
8259 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8260 * @param u64Value The value to push.
8261 * @param pTmpRsp Pointer to the temporary stack pointer.
8262 */
8263VBOXSTRICTRC iemMemStackPushU64Ex(PVMCPUCC pVCpu, uint64_t u64Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8264{
8265 /* Decrement the stack pointer. */
8266 RTUINT64U NewRsp = *pTmpRsp;
8267 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 8);
8268
8269 /* Write the qword the lazy way. */
8270 uint64_t *pu64Dst;
8271 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop,
8272 IEM_ACCESS_STACK_W, sizeof(*pu64Dst) - 1);
8273 if (rc == VINF_SUCCESS)
8274 {
8275 *pu64Dst = u64Value;
8276 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
8277 }
8278
8279 /* Commit the new RSP value unless an access handler made trouble. */
8280 if (rc == VINF_SUCCESS)
8281 *pTmpRsp = NewRsp;
8282
8283 return rc;
8284}
8285
8286
8287/**
8288 * Pops a word from the stack, using a temporary stack pointer.
8289 *
8290 * @returns Strict VBox status code.
8291 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8292 * @param pu16Value Where to store the popped value.
8293 * @param pTmpRsp Pointer to the temporary stack pointer.
8294 */
8295VBOXSTRICTRC iemMemStackPopU16Ex(PVMCPUCC pVCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8296{
8297 /* Increment the stack pointer. */
8298 RTUINT64U NewRsp = *pTmpRsp;
8299 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 2);
8300
8301 /* Read the word the lazy way. */
8302 uint16_t const *pu16Src;
8303 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop,
8304 IEM_ACCESS_STACK_R, sizeof(*pu16Src) - 1);
8305 if (rc == VINF_SUCCESS)
8306 {
8307 *pu16Value = *pu16Src;
8308 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
8309
8310 /* Commit the new RSP value. */
8311 if (rc == VINF_SUCCESS)
8312 *pTmpRsp = NewRsp;
8313 }
8314
8315 return rc;
8316}
8317
8318
8319/**
8320 * Pops a dword from the stack, using a temporary stack pointer.
8321 *
8322 * @returns Strict VBox status code.
8323 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8324 * @param pu32Value Where to store the popped value.
8325 * @param pTmpRsp Pointer to the temporary stack pointer.
8326 */
8327VBOXSTRICTRC iemMemStackPopU32Ex(PVMCPUCC pVCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8328{
8329 /* Increment the stack pointer. */
8330 RTUINT64U NewRsp = *pTmpRsp;
8331 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 4);
8332
8333 /* Read the dword the lazy way. */
8334 uint32_t const *pu32Src;
8335 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop,
8336 IEM_ACCESS_STACK_R, sizeof(*pu32Src) - 1);
8337 if (rc == VINF_SUCCESS)
8338 {
8339 *pu32Value = *pu32Src;
8340 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
8341
8342 /* Commit the new RSP value. */
8343 if (rc == VINF_SUCCESS)
8344 *pTmpRsp = NewRsp;
8345 }
8346
8347 return rc;
8348}
8349
8350
8351/**
8352 * Pops a qword from the stack, using a temporary stack pointer.
8353 *
8354 * @returns Strict VBox status code.
8355 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8356 * @param pu64Value Where to store the popped value.
8357 * @param pTmpRsp Pointer to the temporary stack pointer.
8358 */
8359VBOXSTRICTRC iemMemStackPopU64Ex(PVMCPUCC pVCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8360{
8361 /* Increment the stack pointer. */
8362 RTUINT64U NewRsp = *pTmpRsp;
8363 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 8);
8364
8365 /* Read the qword the lazy way. */
8366 uint64_t const *pu64Src;
8367 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop,
8368 IEM_ACCESS_STACK_R, sizeof(*pu64Src) - 1);
8369 if (rcStrict == VINF_SUCCESS)
8370 {
8371 *pu64Value = *pu64Src;
8372 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
8373
8374 /* Commit the new RSP value. */
8375 if (rcStrict == VINF_SUCCESS)
8376 *pTmpRsp = NewRsp;
8377 }
8378
8379 return rcStrict;
8380}
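
/*
 * Note: the *Ex push/pop variants above work on a caller supplied temporary
 * stack pointer (pTmpRsp) instead of CPUMCTX::rsp, so a caller composing a
 * multi-part stack operation can accumulate the RSP changes and decide when,
 * or whether, to write the final value back to the guest context.
 */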
8381
8382
8383/**
8384 * Begin a special stack push (used by interrupts, exceptions and such).
8385 *
8386 * This will raise \#SS or \#PF if appropriate.
8387 *
8388 * @returns Strict VBox status code.
8389 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8390 * @param cbMem The number of bytes to push onto the stack.
8391 * @param cbAlign The alignment mask (7, 3, 1).
8392 * @param ppvMem Where to return the pointer to the stack memory.
8393 * As with the other memory functions this could be
8394 * direct access or bounce buffered access, so
8395 * don't commit register state until the commit call
8396 * succeeds.
8397 * @param puNewRsp Where to return the new RSP value. This must be
8398 * passed unchanged to
8399 * iemMemStackPushCommitSpecial().
8400 */
8401VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
8402 void **ppvMem, uint64_t *puNewRsp) RT_NOEXCEPT
8403{
8404 Assert(cbMem < UINT8_MAX);
8405 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, (uint8_t)cbMem, puNewRsp);
8406 return iemMemMap(pVCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop,
8407 IEM_ACCESS_STACK_W, cbAlign);
8408}
8409
8410
8411/**
8412 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
8413 *
8414 * This will update the rSP.
8415 *
8416 * @returns Strict VBox status code.
8417 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8418 * @param pvMem The pointer returned by
8419 * iemMemStackPushBeginSpecial().
8420 * @param uNewRsp The new RSP value returned by
8421 * iemMemStackPushBeginSpecial().
8422 */
8423VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPUCC pVCpu, void *pvMem, uint64_t uNewRsp) RT_NOEXCEPT
8424{
8425 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem, IEM_ACCESS_STACK_W);
8426 if (rcStrict == VINF_SUCCESS)
8427 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8428 return rcStrict;
8429}
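
/*
 * Usage sketch for the special push pair above (illustrative only; the
 * interrupt and exception delivery code is the real user):
 *
 *      void        *pvStackFrame;
 *      uint64_t     uNewRsp;
 *      VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pVCpu, 8, 7, &pvStackFrame, &uNewRsp);
 *      if (rcStrict == VINF_SUCCESS)
 *      {
 *          ... fill in the mapped frame at pvStackFrame ...
 *          rcStrict = iemMemStackPushCommitSpecial(pVCpu, pvStackFrame, uNewRsp);
 *      }
 */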
8430
8431
8432/**
8433 * Begin a special stack pop (used by iret, retf and such).
8434 *
8435 * This will raise \#SS or \#PF if appropriate.
8436 *
8437 * @returns Strict VBox status code.
8438 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8439 * @param cbMem The number of bytes to pop from the stack.
8440 * @param cbAlign The alignment mask (7, 3, 1).
8441 * @param ppvMem Where to return the pointer to the stack memory.
8442 * @param puNewRsp Where to return the new RSP value. This must be
8443 * assigned to CPUMCTX::rsp manually some time
8444 * after iemMemStackPopDoneSpecial() has been
8445 * called.
8446 */
8447VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
8448 void const **ppvMem, uint64_t *puNewRsp) RT_NOEXCEPT
8449{
8450 Assert(cbMem < UINT8_MAX);
8451 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, (uint8_t)cbMem, puNewRsp);
8452 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R, cbAlign);
8453}
8454
8455
8456/**
8457 * Continue a special stack pop (used by iret and retf), for the purpose of
8458 * retrieving a new stack pointer.
8459 *
8460 * This will raise \#SS or \#PF if appropriate.
8461 *
8462 * @returns Strict VBox status code.
8463 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8464 * @param off Offset from the top of the stack. This is zero
8465 * except in the retf case.
8466 * @param cbMem The number of bytes to pop from the stack.
8467 * @param ppvMem Where to return the pointer to the stack memory.
8468 * @param uCurNewRsp The current uncommitted RSP value. (No need to
8469 * return this because all use of this function is
8470 * to retrieve a new value and anything we return
8471 * here would be discarded.)
8472 */
8473VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPUCC pVCpu, size_t off, size_t cbMem,
8474 void const **ppvMem, uint64_t uCurNewRsp) RT_NOEXCEPT
8475{
8476 Assert(cbMem < UINT8_MAX);
8477
8478 /* The essence of iemRegGetRspForPopEx and friends: */ /** @todo put this into an inlined function? */
8479 RTGCPTR GCPtrTop;
8480 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
8481 GCPtrTop = uCurNewRsp;
8482 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
8483 GCPtrTop = (uint32_t)uCurNewRsp;
8484 else
8485 GCPtrTop = (uint16_t)uCurNewRsp;
8486
8487 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop + off, IEM_ACCESS_STACK_R,
8488 0 /* checked in iemMemStackPopBeginSpecial */);
8489}
8490
8491
8492/**
8493 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
8494 * iemMemStackPopContinueSpecial).
8495 *
8496 * The caller will manually commit the rSP.
8497 *
8498 * @returns Strict VBox status code.
8499 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8500 * @param pvMem The pointer returned by
8501 * iemMemStackPopBeginSpecial() or
8502 * iemMemStackPopContinueSpecial().
8503 */
8504VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPUCC pVCpu, void const *pvMem) RT_NOEXCEPT
8505{
8506 return iemMemCommitAndUnmap(pVCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
8507}
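
/*
 * Usage sketch for the special pop helpers above (illustrative only; iret
 * and retf are the real users):
 *
 *      void const  *pvFrame;
 *      uint64_t     uNewRsp;
 *      VBOXSTRICTRC rcStrict = iemMemStackPopBeginSpecial(pVCpu, 8, 7, &pvFrame, &uNewRsp);
 *      if (rcStrict == VINF_SUCCESS)
 *      {
 *          ... read the return frame from pvFrame ...
 *          rcStrict = iemMemStackPopDoneSpecial(pVCpu, pvFrame);
 *          if (rcStrict == VINF_SUCCESS)
 *              pVCpu->cpum.GstCtx.rsp = uNewRsp;   (the caller commits RSP manually)
 *      }
 */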
8508
8509
8510/**
8511 * Fetches a system table byte.
8512 *
8513 * @returns Strict VBox status code.
8514 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8515 * @param pbDst Where to return the byte.
8516 * @param iSegReg The index of the segment register to use for
8517 * this access. The base and limits are checked.
8518 * @param GCPtrMem The address of the guest memory.
8519 */
8520VBOXSTRICTRC iemMemFetchSysU8(PVMCPUCC pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8521{
8522 /* The lazy approach for now... */
8523 uint8_t const *pbSrc;
8524 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8525 if (rc == VINF_SUCCESS)
8526 {
8527 *pbDst = *pbSrc;
8528 rc = iemMemCommitAndUnmap(pVCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
8529 }
8530 return rc;
8531}
8532
8533
8534/**
8535 * Fetches a system table word.
8536 *
8537 * @returns Strict VBox status code.
8538 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8539 * @param pu16Dst Where to return the word.
8540 * @param iSegReg The index of the segment register to use for
8541 * this access. The base and limits are checked.
8542 * @param GCPtrMem The address of the guest memory.
8543 */
8544VBOXSTRICTRC iemMemFetchSysU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8545{
8546 /* The lazy approach for now... */
8547 uint16_t const *pu16Src;
8548 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8549 if (rc == VINF_SUCCESS)
8550 {
8551 *pu16Dst = *pu16Src;
8552 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
8553 }
8554 return rc;
8555}
8556
8557
8558/**
8559 * Fetches a system table dword.
8560 *
8561 * @returns Strict VBox status code.
8562 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8563 * @param pu32Dst Where to return the dword.
8564 * @param iSegReg The index of the segment register to use for
8565 * this access. The base and limits are checked.
8566 * @param GCPtrMem The address of the guest memory.
8567 */
8568VBOXSTRICTRC iemMemFetchSysU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8569{
8570 /* The lazy approach for now... */
8571 uint32_t const *pu32Src;
8572 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8573 if (rc == VINF_SUCCESS)
8574 {
8575 *pu32Dst = *pu32Src;
8576 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
8577 }
8578 return rc;
8579}
8580
8581
8582/**
8583 * Fetches a system table qword.
8584 *
8585 * @returns Strict VBox status code.
8586 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8587 * @param pu64Dst Where to return the qword.
8588 * @param iSegReg The index of the segment register to use for
8589 * this access. The base and limits are checked.
8590 * @param GCPtrMem The address of the guest memory.
8591 */
8592VBOXSTRICTRC iemMemFetchSysU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8593{
8594 /* The lazy approach for now... */
8595 uint64_t const *pu64Src;
8596 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8597 if (rc == VINF_SUCCESS)
8598 {
8599 *pu64Dst = *pu64Src;
8600 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
8601 }
8602 return rc;
8603}
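
/*
 * Note: the iemMemFetchSysXX helpers above are used for descriptor table
 * reads further down; those callers pass UINT8_MAX as iSegReg because the
 * GDTR/LDTR bases are already linear addresses and no further segmentation
 * should be applied.
 */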
8604
8605
8606/**
8607 * Fetches a descriptor table entry with caller specified error code.
8608 *
8609 * @returns Strict VBox status code.
8610 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8611 * @param pDesc Where to return the descriptor table entry.
8612 * @param uSel The selector which table entry to fetch.
8613 * @param uXcpt The exception to raise on table lookup error.
8614 * @param uErrorCode The error code associated with the exception.
8615 */
8616static VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,
8617 uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT
8618{
8619 AssertPtr(pDesc);
8620 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
8621
8622 /** @todo did the 286 require all 8 bytes to be accessible? */
8623 /*
8624 * Get the selector table base and check bounds.
8625 */
8626 RTGCPTR GCPtrBase;
8627 if (uSel & X86_SEL_LDT)
8628 {
8629 if ( !pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present
8630 || (uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.ldtr.u32Limit )
8631 {
8632 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
8633 uSel, pVCpu->cpum.GstCtx.ldtr.u32Limit, pVCpu->cpum.GstCtx.ldtr.Sel));
8634 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8635 uErrorCode, 0);
8636 }
8637
8638 Assert(pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present);
8639 GCPtrBase = pVCpu->cpum.GstCtx.ldtr.u64Base;
8640 }
8641 else
8642 {
8643 if ((uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.gdtr.cbGdt)
8644 {
8645 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pVCpu->cpum.GstCtx.gdtr.cbGdt));
8646 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8647 uErrorCode, 0);
8648 }
8649 GCPtrBase = pVCpu->cpum.GstCtx.gdtr.pGdt;
8650 }
8651
8652 /*
8653 * Read the legacy descriptor and maybe the long mode extensions if
8654 * required.
8655 */
8656 VBOXSTRICTRC rcStrict;
8657 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)
8658 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
8659 else
8660 {
8661 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);
8662 if (rcStrict == VINF_SUCCESS)
8663 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);
8664 if (rcStrict == VINF_SUCCESS)
8665 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);
8666 if (rcStrict == VINF_SUCCESS)
8667 pDesc->Legacy.au16[3] = 0;
8668 else
8669 return rcStrict;
8670 }
8671
8672 if (rcStrict == VINF_SUCCESS)
8673 {
8674 if ( !IEM_IS_LONG_MODE(pVCpu)
8675 || pDesc->Legacy.Gen.u1DescType)
8676 pDesc->Long.au64[1] = 0;
8677 else if ( (uint32_t)(uSel | X86_SEL_RPL_LDT) + 8
8678 <= (uSel & X86_SEL_LDT ? pVCpu->cpum.GstCtx.ldtr.u32Limit : pVCpu->cpum.GstCtx.gdtr.cbGdt))
8679 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
8680 else
8681 {
8682 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
8683 /** @todo is this the right exception? */
8684 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
8685 }
8686 }
8687 return rcStrict;
8688}
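
/*
 * Illustration (not used by the code): the descriptor address computed above
 * is GCPtrBase + (uSel & X86_SEL_MASK), i.e. the selector with its RPL and
 * TI bits stripped.  For example, uSel=0x002b (index 5, TI=1, RPL=3) selects
 * LDT entry 5 at ldtr.u64Base + 0x28.
 */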
8689
8690
8691/**
8692 * Fetches a descriptor table entry.
8693 *
8694 * @returns Strict VBox status code.
8695 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8696 * @param pDesc Where to return the descriptor table entry.
8697 * @param uSel The selector which table entry to fetch.
8698 * @param uXcpt The exception to raise on table lookup error.
8699 */
8700VBOXSTRICTRC iemMemFetchSelDesc(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt) RT_NOEXCEPT
8701{
8702 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
8703}
8704
8705
8706/**
8707 * Marks the selector descriptor as accessed (only non-system descriptors).
8708 *
8709 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
8710 * will therefore skip the limit checks.
8711 *
8712 * @returns Strict VBox status code.
8713 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8714 * @param uSel The selector.
8715 */
8716VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
8717{
8718 /*
8719 * Get the selector table base and calculate the entry address.
8720 */
8721 RTGCPTR GCPtr = uSel & X86_SEL_LDT
8722 ? pVCpu->cpum.GstCtx.ldtr.u64Base
8723 : pVCpu->cpum.GstCtx.gdtr.pGdt;
8724 GCPtr += uSel & X86_SEL_MASK;
8725
8726 /*
8727 * ASMAtomicBitSet will assert if the address is misaligned, so do some
8728 * ugly stuff to avoid this. This will make sure it's an atomic access
8729 * as well as more or less remove any question about 8-bit or 32-bit accesses.
8730 */
8731 VBOXSTRICTRC rcStrict;
8732 uint32_t volatile *pu32;
8733 if ((GCPtr & 3) == 0)
8734 {
8735 /* The normal case: map the 32 bits containing the accessed bit (bit 40). */
8736 GCPtr += 2 + 2;
8737 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0);
8738 if (rcStrict != VINF_SUCCESS)
8739 return rcStrict;
8740 ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
8741 }
8742 else
8743 {
8744 /* The misaligned GDT/LDT case, map the whole thing. */
8745 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0);
8746 if (rcStrict != VINF_SUCCESS)
8747 return rcStrict;
8748 switch ((uintptr_t)pu32 & 3)
8749 {
8750 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
8751 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
8752 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
8753 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
8754 }
8755 }
8756
8757 return iemMemCommitAndUnmap(pVCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
8758}
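
/*
 * Illustration (not used by the code): the accessed flag lives in the type
 * field of the descriptor, i.e. bit 40 of the 8-byte entry.  For a 4-byte
 * aligned entry the code above maps the dword at offset 4, where descriptor
 * bit 40 becomes bit 8 of that dword (40 - 32 = 8), matching the
 * ASMAtomicBitSet(pu32, 8) call in the aligned path.
 */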
8759
8760/** @} */
8761
8762/** @name Opcode Helpers.
8763 * @{
8764 */
8765
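/*
 * Worked ModR/M examples for the effective address helper below (a sketch,
 * not used by the code):
 *   - 16-bit addressing, bRm=0x47 (mod=1, rm=7): EA = BX + disp8 (sign extended).
 *   - 32-bit addressing, bRm=0x04 with SIB=0x88 (index=ECX scaled by 4,
 *     base=EAX): EA = EAX + ECX*4.
 *   - 64-bit mode, mod=0 and rm=5: EA = disp32 + RIP of the next instruction,
 *     which is why IEM_GET_INSTR_LEN and cbImm are both added below.
 */
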
8766/**
8767 * Calculates the effective address of a ModR/M memory operand.
8768 *
8769 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8770 *
8771 * @return Strict VBox status code.
8772 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8773 * @param bRm The ModRM byte.
8774 * @param cbImm The size of any immediate following the
8775 * effective address opcode bytes. Important for
8776 * RIP relative addressing.
8777 * @param pGCPtrEff Where to return the effective address.
8778 */
8779VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPUCC pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff) RT_NOEXCEPT
8780{
8781 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
8782# define SET_SS_DEF() \
8783 do \
8784 { \
8785 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
8786 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
8787 } while (0)
8788
8789 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
8790 {
8791/** @todo Check the effective address size crap! */
8792 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
8793 {
8794 uint16_t u16EffAddr;
8795
8796 /* Handle the disp16 form with no registers first. */
8797 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
8798 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
8799 else
8800 {
8801 /* Get the displacement. */
8802 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8803 {
8804 case 0: u16EffAddr = 0; break;
8805 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
8806 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
8807 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
8808 }
8809
8810 /* Add the base and index registers to the disp. */
8811 switch (bRm & X86_MODRM_RM_MASK)
8812 {
8813 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
8814 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
8815 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
8816 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
8817 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
8818 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
8819 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
8820 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
8821 }
8822 }
8823
8824 *pGCPtrEff = u16EffAddr;
8825 }
8826 else
8827 {
8828 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8829 uint32_t u32EffAddr;
8830
8831 /* Handle the disp32 form with no registers first. */
8832 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8833 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
8834 else
8835 {
8836 /* Get the register (or SIB) value. */
8837 switch ((bRm & X86_MODRM_RM_MASK))
8838 {
8839 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8840 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8841 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8842 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8843 case 4: /* SIB */
8844 {
8845 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8846
8847 /* Get the index and scale it. */
8848 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
8849 {
8850 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8851 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8852 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8853 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8854 case 4: u32EffAddr = 0; /*none */ break;
8855 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
8856 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8857 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8858 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8859 }
8860 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8861
8862 /* add base */
8863 switch (bSib & X86_SIB_BASE_MASK)
8864 {
8865 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
8866 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
8867 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
8868 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
8869 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp; SET_SS_DEF(); break;
8870 case 5:
8871 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8872 {
8873 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
8874 SET_SS_DEF();
8875 }
8876 else
8877 {
8878 uint32_t u32Disp;
8879 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8880 u32EffAddr += u32Disp;
8881 }
8882 break;
8883 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
8884 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
8885 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8886 }
8887 break;
8888 }
8889 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
8890 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8891 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8892 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8893 }
8894
8895 /* Get and add the displacement. */
8896 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8897 {
8898 case 0:
8899 break;
8900 case 1:
8901 {
8902 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8903 u32EffAddr += i8Disp;
8904 break;
8905 }
8906 case 2:
8907 {
8908 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8909 u32EffAddr += u32Disp;
8910 break;
8911 }
8912 default:
8913 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
8914 }
8915
8916 }
8917 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
8918 *pGCPtrEff = u32EffAddr;
8919 else
8920 {
8921 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
8922 *pGCPtrEff = u32EffAddr & UINT16_MAX;
8923 }
8924 }
8925 }
8926 else
8927 {
8928 uint64_t u64EffAddr;
8929
8930 /* Handle the rip+disp32 form with no registers first. */
8931 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8932 {
8933 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
8934 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
8935 }
8936 else
8937 {
8938 /* Get the register (or SIB) value. */
8939 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
8940 {
8941 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8942 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8943 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8944 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8945 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
8946 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8947 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8948 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8949 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8950 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8951 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8952 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8953 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8954 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8955 /* SIB */
8956 case 4:
8957 case 12:
8958 {
8959 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8960
8961 /* Get the index and scale it. */
8962 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
8963 {
8964 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8965 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8966 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8967 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8968 case 4: u64EffAddr = 0; /*none */ break;
8969 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
8970 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8971 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8972 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8973 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8974 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8975 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8976 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
8977 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8978 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8979 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8980 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8981 }
8982 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8983
8984 /* add base */
8985 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
8986 {
8987 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
8988 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
8989 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
8990 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
8991 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp; SET_SS_DEF(); break;
8992 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
8993 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
8994 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
8995 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
8996 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
8997 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
8998 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
8999 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
9000 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
9001 /* complicated encodings */
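                               /* SIB base 101b (5, or 13 with REX.B) cannot encode RBP/R13 with mod=00;
                                  that combination means no base register and a disp32 instead. */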
9002 case 5:
9003 case 13:
9004 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9005 {
9006 if (!pVCpu->iem.s.uRexB)
9007 {
9008 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
9009 SET_SS_DEF();
9010 }
9011 else
9012 u64EffAddr += pVCpu->cpum.GstCtx.r13;
9013 }
9014 else
9015 {
9016 uint32_t u32Disp;
9017 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9018 u64EffAddr += (int32_t)u32Disp;
9019 }
9020 break;
9021 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9022 }
9023 break;
9024 }
9025 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9026 }
9027
9028 /* Get and add the displacement. */
9029 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9030 {
9031 case 0:
9032 break;
9033 case 1:
9034 {
9035 int8_t i8Disp;
9036 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9037 u64EffAddr += i8Disp;
9038 break;
9039 }
9040 case 2:
9041 {
9042 uint32_t u32Disp;
9043 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9044 u64EffAddr += (int32_t)u32Disp;
9045 break;
9046 }
9047 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
9048 }
9049
9050 }
9051
9052 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
9053 *pGCPtrEff = u64EffAddr;
9054 else
9055 {
9056 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9057 *pGCPtrEff = u64EffAddr & UINT32_MAX;
9058 }
9059 }
9060
9061 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
9062 return VINF_SUCCESS;
9063}
9064
9065
9066/**
9067 * Calculates the effective address of a ModR/M memory operand.
9068 *
9069 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
9070 *
9071 * @return Strict VBox status code.
9072 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9073 * @param bRm The ModRM byte.
9074 * @param cbImm The size of any immediate following the
9075 * effective address opcode bytes. Important for
9076 * RIP relative addressing.
9077 * @param pGCPtrEff Where to return the effective address.
9078 * @param   offRsp              The displacement to add when RSP/ESP is the SIB base register.
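 *
 * @note    The decoding below assumes the standard ModR/M and SIB layouts:
 *          ModR/M = mod[7:6] reg[5:3] r/m[2:0]; SIB = scale[7:6] index[5:3]
 *          base[2:0], with REX.X / REX.B extending the index and base fields.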
9079 */
9080VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPUCC pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff, int8_t offRsp) RT_NOEXCEPT
9081{
9082 Log5(("iemOpHlpCalcRmEffAddrEx: bRm=%#x\n", bRm));
9083# define SET_SS_DEF() \
9084 do \
9085 { \
9086 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
9087 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
9088 } while (0)
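/* SET_SS_DEF makes SS the default segment when no segment override prefix is
   present; it is invoked below for the BP/SP based addressing forms. */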
9089
9090 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
9091 {
9092/** @todo Check the effective address size crap! */
9093 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
9094 {
9095 uint16_t u16EffAddr;
9096
9097 /* Handle the disp16 form with no registers first. */
9098 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
9099 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
9100 else
9101 {
9102 /* Get the displacement. */
9103 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9104 {
9105 case 0: u16EffAddr = 0; break;
9106 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
9107 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
9108 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
9109 }
9110
9111 /* Add the base and index registers to the disp. */
9112 switch (bRm & X86_MODRM_RM_MASK)
9113 {
9114 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
9115 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
9116 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
9117 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
9118 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
9119 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
9120 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
9121 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
9122 }
9123 }
9124
9125 *pGCPtrEff = u16EffAddr;
9126 }
9127 else
9128 {
9129 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9130 uint32_t u32EffAddr;
9131
9132 /* Handle the disp32 form with no registers first. */
9133 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9134 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
9135 else
9136 {
9137 /* Get the register (or SIB) value. */
9138 switch ((bRm & X86_MODRM_RM_MASK))
9139 {
9140 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
9141 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
9142 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
9143 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
9144 case 4: /* SIB */
9145 {
9146 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9147
9148 /* Get the index and scale it. */
9149 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
9150 {
9151 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
9152 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
9153 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
9154 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
9155 case 4: u32EffAddr = 0; /*none */ break;
9156 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
9157 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
9158 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
9159 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9160 }
9161 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9162
9163 /* add base */
9164 switch (bSib & X86_SIB_BASE_MASK)
9165 {
9166 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
9167 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
9168 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
9169 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
9170 case 4:
9171 u32EffAddr += pVCpu->cpum.GstCtx.esp + offRsp;
9172 SET_SS_DEF();
9173 break;
9174 case 5:
9175 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9176 {
9177 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
9178 SET_SS_DEF();
9179 }
9180 else
9181 {
9182 uint32_t u32Disp;
9183 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9184 u32EffAddr += u32Disp;
9185 }
9186 break;
9187 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
9188 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
9189 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9190 }
9191 break;
9192 }
9193 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
9194 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
9195 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
9196 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9197 }
9198
9199 /* Get and add the displacement. */
9200 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9201 {
9202 case 0:
9203 break;
9204 case 1:
9205 {
9206 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9207 u32EffAddr += i8Disp;
9208 break;
9209 }
9210 case 2:
9211 {
9212 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9213 u32EffAddr += u32Disp;
9214 break;
9215 }
9216 default:
9217 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
9218 }
9219
9220 }
9221 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
9222 *pGCPtrEff = u32EffAddr;
9223 else
9224 {
9225 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
9226 *pGCPtrEff = u32EffAddr & UINT16_MAX;
9227 }
9228 }
9229 }
9230 else
9231 {
9232 uint64_t u64EffAddr;
9233
9234 /* Handle the rip+disp32 form with no registers first. */
9235 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9236 {
9237 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
9238 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
9239 }
9240 else
9241 {
9242 /* Get the register (or SIB) value. */
9243 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
9244 {
9245 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9246 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9247 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9248 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9249 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
9250 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9251 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9252 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9253 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9254 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9255 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9256 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9257 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9258 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9259 /* SIB */
9260 case 4:
9261 case 12:
9262 {
9263 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9264
9265 /* Get the index and scale it. */
9266 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
9267 {
9268 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9269 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9270 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9271 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9272 case 4: u64EffAddr = 0; /*none */ break;
9273 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
9274 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9275 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9276 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9277 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9278 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9279 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9280 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
9281 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9282 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9283 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9284 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9285 }
9286 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9287
9288 /* add base */
9289 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
9290 {
9291 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
9292 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
9293 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
9294 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
9295 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + offRsp; SET_SS_DEF(); break;
9296 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
9297 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
9298 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
9299 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
9300 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
9301 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
9302 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
9303 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
9304 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
9305 /* complicated encodings */
9306 case 5:
9307 case 13:
9308 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9309 {
9310 if (!pVCpu->iem.s.uRexB)
9311 {
9312 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
9313 SET_SS_DEF();
9314 }
9315 else
9316 u64EffAddr += pVCpu->cpum.GstCtx.r13;
9317 }
9318 else
9319 {
9320 uint32_t u32Disp;
9321 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9322 u64EffAddr += (int32_t)u32Disp;
9323 }
9324 break;
9325 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9326 }
9327 break;
9328 }
9329 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9330 }
9331
9332 /* Get and add the displacement. */
9333 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9334 {
9335 case 0:
9336 break;
9337 case 1:
9338 {
9339 int8_t i8Disp;
9340 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9341 u64EffAddr += i8Disp;
9342 break;
9343 }
9344 case 2:
9345 {
9346 uint32_t u32Disp;
9347 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9348 u64EffAddr += (int32_t)u32Disp;
9349 break;
9350 }
9351 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
9352 }
9353
9354 }
9355
9356 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
9357 *pGCPtrEff = u64EffAddr;
9358 else
9359 {
9360 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9361 *pGCPtrEff = u64EffAddr & UINT32_MAX;
9362 }
9363 }
9364
9365 Log5(("iemOpHlpCalcRmEffAddrEx: EffAddr=%#010RGv\n", *pGCPtrEff));
9366 return VINF_SUCCESS;
9367}
9368
9369
9370#ifdef IEM_WITH_SETJMP
9371/**
9372 * Calculates the effective address of a ModR/M memory operand.
9373 *
9374 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
9375 *
9376 * May longjmp on internal error.
9377 *
9378 * @return The effective address.
9379 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9380 * @param bRm The ModRM byte.
9381 * @param cbImm The size of any immediate following the
9382 * effective address opcode bytes. Important for
9383 * RIP relative addressing.
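 *
 * @note    Worked example (32-bit addressing): bRm=0x44 gives mod=01, r/m=100,
 *          i.e. a SIB byte and a disp8 follow; with bSib=0x5C (base=ESP,
 *          index=EBX, scale=2) the result is ESP + EBX*2 + disp8 and SS
 *          becomes the default segment.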
9384 */
9385RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPUCC pVCpu, uint8_t bRm, uint8_t cbImm) IEM_NOEXCEPT_MAY_LONGJMP
9386{
9387 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
9388# define SET_SS_DEF() \
9389 do \
9390 { \
9391 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
9392 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
9393 } while (0)
9394
9395 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
9396 {
9397/** @todo Check the effective address size crap! */
9398 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
9399 {
9400 uint16_t u16EffAddr;
9401
9402 /* Handle the disp16 form with no registers first. */
9403 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
9404 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
9405 else
9406 {
9407 /* Get the displacement. */
9408 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9409 {
9410 case 0: u16EffAddr = 0; break;
9411 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
9412 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
9413 default: AssertFailedStmt(IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_1)); /* (caller checked for these) */
9414 }
9415
9416 /* Add the base and index registers to the disp. */
9417 switch (bRm & X86_MODRM_RM_MASK)
9418 {
9419 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
9420 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
9421 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
9422 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
9423 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
9424 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
9425 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
9426 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
9427 }
9428 }
9429
9430 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
9431 return u16EffAddr;
9432 }
9433
9434 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9435 uint32_t u32EffAddr;
9436
9437 /* Handle the disp32 form with no registers first. */
9438 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9439 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
9440 else
9441 {
9442 /* Get the register (or SIB) value. */
9443 switch ((bRm & X86_MODRM_RM_MASK))
9444 {
9445 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
9446 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
9447 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
9448 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
9449 case 4: /* SIB */
9450 {
9451 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9452
9453 /* Get the index and scale it. */
9454 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
9455 {
9456 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
9457 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
9458 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
9459 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
9460 case 4: u32EffAddr = 0; /*none */ break;
9461 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
9462 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
9463 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
9464 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9465 }
9466 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9467
9468 /* add base */
9469 switch (bSib & X86_SIB_BASE_MASK)
9470 {
9471 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
9472 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
9473 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
9474 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
9475 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp; SET_SS_DEF(); break;
9476 case 5:
9477 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9478 {
9479 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
9480 SET_SS_DEF();
9481 }
9482 else
9483 {
9484 uint32_t u32Disp;
9485 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9486 u32EffAddr += u32Disp;
9487 }
9488 break;
9489 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
9490 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
9491 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9492 }
9493 break;
9494 }
9495 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
9496 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
9497 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
9498 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9499 }
9500
9501 /* Get and add the displacement. */
9502 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9503 {
9504 case 0:
9505 break;
9506 case 1:
9507 {
9508 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9509 u32EffAddr += i8Disp;
9510 break;
9511 }
9512 case 2:
9513 {
9514 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9515 u32EffAddr += u32Disp;
9516 break;
9517 }
9518 default:
9519 AssertFailedStmt(IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_2)); /* (caller checked for these) */
9520 }
9521 }
9522
9523 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
9524 {
9525 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
9526 return u32EffAddr;
9527 }
9528 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
9529 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX32\n", u32EffAddr & UINT16_MAX));
9530 return u32EffAddr & UINT16_MAX;
9531 }
9532
9533 uint64_t u64EffAddr;
9534
9535 /* Handle the rip+disp32 form with no registers first. */
9536 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9537 {
9538 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
9539 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
9540 }
9541 else
9542 {
9543 /* Get the register (or SIB) value. */
9544 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
9545 {
9546 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9547 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9548 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9549 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9550 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
9551 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9552 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9553 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9554 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9555 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9556 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9557 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9558 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9559 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9560 /* SIB */
9561 case 4:
9562 case 12:
9563 {
9564 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9565
9566 /* Get the index and scale it. */
9567 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
9568 {
9569 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9570 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9571 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9572 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9573 case 4: u64EffAddr = 0; /*none */ break;
9574 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
9575 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9576 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9577 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9578 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9579 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9580 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9581 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
9582 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9583 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9584 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9585 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9586 }
9587 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9588
9589 /* add base */
9590 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
9591 {
9592 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
9593 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
9594 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
9595 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
9596 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp; SET_SS_DEF(); break;
9597 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
9598 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
9599 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
9600 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
9601 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
9602 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
9603 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
9604 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
9605 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
9606 /* complicated encodings */
9607 case 5:
9608 case 13:
9609 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9610 {
9611 if (!pVCpu->iem.s.uRexB)
9612 {
9613 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
9614 SET_SS_DEF();
9615 }
9616 else
9617 u64EffAddr += pVCpu->cpum.GstCtx.r13;
9618 }
9619 else
9620 {
9621 uint32_t u32Disp;
9622 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9623 u64EffAddr += (int32_t)u32Disp;
9624 }
9625 break;
9626 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9627 }
9628 break;
9629 }
9630 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9631 }
9632
9633 /* Get and add the displacement. */
9634 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9635 {
9636 case 0:
9637 break;
9638 case 1:
9639 {
9640 int8_t i8Disp;
9641 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9642 u64EffAddr += i8Disp;
9643 break;
9644 }
9645 case 2:
9646 {
9647 uint32_t u32Disp;
9648 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9649 u64EffAddr += (int32_t)u32Disp;
9650 break;
9651 }
9652 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
9653 }
9654
9655 }
9656
9657 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
9658 {
9659 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
9660 return u64EffAddr;
9661 }
9662 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9663 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
9664 return u64EffAddr & UINT32_MAX;
9665}
9666#endif /* IEM_WITH_SETJMP */
9667
9668/** @} */
9669
9670
9671#ifdef LOG_ENABLED
9672/**
9673 * Logs the current instruction.
9674 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9675 * @param fSameCtx Set if we have the same context information as the VMM,
9676 * clear if we may have already executed an instruction in
9677 * our debug context. When clear, we assume IEMCPU holds
9678 * valid CPU mode info.
9679 *                     Note: the @a fSameCtx parameter is now misleading and
9680 *                     obsolete.
9681 * @param pszFunction The IEM function doing the execution.
9682 */
9683static void iemLogCurInstr(PVMCPUCC pVCpu, bool fSameCtx, const char *pszFunction) RT_NOEXCEPT
9684{
9685# ifdef IN_RING3
9686 if (LogIs2Enabled())
9687 {
9688 char szInstr[256];
9689 uint32_t cbInstr = 0;
9690 if (fSameCtx)
9691 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
9692 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
9693 szInstr, sizeof(szInstr), &cbInstr);
9694 else
9695 {
9696 uint32_t fFlags = 0;
9697 switch (pVCpu->iem.s.enmCpuMode)
9698 {
9699 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
9700 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
9701 case IEMMODE_16BIT:
9702 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) || pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
9703 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
9704 else
9705 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
9706 break;
9707 }
9708 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fFlags,
9709 szInstr, sizeof(szInstr), &cbInstr);
9710 }
9711
9712 PCX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
9713 Log2(("**** %s\n"
9714 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
9715 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
9716 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
9717 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
9718 " %s\n"
9719 , pszFunction,
9720 pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ebx, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.esi, pVCpu->cpum.GstCtx.edi,
9721 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL, pVCpu->cpum.GstCtx.tr.Sel,
9722 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ds.Sel, pVCpu->cpum.GstCtx.es.Sel,
9723 pVCpu->cpum.GstCtx.fs.Sel, pVCpu->cpum.GstCtx.gs.Sel, pVCpu->cpum.GstCtx.eflags.u,
9724 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
9725 szInstr));
9726
9727 if (LogIs3Enabled())
9728 DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
9729 }
9730 else
9731# endif
9732 LogFlow(("%s: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n", pszFunction, pVCpu->cpum.GstCtx.cs.Sel,
9733 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u));
9734 RT_NOREF_PV(pVCpu); RT_NOREF_PV(fSameCtx);
9735}
9736#endif /* LOG_ENABLED */
9737
9738
9739#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9740/**
9741 * Deals with VMCPU_FF_VMX_APIC_WRITE, VMCPU_FF_VMX_MTF, VMCPU_FF_VMX_NMI_WINDOW,
9742 * VMCPU_FF_VMX_PREEMPT_TIMER and VMCPU_FF_VMX_INT_WINDOW.
9743 *
9744 * @returns Modified rcStrict.
9745 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9746 * @param rcStrict The instruction execution status.
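 *
 * @note    The pending force-flags are serviced in priority order: APIC-write
 *          emulation first, then MTF, then the VMX-preemption timer, and
 *          finally the NMI-window and interrupt-window intercepts.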
9747 */
9748static VBOXSTRICTRC iemHandleNestedInstructionBoundaryFFs(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict) RT_NOEXCEPT
9749{
9750 Assert(CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)));
9751 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF))
9752 {
9753 /* VMX preemption timer takes priority over NMI-window exits. */
9754 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
9755 {
9756 rcStrict = iemVmxVmexitPreemptTimer(pVCpu);
9757 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER));
9758 }
9759 /*
9760 * Check remaining intercepts.
9761 *
9762 * NMI-window and Interrupt-window VM-exits.
9763 * Interrupt shadow (block-by-STI and Mov SS) inhibits interrupts and may also block NMIs.
9764 * Event injection during VM-entry takes priority over NMI-window and interrupt-window VM-exits.
9765 *
9766 * See Intel spec. 26.7.6 "NMI-Window Exiting".
9767 * See Intel spec. 26.7.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
9768 */
9769 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW | VMCPU_FF_VMX_INT_WINDOW)
9770 && !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
9771 && !TRPMHasTrap(pVCpu))
9772 {
9773 Assert(CPUMIsGuestVmxInterceptEvents(&pVCpu->cpum.GstCtx));
9774 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW)
9775 && CPUMIsGuestVmxVirtNmiBlocking(&pVCpu->cpum.GstCtx))
9776 {
9777 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_NMI_WINDOW, 0 /* u64ExitQual */);
9778 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW));
9779 }
9780 else if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW)
9781 && CPUMIsGuestVmxVirtIntrEnabled(&pVCpu->cpum.GstCtx))
9782 {
9783 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_INT_WINDOW, 0 /* u64ExitQual */);
9784 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW));
9785 }
9786 }
9787 }
9788 /* TPR-below threshold/APIC write has the highest priority. */
9789 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
9790 {
9791 rcStrict = iemVmxApicWriteEmulation(pVCpu);
9792 Assert(!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx));
9793 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE));
9794 }
9795 /* MTF takes priority over VMX-preemption timer. */
9796 else
9797 {
9798 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* u64ExitQual */);
9799 Assert(!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx));
9800 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF));
9801 }
9802 return rcStrict;
9803}
9804#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
9805
9806
9807/** @def IEM_TRY_SETJMP
9808 * Wrapper around setjmp / try, hiding all the ugly differences.
9809 *
9810 * @note Use with extreme care as this is a fragile macro.
9811 * @param a_pVCpu The cross context virtual CPU structure of the calling EMT.
9812 * @param a_rcTarget The variable that should receive the status code in case
9813 * of a longjmp/throw.
9814 */
9815/** @def IEM_TRY_SETJMP_AGAIN
9816 * For when setjmp / try is used again in the same variable scope as a previous
9817 * IEM_TRY_SETJMP invocation.
9818 */
9819/** @def IEM_CATCH_LONGJMP_BEGIN
9820 * Start wrapper for catch / setjmp-else.
9821 *
9822 * This will set up a scope.
9823 *
9824 * @note Use with extreme care as this is a fragile macro.
9825 * @param a_pVCpu The cross context virtual CPU structure of the calling EMT.
9826 * @param a_rcTarget The variable that should receive the status code in case
9827 * of a longjmp/throw.
9828 */
9829/** @def IEM_CATCH_LONGJMP_END
9830 * End wrapper for catch / setjmp-else.
9831 *
9832 * This will close the scope set up by IEM_CATCH_LONGJMP_BEGIN and clean up the
9833 * state.
9834 *
9835 * @note Use with extreme care as this is a fragile macro.
9836 * @param a_pVCpu The cross context virtual CPU structure of the calling EMT.
9837 */
9838#if defined(IEM_WITH_SETJMP) || defined(DOXYGEN_RUNNING)
9839# ifdef IEM_WITH_THROW_CATCH
9840# define IEM_TRY_SETJMP(a_pVCpu, a_rcTarget) \
9841 a_rcTarget = VINF_SUCCESS; \
9842 try
9843# define IEM_TRY_SETJMP_AGAIN(a_pVCpu, a_rcTarget) \
9844 IEM_TRY_SETJMP(a_pVCpu, a_rcTarget)
9845# define IEM_CATCH_LONGJMP_BEGIN(a_pVCpu, a_rcTarget) \
9846 catch (int rcThrown) \
9847 { \
9848 a_rcTarget = rcThrown
9849# define IEM_CATCH_LONGJMP_END(a_pVCpu) \
9850 } \
9851 ((void)0)
9852# else /* !IEM_WITH_THROW_CATCH */
9853# define IEM_TRY_SETJMP(a_pVCpu, a_rcTarget) \
9854 jmp_buf JmpBuf; \
9855 jmp_buf * volatile pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf); \
9856 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf; \
9857 if ((rcStrict = setjmp(JmpBuf)) == 0)
9858# define IEM_TRY_SETJMP_AGAIN(a_pVCpu, a_rcTarget) \
9859 pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf); \
9860 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf; \
9861 if ((rcStrict = setjmp(JmpBuf)) == 0)
9862# define IEM_CATCH_LONGJMP_BEGIN(a_pVCpu, a_rcTarget) \
9863 else \
9864 { \
9865 ((void)0)
9866# define IEM_CATCH_LONGJMP_END(a_pVCpu) \
9867 } \
9868 (a_pVCpu)->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf
9869# endif /* !IEM_WITH_THROW_CATCH */
9870#endif /* IEM_WITH_SETJMP */
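/* Typical usage of the wrappers above (this mirrors iemExecOneInner below;
 * note that the setjmp variant requires the status variable to actually be
 * named rcStrict):
 *
 *      VBOXSTRICTRC rcStrict;
 *      IEM_TRY_SETJMP(pVCpu, rcStrict)
 *      {
 *          ... decode and execute, may longjmp / throw ...
 *      }
 *      IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
 *      {
 *          ... rcStrict now holds the propagated status code ...
 *      }
 *      IEM_CATCH_LONGJMP_END(pVCpu);
 */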
9871
9872
9873/**
9874 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
9875 * IEMExecOneWithPrefetchedByPC.
9876 *
9877 * Similar code is found in IEMExecLots.
9878 *
9879 * @return Strict VBox status code.
9880 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9881 * @param fExecuteInhibit If set, execute the instruction following CLI,
9882 * POP SS and MOV SS,GR.
9883 * @param pszFunction The calling function name.
9884 */
9885DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPUCC pVCpu, bool fExecuteInhibit, const char *pszFunction)
9886{
9887 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9888 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9889 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9890 RT_NOREF_PV(pszFunction);
9891
9892#ifdef IEM_WITH_SETJMP
9893 VBOXSTRICTRC rcStrict;
9894 IEM_TRY_SETJMP(pVCpu, rcStrict)
9895 {
9896 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9897 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9898 }
9899 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
9900 {
9901 pVCpu->iem.s.cLongJumps++;
9902 }
9903 IEM_CATCH_LONGJMP_END(pVCpu);
9904#else
9905 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9906 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9907#endif
9908 if (rcStrict == VINF_SUCCESS)
9909 pVCpu->iem.s.cInstructions++;
9910 if (pVCpu->iem.s.cActiveMappings > 0)
9911 {
9912 Assert(rcStrict != VINF_SUCCESS);
9913 iemMemRollback(pVCpu);
9914 }
9915 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9916 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9917 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9918
9919//#ifdef DEBUG
9920// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
9921//#endif
9922
9923#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9924 /*
9925 * Perform any VMX nested-guest instruction boundary actions.
9926 *
9927 * If any of these causes a VM-exit, we must skip executing the next
9928 * instruction (would run into stale page tables). A VM-exit makes sure
9929 * there is no interrupt-inhibition, so that should ensure we don't go
9930 * to try execute the next instruction. Clearing fExecuteInhibit is
9931 * problematic because of the setjmp/longjmp clobbering above.
9932 */
9933 if ( !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9934 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)
9935 || rcStrict != VINF_SUCCESS)
9936 { /* likely */ }
9937 else
9938 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9939#endif
9940
9941 /* Execute the next instruction as well if a cli, pop ss or
9942 mov ss, Gr has just completed successfully. */
9943 if ( fExecuteInhibit
9944 && rcStrict == VINF_SUCCESS
9945 && CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx))
9946 {
9947 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fBypassHandlers, pVCpu->iem.s.fDisregardLock);
9948 if (rcStrict == VINF_SUCCESS)
9949 {
9950#ifdef LOG_ENABLED
9951 iemLogCurInstr(pVCpu, false, pszFunction);
9952#endif
9953#ifdef IEM_WITH_SETJMP
9954 IEM_TRY_SETJMP_AGAIN(pVCpu, rcStrict)
9955 {
9956 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9957 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9958 }
9959 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
9960 {
9961 pVCpu->iem.s.cLongJumps++;
9962 }
9963 IEM_CATCH_LONGJMP_END(pVCpu);
9964#else
9965 IEM_OPCODE_GET_FIRST_U8(&b);
9966 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9967#endif
9968 if (rcStrict == VINF_SUCCESS)
9969 {
9970 pVCpu->iem.s.cInstructions++;
9971#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9972 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9973 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW))
9974 { /* likely */ }
9975 else
9976 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9977#endif
9978 }
9979 if (pVCpu->iem.s.cActiveMappings > 0)
9980 {
9981 Assert(rcStrict != VINF_SUCCESS);
9982 iemMemRollback(pVCpu);
9983 }
9984 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9985 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9986 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9987 }
9988 else if (pVCpu->iem.s.cActiveMappings > 0)
9989 iemMemRollback(pVCpu);
9990 /** @todo drop this after we bake this change into RIP advancing. */
9991 CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx); /* hope this is correct for all exceptional cases... */
9992 }
9993
9994 /*
9995 * Return value fiddling, statistics and sanity assertions.
9996 */
9997 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9998
9999 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
10000 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
10001 return rcStrict;
10002}
10003
10004
10005/**
10006 * Execute one instruction.
10007 *
10008 * @return Strict VBox status code.
10009 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10010 */
10011VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPUCC pVCpu)
10012{
10013 AssertCompile(sizeof(pVCpu->iem.s) <= sizeof(pVCpu->iem.padding)); /* (tstVMStruct can't do its job w/o instruction stats) */
10014#ifdef LOG_ENABLED
10015 iemLogCurInstr(pVCpu, true, "IEMExecOne");
10016#endif
10017
10018 /*
10019 * Do the decoding and emulation.
10020 */
10021 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
10022 if (rcStrict == VINF_SUCCESS)
10023 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOne");
10024 else if (pVCpu->iem.s.cActiveMappings > 0)
10025 iemMemRollback(pVCpu);
10026
10027 if (rcStrict != VINF_SUCCESS)
10028 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
10029 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
10030 return rcStrict;
10031}
10032
10033
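/**
 * Executes one instruction like IEMExecOne, additionally returning the number
 * of bytes written by the instruction (as counted in IEMCPU::cbWritten).
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   pcbWritten  Where to return the byte count.  Optional.
 */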
10034VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPUCC pVCpu, uint32_t *pcbWritten)
10035{
10036 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
10037 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
10038 if (rcStrict == VINF_SUCCESS)
10039 {
10040 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneEx");
10041 if (pcbWritten)
10042 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
10043 }
10044 else if (pVCpu->iem.s.cActiveMappings > 0)
10045 iemMemRollback(pVCpu);
10046
10047 return rcStrict;
10048}
10049
10050
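/**
 * Executes one instruction, using the caller supplied pre-fetched opcode bytes
 * when @a OpcodeBytesPC matches the current RIP; otherwise the opcode bytes
 * are fetched from guest memory as usual.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu           The cross context virtual CPU structure of the calling EMT.
 * @param   OpcodeBytesPC   The guest RIP the supplied opcode bytes correspond to.
 * @param   pvOpcodeBytes   The pre-fetched opcode bytes.
 * @param   cbOpcodeBytes   Number of valid bytes at @a pvOpcodeBytes.
 */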
10051VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPUCC pVCpu, uint64_t OpcodeBytesPC,
10052 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
10053{
10054 VBOXSTRICTRC rcStrict;
10055 if ( cbOpcodeBytes
10056 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
10057 {
10058 iemInitDecoder(pVCpu, false, false);
10059#ifdef IEM_WITH_CODE_TLB
10060 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
10061 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
10062 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
10063 pVCpu->iem.s.offCurInstrStart = 0;
10064 pVCpu->iem.s.offInstrNextByte = 0;
10065#else
10066 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
10067 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
10068#endif
10069 rcStrict = VINF_SUCCESS;
10070 }
10071 else
10072 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
10073 if (rcStrict == VINF_SUCCESS)
10074 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneWithPrefetchedByPC");
10075 else if (pVCpu->iem.s.cActiveMappings > 0)
10076 iemMemRollback(pVCpu);
10077
10078 return rcStrict;
10079}
10080
10081
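/**
 * Variant of IEMExecOneEx that initializes the decoder in bypass-handlers mode
 * and does not execute the instruction following CLI / POP SS / MOV SS.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   pcbWritten  Where to return the byte count.  Optional.
 */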
10082VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPUCC pVCpu, uint32_t *pcbWritten)
10083{
10084 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
10085 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true, false);
10086 if (rcStrict == VINF_SUCCESS)
10087 {
10088 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassEx");
10089 if (pcbWritten)
10090 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
10091 }
10092 else if (pVCpu->iem.s.cActiveMappings > 0)
10093 iemMemRollback(pVCpu);
10094
10095 return rcStrict;
10096}
10097
10098
10099VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPUCC pVCpu, uint64_t OpcodeBytesPC,
10100 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
10101{
10102 VBOXSTRICTRC rcStrict;
10103 if ( cbOpcodeBytes
10104 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
10105 {
10106 iemInitDecoder(pVCpu, true, false);
10107#ifdef IEM_WITH_CODE_TLB
10108 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
10109 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
10110 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
10111 pVCpu->iem.s.offCurInstrStart = 0;
10112 pVCpu->iem.s.offInstrNextByte = 0;
10113#else
10114 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
10115 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
10116#endif
10117 rcStrict = VINF_SUCCESS;
10118 }
10119 else
10120 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true, false);
10121 if (rcStrict == VINF_SUCCESS)
10122 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPC");
10123 else if (pVCpu->iem.s.cActiveMappings > 0)
10124 iemMemRollback(pVCpu);
10125
10126 return rcStrict;
10127}
10128
10129
10130/**
10131 * For handling split cacheline lock operations when the host has split-lock
10132 * detection enabled.
10133 *
10134 * This will cause the interpreter to disregard the lock prefix and implicit
10135 * locking (xchg).
10136 *
10137 * @returns Strict VBox status code.
10138 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10139 */
10140VMMDECL(VBOXSTRICTRC) IEMExecOneIgnoreLock(PVMCPUCC pVCpu)
10141{
10142 /*
10143 * Do the decoding and emulation.
10144 */
10145 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, true /*fDisregardLock*/);
10146 if (rcStrict == VINF_SUCCESS)
10147 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneIgnoreLock");
10148 else if (pVCpu->iem.s.cActiveMappings > 0)
10149 iemMemRollback(pVCpu);
10150
10151 if (rcStrict != VINF_SUCCESS)
10152 LogFlow(("IEMExecOneIgnoreLock: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
10153 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
10154 return rcStrict;
10155}
10156
10157
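/**
 * Executes instructions until a pending force-flag, an error status or the
 * given instruction limit stops the loop.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu             The cross context virtual CPU structure of the calling EMT.
 * @param   cMaxInstructions  Maximum number of instructions to execute.
 * @param   cPollRate         Timer polling interval mask; must be a power of
 *                            two minus one, timers are polled roughly every
 *                            cPollRate + 1 instructions.
 * @param   pcInstructions    Where to return the number of instructions
 *                            executed.  Optional.
 */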
10158VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPUCC pVCpu, uint32_t cMaxInstructions, uint32_t cPollRate, uint32_t *pcInstructions)
10159{
10160 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
10161 AssertMsg(RT_IS_POWER_OF_TWO(cPollRate + 1), ("%#x\n", cPollRate));
10162
10163 /*
10164 * See if there is an interrupt pending in TRPM, inject it if we can.
10165 */
10166 /** @todo What if we are injecting an exception and not an interrupt? Is that
10167 * possible here? For now we assert it is indeed only an interrupt. */
10168 if (!TRPMHasTrap(pVCpu))
10169 { /* likely */ }
10170 else
10171 {
10172 if ( !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
10173 && !CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx))
10174 {
10175 /** @todo Can we centralize this under CPUMCanInjectInterrupt()? */
10176#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
10177 bool fIntrEnabled = CPUMGetGuestGif(&pVCpu->cpum.GstCtx);
10178 if (fIntrEnabled)
10179 {
10180 if (!CPUMIsGuestInNestedHwvirtMode(IEM_GET_CTX(pVCpu)))
10181 fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
10182 else if (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
10183 fIntrEnabled = CPUMIsGuestVmxPhysIntrEnabled(IEM_GET_CTX(pVCpu));
10184 else
10185 {
10186 Assert(CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)));
10187 fIntrEnabled = CPUMIsGuestSvmPhysIntrEnabled(pVCpu, IEM_GET_CTX(pVCpu));
10188 }
10189 }
10190#else
10191 bool fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
10192#endif
10193 if (fIntrEnabled)
10194 {
10195 uint8_t u8TrapNo;
10196 TRPMEVENT enmType;
10197 uint32_t uErrCode;
10198 RTGCPTR uCr2;
10199 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /*pu8InstLen*/, NULL /*fIcebp*/);
10200 AssertRC(rc2);
10201 Assert(enmType == TRPM_HARDWARE_INT);
10202 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /*cbInstr*/);
10203
10204 TRPMResetTrap(pVCpu);
10205
10206#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
10207 /* Injecting an event may cause a VM-exit. */
10208 if ( rcStrict != VINF_SUCCESS
10209 && rcStrict != VINF_IEM_RAISED_XCPT)
10210 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
10211#else
10212 NOREF(rcStrict);
10213#endif
10214 }
10215 }
10216 }
10217
10218 /*
10219 * Initial decoder init w/ prefetch, then set up setjmp.
10220 */
10221 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
10222 if (rcStrict == VINF_SUCCESS)
10223 {
10224#ifdef IEM_WITH_SETJMP
10225 pVCpu->iem.s.cActiveMappings = 0; /** @todo wtf? */
10226 IEM_TRY_SETJMP(pVCpu, rcStrict)
10227#endif
10228 {
10229 /*
10230 * The run loop.  We limit ourselves to the caller specified instruction count (cMaxInstructions).
10231 */
10232 uint32_t cMaxInstructionsGccStupidity = cMaxInstructions;
10233 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
10234 for (;;)
10235 {
10236 /*
10237 * Log the state.
10238 */
10239#ifdef LOG_ENABLED
10240 iemLogCurInstr(pVCpu, true, "IEMExecLots");
10241#endif
10242
10243 /*
10244 * Do the decoding and emulation.
10245 */
10246 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
10247 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
10248#ifdef VBOX_STRICT
10249 CPUMAssertGuestRFlagsCookie(pVM, pVCpu);
10250#endif
10251 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10252 {
10253 Assert(pVCpu->iem.s.cActiveMappings == 0);
10254 pVCpu->iem.s.cInstructions++;
10255
10256#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10257 /* Perform any VMX nested-guest instruction boundary actions. */
10258 uint64_t fCpu = pVCpu->fLocalForcedActions;
10259 if (!(fCpu & ( VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
10260 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)))
10261 { /* likely */ }
10262 else
10263 {
10264 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
10265 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10266 fCpu = pVCpu->fLocalForcedActions;
10267 else
10268 {
10269 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10270 break;
10271 }
10272 }
10273#endif
10274 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
10275 {
10276#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
10277 uint64_t fCpu = pVCpu->fLocalForcedActions;
10278#endif
10279 fCpu &= VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
10280 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
10281 | VMCPU_FF_TLB_FLUSH
10282 | VMCPU_FF_UNHALT );
10283
10284 if (RT_LIKELY( ( !fCpu
10285 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
10286 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF) )
10287 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) ))
10288 {
10289 if (cMaxInstructionsGccStupidity-- > 0)
10290 {
10291 /* Poll timers every now and then according to the caller's specs. */
10292 if ( (cMaxInstructionsGccStupidity & cPollRate) != 0
10293 || !TMTimerPollBool(pVM, pVCpu))
10294 {
10295 Assert(pVCpu->iem.s.cActiveMappings == 0);
10296 iemReInitDecoder(pVCpu);
10297 continue;
10298 }
10299 }
10300 }
10301 }
10302 Assert(pVCpu->iem.s.cActiveMappings == 0);
10303 }
10304 else if (pVCpu->iem.s.cActiveMappings > 0)
10305 iemMemRollback(pVCpu);
10306 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10307 break;
10308 }
10309 }
10310#ifdef IEM_WITH_SETJMP
10311 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
10312 {
10313 if (pVCpu->iem.s.cActiveMappings > 0)
10314 iemMemRollback(pVCpu);
10315# if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
10316 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10317# endif
10318 pVCpu->iem.s.cLongJumps++;
10319 }
10320 IEM_CATCH_LONGJMP_END(pVCpu);
10321#endif
10322
10323 /*
10324 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
10325 */
10326 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
10327 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
10328 }
10329 else
10330 {
10331 if (pVCpu->iem.s.cActiveMappings > 0)
10332 iemMemRollback(pVCpu);
10333
10334#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
10335 /*
10336 * When a nested-guest causes an exception intercept (e.g. #PF) while fetching
10337 * code as part of instruction execution, we need this to fix up VINF_SVM_VMEXIT.
10338 */
10339 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10340#endif
10341 }
10342
10343 /*
10344 * Maybe re-enter raw-mode and log.
10345 */
10346 if (rcStrict != VINF_SUCCESS)
10347 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
10348 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
10349 if (pcInstructions)
10350 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
10351 return rcStrict;
10352}
10353
10354
10355/**
10356 * Interface used by EMExecuteExec, does exit statistics and limits.
10357 *
10358 * @returns Strict VBox status code.
10359 * @param pVCpu The cross context virtual CPU structure.
10360 * @param fWillExit To be defined.
10361 * @param cMinInstructions Minimum number of instructions to execute before checking for FFs.
10362 * @param cMaxInstructions Maximum number of instructions to execute.
10363 * @param cMaxInstructionsWithoutExits
10364 * The max number of instructions without exits.
10365 * @param pStats Where to return statistics.
10366 */
10367VMMDECL(VBOXSTRICTRC) IEMExecForExits(PVMCPUCC pVCpu, uint32_t fWillExit, uint32_t cMinInstructions, uint32_t cMaxInstructions,
10368 uint32_t cMaxInstructionsWithoutExits, PIEMEXECFOREXITSTATS pStats)
10369{
10370 NOREF(fWillExit); /** @todo define flexible exit crits */
10371
10372 /*
10373 * Initialize return stats.
10374 */
10375 pStats->cInstructions = 0;
10376 pStats->cExits = 0;
10377 pStats->cMaxExitDistance = 0;
10378 pStats->cReserved = 0;
10379
10380 /*
10381 * Initial decoder init w/ prefetch, then set up setjmp.
10382 */
10383 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
10384 if (rcStrict == VINF_SUCCESS)
10385 {
10386#ifdef IEM_WITH_SETJMP
10387 pVCpu->iem.s.cActiveMappings = 0; /** @todo wtf?!? */
10388 IEM_TRY_SETJMP(pVCpu, rcStrict)
10389#endif
10390 {
10391#ifdef IN_RING0
10392 bool const fCheckPreemptionPending = !RTThreadPreemptIsPossible() || !RTThreadPreemptIsEnabled(NIL_RTTHREAD);
10393#endif
10394 uint32_t cInstructionSinceLastExit = 0;
10395
10396 /*
10397 * The run loop.  We limit ourselves to the caller specified instruction and exit-distance limits.
10398 */
10399 PVM pVM = pVCpu->CTX_SUFF(pVM);
10400 for (;;)
10401 {
10402 /*
10403 * Log the state.
10404 */
10405#ifdef LOG_ENABLED
10406 iemLogCurInstr(pVCpu, true, "IEMExecForExits");
10407#endif
10408
10409 /*
10410 * Do the decoding and emulation.
10411 */
10412 uint32_t const cPotentialExits = pVCpu->iem.s.cPotentialExits;
10413
10414 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
10415 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
10416
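                /* An exit is recorded whenever the instruction bumped the potential-exit
                   counter; the first instruction of a run is not counted, and the distance
                   since the previous exit feeds cMaxExitDistance. */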
10417 if ( cPotentialExits != pVCpu->iem.s.cPotentialExits
10418 && cInstructionSinceLastExit > 0 /* don't count the first */ )
10419 {
10420 pStats->cExits += 1;
10421 if (cInstructionSinceLastExit > pStats->cMaxExitDistance)
10422 pStats->cMaxExitDistance = cInstructionSinceLastExit;
10423 cInstructionSinceLastExit = 0;
10424 }
10425
10426 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10427 {
10428 Assert(pVCpu->iem.s.cActiveMappings == 0);
10429 pVCpu->iem.s.cInstructions++;
10430 pStats->cInstructions++;
10431 cInstructionSinceLastExit++;
10432
10433#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10434 /* Perform any VMX nested-guest instruction boundary actions. */
10435 uint64_t fCpu = pVCpu->fLocalForcedActions;
10436 if (!(fCpu & ( VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
10437 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)))
10438 { /* likely */ }
10439 else
10440 {
10441 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
10442 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10443 fCpu = pVCpu->fLocalForcedActions;
10444 else
10445 {
10446 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10447 break;
10448 }
10449 }
10450#endif
10451 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
10452 {
10453#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
10454 uint64_t fCpu = pVCpu->fLocalForcedActions;
10455#endif
10456 fCpu &= VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
10457 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
10458 | VMCPU_FF_TLB_FLUSH
10459 | VMCPU_FF_UNHALT );
10460 if (RT_LIKELY( ( ( !fCpu
10461 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
10462 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF))
10463 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) )
10464 || pStats->cInstructions < cMinInstructions))
10465 {
10466 if (pStats->cInstructions < cMaxInstructions)
10467 {
10468 if (cInstructionSinceLastExit <= cMaxInstructionsWithoutExits)
10469 {
10470#ifdef IN_RING0
10471 if ( !fCheckPreemptionPending
10472 || !RTThreadPreemptIsPending(NIL_RTTHREAD))
10473#endif
10474 {
10475 Assert(pVCpu->iem.s.cActiveMappings == 0);
10476 iemReInitDecoder(pVCpu);
10477 continue;
10478 }
10479#ifdef IN_RING0
10480 rcStrict = VINF_EM_RAW_INTERRUPT;
10481 break;
10482#endif
10483 }
10484 }
10485 }
10486 Assert(!(fCpu & VMCPU_FF_IEM));
10487 }
10488 Assert(pVCpu->iem.s.cActiveMappings == 0);
10489 }
10490 else if (pVCpu->iem.s.cActiveMappings > 0)
10491 iemMemRollback(pVCpu);
10492 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10493 break;
10494 }
10495 }
10496#ifdef IEM_WITH_SETJMP
10497 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
10498 {
10499 if (pVCpu->iem.s.cActiveMappings > 0)
10500 iemMemRollback(pVCpu);
10501 pVCpu->iem.s.cLongJumps++;
10502 }
10503 IEM_CATCH_LONGJMP_END(pVCpu);
10504#endif
10505
10506 /*
10507 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
10508 */
10509 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
10510 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
10511 }
10512 else
10513 {
10514 if (pVCpu->iem.s.cActiveMappings > 0)
10515 iemMemRollback(pVCpu);
10516
10517#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
10518 /*
10519 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
10520 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
10521 */
10522 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10523#endif
10524 }
10525
10526 /*
10527 * Maybe re-enter raw-mode and log.
10528 */
10529 if (rcStrict != VINF_SUCCESS)
10530 LogFlow(("IEMExecForExits: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc; ins=%u exits=%u maxdist=%u\n",
10531 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp,
10532 pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict), pStats->cInstructions, pStats->cExits, pStats->cMaxExitDistance));
10533 return rcStrict;
10534}
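
/*
 * Illustrative sketch (not compiled): reading the statistics the run loop
 * above fills in.  Only the members cInstructions, cExits and cMaxExitDistance
 * come from the code above; the stats structure name IEMEXECFOREXITSTATS and
 * the exact IEMExecForExits parameter list (declared elsewhere) are assumed.
 */
#if 0
    IEMEXECFOREXITSTATS Stats;
    RT_ZERO(Stats);
    VBOXSTRICTRC rcStrict = IEMExecForExits(pVCpu, fWillExit, cMinInstructions, cMaxInstructions,
                                            cMaxInstructionsWithoutExits, &Stats);
    if (   Stats.cExits > 0
        && Stats.cMaxExitDistance < 32)
        Log(("Exits are densely clustered: %u exits over %u instructions, max distance %u\n",
             Stats.cExits, Stats.cInstructions, Stats.cMaxExitDistance));
#endif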
10535
10536
10537/**
10538 * Injects a trap, fault, abort, software interrupt or external interrupt.
10539 *
10540 * The parameter list matches TRPMQueryTrapAll pretty closely.
10541 *
10542 * @returns Strict VBox status code.
10543 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10544 * @param u8TrapNo The trap number.
10545 * @param   enmType             The event type (trap/fault/abort), software
10546 *                              interrupt or hardware interrupt.
10547 * @param uErrCode The error code if applicable.
10548 * @param uCr2 The CR2 value if applicable.
10549 * @param cbInstr The instruction length (only relevant for
10550 * software interrupts).
10551 */
10552VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPUCC pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
10553 uint8_t cbInstr)
10554{
10555 iemInitDecoder(pVCpu, false, false);
10556#ifdef DBGFTRACE_ENABLED
10557 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
10558 u8TrapNo, enmType, uErrCode, uCr2);
10559#endif
10560
10561 uint32_t fFlags;
10562 switch (enmType)
10563 {
10564 case TRPM_HARDWARE_INT:
10565 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
10566 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
10567 uErrCode = uCr2 = 0;
10568 break;
10569
10570 case TRPM_SOFTWARE_INT:
10571 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
10572 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
10573 uErrCode = uCr2 = 0;
10574 break;
10575
10576 case TRPM_TRAP:
10577 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
10578 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
10579 if (u8TrapNo == X86_XCPT_PF)
10580 fFlags |= IEM_XCPT_FLAGS_CR2;
10581 switch (u8TrapNo)
10582 {
10583 case X86_XCPT_DF:
10584 case X86_XCPT_TS:
10585 case X86_XCPT_NP:
10586 case X86_XCPT_SS:
10587 case X86_XCPT_PF:
10588 case X86_XCPT_AC:
10589 case X86_XCPT_GP:
10590 fFlags |= IEM_XCPT_FLAGS_ERR;
10591 break;
10592 }
10593 break;
10594
10595 IEM_NOT_REACHED_DEFAULT_CASE_RET();
10596 }
10597
10598 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
10599
10600 if (pVCpu->iem.s.cActiveMappings > 0)
10601 iemMemRollback(pVCpu);
10602
10603 return rcStrict;
10604}
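
/*
 * Illustrative sketch (not compiled): injecting a page fault.  The switch
 * above adds IEM_XCPT_FLAGS_ERR and IEM_XCPT_FLAGS_CR2 for X86_XCPT_PF, so the
 * caller only supplies the error code and fault address.  uPfErrCode and
 * GCPtrFault stand for caller provided values.
 */
#if 0
    VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, X86_XCPT_PF, TRPM_TRAP,
                                          uPfErrCode, GCPtrFault, 0 /*cbInstr, software ints only*/);
    if (rcStrict == VINF_IEM_RAISED_XCPT) /* the exception was raised/delivered to the guest */
        rcStrict = VINF_SUCCESS;
#endif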
10605
10606
10607/**
10608 * Injects the active TRPM event.
10609 *
10610 * @returns Strict VBox status code.
10611 * @param pVCpu The cross context virtual CPU structure.
10612 */
10613VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPUCC pVCpu)
10614{
10615#ifndef IEM_IMPLEMENTS_TASKSWITCH
10616 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
10617#else
10618 uint8_t u8TrapNo;
10619 TRPMEVENT enmType;
10620 uint32_t uErrCode;
10621 RTGCUINTPTR uCr2;
10622 uint8_t cbInstr;
10623 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr, NULL /* fIcebp */);
10624 if (RT_FAILURE(rc))
10625 return rc;
10626
10627 /** @todo r=ramshankar: Pass ICEBP info. to IEMInjectTrap() below and handle
10628 * ICEBP \#DB injection as a special case. */
10629 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
10630#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
10631 if (rcStrict == VINF_SVM_VMEXIT)
10632 rcStrict = VINF_SUCCESS;
10633#endif
10634#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10635 if (rcStrict == VINF_VMX_VMEXIT)
10636 rcStrict = VINF_SUCCESS;
10637#endif
10638 /** @todo Are there any other codes that imply the event was successfully
10639 * delivered to the guest? See @bugref{6607}. */
10640 if ( rcStrict == VINF_SUCCESS
10641 || rcStrict == VINF_IEM_RAISED_XCPT)
10642 TRPMResetTrap(pVCpu);
10643
10644 return rcStrict;
10645#endif
10646}
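
/*
 * Illustrative sketch (not compiled): how execution-manager style code might
 * hand a pending TRPM event to IEM.  TRPMHasTrap() is assumed to be the usual
 * TRPM query and is not part of this file.
 */
#if 0
    if (TRPMHasTrap(pVCpu))
    {
        VBOXSTRICTRC rcStrict = IEMInjectTrpmEvent(pVCpu);
        /* On VINF_SUCCESS (or VINF_IEM_RAISED_XCPT) the trap has already been
           reset by IEMInjectTrpmEvent, see above. */
    }
#endif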
10647
10648
10649VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
10650{
10651 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
10652 return VERR_NOT_IMPLEMENTED;
10653}
10654
10655
10656VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
10657{
10658 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
10659 return VERR_NOT_IMPLEMENTED;
10660}
10661
10662
10663/**
10664 * Interface for HM and EM for executing string I/O OUT (write) instructions.
10665 *
10666 * This API ASSUMES that the caller has already verified that the guest code is
10667 * allowed to access the I/O port. (The I/O port is in the DX register in the
10668 * guest state.)
10669 *
10670 * @returns Strict VBox status code.
10671 * @param pVCpu The cross context virtual CPU structure.
10672 * @param cbValue The size of the I/O port access (1, 2, or 4).
10673 * @param enmAddrMode The addressing mode.
10674 * @param fRepPrefix Indicates whether a repeat prefix is used
10675 * (doesn't matter which for this instruction).
10676 * @param cbInstr The instruction length in bytes.
10677 * @param   iEffSeg             The effective segment register (index).
10678 * @param fIoChecked Whether the access to the I/O port has been
10679 * checked or not. It's typically checked in the
10680 * HM scenario.
10681 */
10682VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
10683 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
10684{
10685 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
10686 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10687
10688 /*
10689 * State init.
10690 */
10691 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10692
10693 /*
10694 * Switch orgy for getting to the right handler.
10695 */
10696 VBOXSTRICTRC rcStrict;
10697 if (fRepPrefix)
10698 {
10699 switch (enmAddrMode)
10700 {
10701 case IEMMODE_16BIT:
10702 switch (cbValue)
10703 {
10704 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10705 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10706 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10707 default:
10708 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10709 }
10710 break;
10711
10712 case IEMMODE_32BIT:
10713 switch (cbValue)
10714 {
10715 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10716 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10717 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10718 default:
10719 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10720 }
10721 break;
10722
10723 case IEMMODE_64BIT:
10724 switch (cbValue)
10725 {
10726 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10727 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10728 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10729 default:
10730 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10731 }
10732 break;
10733
10734 default:
10735 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10736 }
10737 }
10738 else
10739 {
10740 switch (enmAddrMode)
10741 {
10742 case IEMMODE_16BIT:
10743 switch (cbValue)
10744 {
10745 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10746 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10747 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10748 default:
10749 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10750 }
10751 break;
10752
10753 case IEMMODE_32BIT:
10754 switch (cbValue)
10755 {
10756 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10757 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10758 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10759 default:
10760 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10761 }
10762 break;
10763
10764 case IEMMODE_64BIT:
10765 switch (cbValue)
10766 {
10767 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10768 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10769 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10770 default:
10771 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10772 }
10773 break;
10774
10775 default:
10776 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10777 }
10778 }
10779
10780 if (pVCpu->iem.s.cActiveMappings)
10781 iemMemRollback(pVCpu);
10782
10783 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10784}
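
/*
 * Illustrative sketch (not compiled): an exit handler invoking the above for a
 * 'rep outsb' with 32-bit addressing and the default DS segment.  cbInstr
 * stands for the instruction length reported by the exit.
 */
#if 0
    VBOXSTRICTRC rcStrict = IEMExecStringIoWrite(pVCpu, 1 /*cbValue*/, IEMMODE_32BIT,
                                                 true /*fRepPrefix*/, cbInstr,
                                                 X86_SREG_DS /*iEffSeg*/, true /*fIoChecked*/);
#endif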
10785
10786
10787/**
10788 * Interface for HM and EM for executing string I/O IN (read) instructions.
10789 *
10790 * This API ASSUMES that the caller has already verified that the guest code is
10791 * allowed to access the I/O port. (The I/O port is in the DX register in the
10792 * guest state.)
10793 *
10794 * @returns Strict VBox status code.
10795 * @param pVCpu The cross context virtual CPU structure.
10796 * @param cbValue The size of the I/O port access (1, 2, or 4).
10797 * @param enmAddrMode The addressing mode.
10798 * @param fRepPrefix Indicates whether a repeat prefix is used
10799 * (doesn't matter which for this instruction).
10800 * @param cbInstr The instruction length in bytes.
10801 * @param fIoChecked Whether the access to the I/O port has been
10802 * checked or not. It's typically checked in the
10803 * HM scenario.
10804 */
10805VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
10806 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
10807{
10808 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10809
10810 /*
10811 * State init.
10812 */
10813 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10814
10815 /*
10816 * Switch orgy for getting to the right handler.
10817 */
10818 VBOXSTRICTRC rcStrict;
10819 if (fRepPrefix)
10820 {
10821 switch (enmAddrMode)
10822 {
10823 case IEMMODE_16BIT:
10824 switch (cbValue)
10825 {
10826 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
10827 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
10828 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
10829 default:
10830 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10831 }
10832 break;
10833
10834 case IEMMODE_32BIT:
10835 switch (cbValue)
10836 {
10837 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
10838 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
10839 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
10840 default:
10841 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10842 }
10843 break;
10844
10845 case IEMMODE_64BIT:
10846 switch (cbValue)
10847 {
10848 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
10849 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
10850 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
10851 default:
10852 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10853 }
10854 break;
10855
10856 default:
10857 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10858 }
10859 }
10860 else
10861 {
10862 switch (enmAddrMode)
10863 {
10864 case IEMMODE_16BIT:
10865 switch (cbValue)
10866 {
10867 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
10868 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
10869 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
10870 default:
10871 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10872 }
10873 break;
10874
10875 case IEMMODE_32BIT:
10876 switch (cbValue)
10877 {
10878 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
10879 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
10880 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
10881 default:
10882 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10883 }
10884 break;
10885
10886 case IEMMODE_64BIT:
10887 switch (cbValue)
10888 {
10889 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
10890 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
10891 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
10892 default:
10893 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10894 }
10895 break;
10896
10897 default:
10898 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10899 }
10900 }
10901
10902 if ( pVCpu->iem.s.cActiveMappings == 0
10903 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
10904 { /* likely */ }
10905 else
10906 {
10907 AssertMsg(!IOM_SUCCESS(rcStrict), ("%#x\n", VBOXSTRICTRC_VAL(rcStrict)));
10908 iemMemRollback(pVCpu);
10909 }
10910 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10911}
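
/*
 * Illustrative sketch (not compiled): the read counterpart for a 'rep insw'
 * with 16-bit addressing; INS always stores through ES:[e]di, so there is no
 * effective segment parameter here.
 */
#if 0
    VBOXSTRICTRC rcStrict = IEMExecStringIoRead(pVCpu, 2 /*cbValue*/, IEMMODE_16BIT,
                                                true /*fRepPrefix*/, cbInstr, true /*fIoChecked*/);
#endif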
10912
10913
10914/**
10915 * Interface for raw-mode to execute an OUT (write) instruction.
10916 *
10917 * @returns Strict VBox status code.
10918 * @param pVCpu The cross context virtual CPU structure.
10919 * @param cbInstr The instruction length in bytes.
10920 * @param   u16Port     The port to write to.
10921 * @param fImm Whether the port is specified using an immediate operand or
10922 * using the implicit DX register.
10923 * @param cbReg The register size.
10924 *
10925 * @remarks In ring-0 not all of the state needs to be synced in.
10926 */
10927VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
10928{
10929 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10930 Assert(cbReg <= 4 && cbReg != 3);
10931
10932 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10933 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_out, u16Port, fImm, cbReg);
10934 Assert(!pVCpu->iem.s.cActiveMappings);
10935 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10936}
10937
10938
10939/**
10940 * Interface for raw-mode to execute an IN (read) instruction.
10941 *
10942 * @returns Strict VBox status code.
10943 * @param pVCpu The cross context virtual CPU structure.
10944 * @param cbInstr The instruction length in bytes.
10945 * @param u16Port The port to read.
10946 * @param fImm Whether the port is specified using an immediate operand or
10947 * using the implicit DX.
10948 * @param cbReg The register size.
10949 */
10950VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
10951{
10952 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10953 Assert(cbReg <= 4 && cbReg != 3);
10954
10955 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10956 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_in, u16Port, fImm, cbReg);
10957 Assert(!pVCpu->iem.s.cActiveMappings);
10958 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10959}
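
/*
 * Illustrative sketch (not compiled): the two port I/O helpers above.  u16Port
 * and cbInstr stand for values the caller decoded from the exit.
 */
#if 0
    /* 'out dx, al' - port taken from the guest DX register, one byte: */
    VBOXSTRICTRC rcStrict = IEMExecDecodedOut(pVCpu, cbInstr, u16Port, false /*fImm*/, 1 /*cbReg*/);
    /* 'in eax, 71h' - immediate port operand, four bytes: */
    rcStrict = IEMExecDecodedIn(pVCpu, cbInstr, 0x71, true /*fImm*/, 4 /*cbReg*/);
#endif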
10960
10961
10962/**
10963 * Interface for HM and EM to write to a CRx register.
10964 *
10965 * @returns Strict VBox status code.
10966 * @param pVCpu The cross context virtual CPU structure.
10967 * @param cbInstr The instruction length in bytes.
10968 * @param iCrReg The control register number (destination).
10969 * @param iGReg The general purpose register number (source).
10970 *
10971 * @remarks In ring-0 not all of the state needs to be synced in.
10972 */
10973VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
10974{
10975 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10976 Assert(iCrReg < 16);
10977 Assert(iGReg < 16);
10978
10979 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10980 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
10981 Assert(!pVCpu->iem.s.cActiveMappings);
10982 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10983}
10984
10985
10986/**
10987 * Interface for HM and EM to read from a CRx register.
10988 *
10989 * @returns Strict VBox status code.
10990 * @param pVCpu The cross context virtual CPU structure.
10991 * @param cbInstr The instruction length in bytes.
10992 * @param iGReg The general purpose register number (destination).
10993 * @param iCrReg The control register number (source).
10994 *
10995 * @remarks In ring-0 not all of the state needs to be synced in.
10996 */
10997VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
10998{
10999 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11000 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4
11001 | CPUMCTX_EXTRN_APIC_TPR);
11002 Assert(iCrReg < 16);
11003 Assert(iGReg < 16);
11004
11005 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11006 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
11007 Assert(!pVCpu->iem.s.cActiveMappings);
11008 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11009}
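
/*
 * Illustrative sketch (not compiled): MOV to/from control registers via the
 * helpers above.  General purpose registers are passed by index (0 = xAX,
 * 3 = xBX, ...), matching the iGReg < 16 assertions.
 */
#if 0
    /* 'mov cr3, rax': */
    VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxWrite(pVCpu, cbInstr, 3 /*iCrReg*/, 0 /*iGReg=xAX*/);
    /* 'mov rbx, cr0': */
    rcStrict = IEMExecDecodedMovCRxRead(pVCpu, cbInstr, 3 /*iGReg=xBX*/, 0 /*iCrReg*/);
#endif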
11010
11011
11012/**
11013 * Interface for HM and EM to write to a DRx register.
11014 *
11015 * @returns Strict VBox status code.
11016 * @param pVCpu The cross context virtual CPU structure.
11017 * @param cbInstr The instruction length in bytes.
11018 * @param iDrReg The debug register number (destination).
11019 * @param iGReg The general purpose register number (source).
11020 *
11021 * @remarks In ring-0 not all of the state needs to be synced in.
11022 */
11023VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovDRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iDrReg, uint8_t iGReg)
11024{
11025 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11026 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_DR7);
11027 Assert(iDrReg < 8);
11028 Assert(iGReg < 16);
11029
11030 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11031 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Dd_Rd, iDrReg, iGReg);
11032 Assert(!pVCpu->iem.s.cActiveMappings);
11033 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11034}
11035
11036
11037/**
11038 * Interface for HM and EM to read from a DRx register.
11039 *
11040 * @returns Strict VBox status code.
11041 * @param pVCpu The cross context virtual CPU structure.
11042 * @param cbInstr The instruction length in bytes.
11043 * @param iGReg The general purpose register number (destination).
11044 * @param iDrReg The debug register number (source).
11045 *
11046 * @remarks In ring-0 not all of the state needs to be synced in.
11047 */
11048VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovDRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iDrReg)
11049{
11050 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11051 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_DR7);
11052 Assert(iDrReg < 8);
11053 Assert(iGReg < 16);
11054
11055 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11056 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Dd, iGReg, iDrReg);
11057 Assert(!pVCpu->iem.s.cActiveMappings);
11058 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11059}
11060
11061
11062/**
11063 * Interface for HM and EM to clear the CR0[TS] bit.
11064 *
11065 * @returns Strict VBox status code.
11066 * @param pVCpu The cross context virtual CPU structure.
11067 * @param cbInstr The instruction length in bytes.
11068 *
11069 * @remarks In ring-0 not all of the state needs to be synced in.
11070 */
11071VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPUCC pVCpu, uint8_t cbInstr)
11072{
11073 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11074
11075 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11076 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
11077 Assert(!pVCpu->iem.s.cActiveMappings);
11078 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11079}
11080
11081
11082/**
11083 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
11084 *
11085 * @returns Strict VBox status code.
11086 * @param pVCpu The cross context virtual CPU structure.
11087 * @param cbInstr The instruction length in bytes.
11088 * @param uValue The value to load into CR0.
11089 * @param GCPtrEffDst The guest-linear address if the LMSW instruction has a
11090 * memory operand. Otherwise pass NIL_RTGCPTR.
11091 *
11092 * @remarks In ring-0 not all of the state needs to be synced in.
11093 */
11094VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t uValue, RTGCPTR GCPtrEffDst)
11095{
11096 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11097
11098 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11099 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_lmsw, uValue, GCPtrEffDst);
11100 Assert(!pVCpu->iem.s.cActiveMappings);
11101 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11102}
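
/*
 * Illustrative sketch (not compiled): the CR0.TS related helpers above.
 * uNewMsw stands for the 16-bit value the caller decoded from the LMSW
 * operand.
 */
#if 0
    /* CLTS (0F 06, 2 bytes): */
    VBOXSTRICTRC rcStrict = IEMExecDecodedClts(pVCpu, 2 /*cbInstr*/);
    /* 'lmsw ax' (0F 01 /6, register form, 3 bytes) - no memory operand, so no
       guest-linear address is needed: */
    rcStrict = IEMExecDecodedLmsw(pVCpu, 3 /*cbInstr*/, uNewMsw, NIL_RTGCPTR);
#endif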
11103
11104
11105/**
11106 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
11107 *
11108 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
11109 *
11110 * @returns Strict VBox status code.
11111 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11112 * @param cbInstr The instruction length in bytes.
11113 * @remarks In ring-0 not all of the state needs to be synced in.
11114 * @thread EMT(pVCpu)
11115 */
11116VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPUCC pVCpu, uint8_t cbInstr)
11117{
11118 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11119
11120 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11121 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
11122 Assert(!pVCpu->iem.s.cActiveMappings);
11123 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11124}
11125
11126
11127/**
11128 * Interface for HM and EM to emulate the WBINVD instruction.
11129 *
11130 * @returns Strict VBox status code.
11131 * @param pVCpu The cross context virtual CPU structure.
11132 * @param cbInstr The instruction length in bytes.
11133 *
11134 * @remarks In ring-0 not all of the state needs to be synced in.
11135 */
11136VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWbinvd(PVMCPUCC pVCpu, uint8_t cbInstr)
11137{
11138 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11139
11140 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11141 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wbinvd);
11142 Assert(!pVCpu->iem.s.cActiveMappings);
11143 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11144}
11145
11146
11147/**
11148 * Interface for HM and EM to emulate the INVD instruction.
11149 *
11150 * @returns Strict VBox status code.
11151 * @param pVCpu The cross context virtual CPU structure.
11152 * @param cbInstr The instruction length in bytes.
11153 *
11154 * @remarks In ring-0 not all of the state needs to be synced in.
11155 */
11156VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvd(PVMCPUCC pVCpu, uint8_t cbInstr)
11157{
11158 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11159
11160 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11161 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invd);
11162 Assert(!pVCpu->iem.s.cActiveMappings);
11163 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11164}
11165
11166
11167/**
11168 * Interface for HM and EM to emulate the INVLPG instruction.
11169 *
11170 * @returns Strict VBox status code.
11171 * @retval VINF_PGM_SYNC_CR3
11172 *
11173 * @param pVCpu The cross context virtual CPU structure.
11174 * @param cbInstr The instruction length in bytes.
11175 * @param GCPtrPage The effective address of the page to invalidate.
11176 *
11177 * @remarks In ring-0 not all of the state needs to be synced in.
11178 */
11179VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpg(PVMCPUCC pVCpu, uint8_t cbInstr, RTGCPTR GCPtrPage)
11180{
11181 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11182
11183 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11184 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_invlpg, GCPtrPage);
11185 Assert(!pVCpu->iem.s.cActiveMappings);
11186 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11187}
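
/*
 * Illustrative sketch (not compiled): invoking the INVLPG helper above and
 * checking the documented informational status.  cbInstr and GCPtrPage stand
 * for caller provided values.
 */
#if 0
    VBOXSTRICTRC rcStrict = IEMExecDecodedInvlpg(pVCpu, cbInstr, GCPtrPage);
    if (rcStrict == VINF_PGM_SYNC_CR3)
        Log(("invlpg: PGM requests a CR3 sync before resuming the guest\n"));
#endif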
11188
11189
11190/**
11191 * Interface for HM and EM to emulate the INVPCID instruction.
11192 *
11193 * @returns Strict VBox status code.
11194 * @retval VINF_PGM_SYNC_CR3
11195 *
11196 * @param pVCpu The cross context virtual CPU structure.
11197 * @param cbInstr The instruction length in bytes.
11198 * @param iEffSeg The effective segment register.
11199 * @param GCPtrDesc The effective address of the INVPCID descriptor.
11200 * @param uType The invalidation type.
11201 *
11202 * @remarks In ring-0 not all of the state needs to be synced in.
11203 */
11204VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvpcid(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPTR GCPtrDesc,
11205 uint64_t uType)
11206{
11207 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 4);
11208
11209 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11210 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_invpcid, iEffSeg, GCPtrDesc, uType);
11211 Assert(!pVCpu->iem.s.cActiveMappings);
11212 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11213}
11214
11215
11216/**
11217 * Interface for HM and EM to emulate the CPUID instruction.
11218 *
11219 * @returns Strict VBox status code.
11220 *
11221 * @param pVCpu The cross context virtual CPU structure.
11222 * @param cbInstr The instruction length in bytes.
11223 *
11224 * @remarks Not all of the state needs to be synced in; the usual state plus RAX and RCX.
11225 */
11226VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedCpuid(PVMCPUCC pVCpu, uint8_t cbInstr)
11227{
11228 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11229 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX);
11230
11231 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11232 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_cpuid);
11233 Assert(!pVCpu->iem.s.cActiveMappings);
11234 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11235}
11236
11237
11238/**
11239 * Interface for HM and EM to emulate the RDPMC instruction.
11240 *
11241 * @returns Strict VBox status code.
11242 *
11243 * @param pVCpu The cross context virtual CPU structure.
11244 * @param cbInstr The instruction length in bytes.
11245 *
11246 * @remarks Not all of the state needs to be synced in.
11247 */
11248VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdpmc(PVMCPUCC pVCpu, uint8_t cbInstr)
11249{
11250 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11251 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
11252
11253 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11254 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdpmc);
11255 Assert(!pVCpu->iem.s.cActiveMappings);
11256 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11257}
11258
11259
11260/**
11261 * Interface for HM and EM to emulate the RDTSC instruction.
11262 *
11263 * @returns Strict VBox status code.
11264 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
11265 *
11266 * @param pVCpu The cross context virtual CPU structure.
11267 * @param cbInstr The instruction length in bytes.
11268 *
11269 * @remarks Not all of the state needs to be synced in.
11270 */
11271VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtsc(PVMCPUCC pVCpu, uint8_t cbInstr)
11272{
11273 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11274 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
11275
11276 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11277 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtsc);
11278 Assert(!pVCpu->iem.s.cActiveMappings);
11279 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11280}
11281
11282
11283/**
11284 * Interface for HM and EM to emulate the RDTSCP instruction.
11285 *
11286 * @returns Strict VBox status code.
11287 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
11288 *
11289 * @param pVCpu The cross context virtual CPU structure.
11290 * @param cbInstr The instruction length in bytes.
11291 *
11292 * @remarks Not all of the state needs to be synced in. Recommended
11293 *          to include CPUMCTX_EXTRN_TSC_AUX to avoid an extra fetch call.
11294 */
11295VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtscp(PVMCPUCC pVCpu, uint8_t cbInstr)
11296{
11297 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11298 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_TSC_AUX);
11299
11300 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11301 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtscp);
11302 Assert(!pVCpu->iem.s.cActiveMappings);
11303 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11304}
11305
11306
11307/**
11308 * Interface for HM and EM to emulate the RDMSR instruction.
11309 *
11310 * @returns Strict VBox status code.
11311 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
11312 *
11313 * @param pVCpu The cross context virtual CPU structure.
11314 * @param cbInstr The instruction length in bytes.
11315 *
11316 * @remarks Not all of the state needs to be synced in. Requires RCX and
11317 * (currently) all MSRs.
11318 */
11319VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
11320{
11321 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11322 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_ALL_MSRS);
11323
11324 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11325 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdmsr);
11326 Assert(!pVCpu->iem.s.cActiveMappings);
11327 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11328}
11329
11330
11331/**
11332 * Interface for HM and EM to emulate the WRMSR instruction.
11333 *
11334 * @returns Strict VBox status code.
11335 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
11336 *
11337 * @param pVCpu The cross context virtual CPU structure.
11338 * @param cbInstr The instruction length in bytes.
11339 *
11340 * @remarks Not all of the state needs to be synced in. Requires RCX, RAX, RDX,
11341 * and (currently) all MSRs.
11342 */
11343VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWrmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
11344{
11345 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11346 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK
11347 | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_ALL_MSRS);
11348
11349 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11350 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wrmsr);
11351 Assert(!pVCpu->iem.s.cActiveMappings);
11352 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11353}
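
/*
 * Illustrative sketch (not compiled): the MSR helpers above.  Both RDMSR and
 * WRMSR are two byte instructions; the IEM_CTX_ASSERTs above spell out which
 * parts of the guest state the caller must already have made available.
 */
#if 0
    /* rdmsr (0F 32): */
    VBOXSTRICTRC rcStrict = IEMExecDecodedRdmsr(pVCpu, 2 /*cbInstr*/);
    /* wrmsr (0F 30): */
    rcStrict = IEMExecDecodedWrmsr(pVCpu, 2 /*cbInstr*/);
#endif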
11354
11355
11356/**
11357 * Interface for HM and EM to emulate the MONITOR instruction.
11358 *
11359 * @returns Strict VBox status code.
11360 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
11361 *
11362 * @param pVCpu The cross context virtual CPU structure.
11363 * @param cbInstr The instruction length in bytes.
11364 *
11365 * @remarks Not all of the state needs to be synced in.
11366 * @remarks ASSUMES the default segment of DS and no segment override prefixes
11367 * are used.
11368 */
11369VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMonitor(PVMCPUCC pVCpu, uint8_t cbInstr)
11370{
11371 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11372 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);
11373
11374 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11375 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_monitor, X86_SREG_DS);
11376 Assert(!pVCpu->iem.s.cActiveMappings);
11377 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11378}
11379
11380
11381/**
11382 * Interface for HM and EM to emulate the MWAIT instruction.
11383 *
11384 * @returns Strict VBox status code.
11385 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
11386 *
11387 * @param pVCpu The cross context virtual CPU structure.
11388 * @param cbInstr The instruction length in bytes.
11389 *
11390 * @remarks Not all of the state needs to be synced in.
11391 */
11392VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMwait(PVMCPUCC pVCpu, uint8_t cbInstr)
11393{
11394 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11395 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX);
11396
11397 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11398 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_mwait);
11399 Assert(!pVCpu->iem.s.cActiveMappings);
11400 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11401}
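
/*
 * Illustrative sketch (not compiled): the MONITOR/MWAIT pair.  MONITOR takes
 * its address from DS:RAX (see the DS remark above) and MWAIT takes its hints
 * from EAX/ECX; both are three byte 0F 01 encodings.
 */
#if 0
    VBOXSTRICTRC rcStrict = IEMExecDecodedMonitor(pVCpu, 3 /*cbInstr*/);
    if (rcStrict == VINF_SUCCESS)
        rcStrict = IEMExecDecodedMwait(pVCpu, 3 /*cbInstr*/);
#endif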
11402
11403
11404/**
11405 * Interface for HM and EM to emulate the HLT instruction.
11406 *
11407 * @returns Strict VBox status code.
11408 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
11409 *
11410 * @param pVCpu The cross context virtual CPU structure.
11411 * @param cbInstr The instruction length in bytes.
11412 *
11413 * @remarks Not all of the state needs to be synced in.
11414 */
11415VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedHlt(PVMCPUCC pVCpu, uint8_t cbInstr)
11416{
11417 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
11418
11419 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11420 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_hlt);
11421 Assert(!pVCpu->iem.s.cActiveMappings);
11422 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11423}
11424
11425
11426/**
11427 * Checks if IEM is in the process of delivering an event (interrupt or
11428 * exception).
11429 *
11430 * @returns true if we're in the process of raising an interrupt or exception,
11431 * false otherwise.
11432 * @param pVCpu The cross context virtual CPU structure.
11433 * @param puVector Where to store the vector associated with the
11434 * currently delivered event, optional.
11435 * @param   pfFlags         Where to store the event delivery flags (see
11436 * IEM_XCPT_FLAGS_XXX), optional.
11437 * @param puErr Where to store the error code associated with the
11438 * event, optional.
11439 * @param puCr2 Where to store the CR2 associated with the event,
11440 * optional.
11441 * @remarks The caller should check the flags to determine if the error code and
11442 * CR2 are valid for the event.
11443 */
11444VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPUCC pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr, uint64_t *puCr2)
11445{
11446 bool const fRaisingXcpt = pVCpu->iem.s.cXcptRecursions > 0;
11447 if (fRaisingXcpt)
11448 {
11449 if (puVector)
11450 *puVector = pVCpu->iem.s.uCurXcpt;
11451 if (pfFlags)
11452 *pfFlags = pVCpu->iem.s.fCurXcpt;
11453 if (puErr)
11454 *puErr = pVCpu->iem.s.uCurXcptErr;
11455 if (puCr2)
11456 *puCr2 = pVCpu->iem.s.uCurXcptCr2;
11457 }
11458 return fRaisingXcpt;
11459}
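
/*
 * Illustrative sketch (not compiled): querying the event currently being
 * delivered.  Per the remarks above, the returned flags say whether the error
 * code and CR2 values are meaningful.
 */
#if 0
    uint8_t  uVector = 0;
    uint32_t fFlags  = 0;
    uint32_t uErr    = 0;
    uint64_t uCr2    = 0;
    if (IEMGetCurrentXcpt(pVCpu, &uVector, &fFlags, &uErr, &uCr2))
        Log(("Delivering vector %#x, flags %#x%s\n", uVector, fFlags,
             (fFlags & IEM_XCPT_FLAGS_ERR) ? ", with error code" : ""));
#endif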
11460
11461#ifdef IN_RING3
11462
11463/**
11464 * Handles the unlikely and probably fatal merge cases.
11465 *
11466 * @returns Merged status code.
11467 * @param rcStrict Current EM status code.
11468 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
11469 * with @a rcStrict.
11470 * @param iMemMap The memory mapping index. For error reporting only.
11471 * @param pVCpu The cross context virtual CPU structure of the calling
11472 * thread, for error reporting only.
11473 */
11474DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
11475 unsigned iMemMap, PVMCPUCC pVCpu)
11476{
11477 if (RT_FAILURE_NP(rcStrict))
11478 return rcStrict;
11479
11480 if (RT_FAILURE_NP(rcStrictCommit))
11481 return rcStrictCommit;
11482
11483 if (rcStrict == rcStrictCommit)
11484 return rcStrictCommit;
11485
11486 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
11487 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
11488 pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
11489 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
11490 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
11491 return VERR_IOM_FF_STATUS_IPE;
11492}
11493
11494
11495/**
11496 * Helper for IOMR3ProcessForceFlag.
11497 *
11498 * @returns Merged status code.
11499 * @param rcStrict Current EM status code.
11500 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
11501 * with @a rcStrict.
11502 * @param iMemMap The memory mapping index. For error reporting only.
11503 * @param pVCpu The cross context virtual CPU structure of the calling
11504 * thread, for error reporting only.
11505 */
11506DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPUCC pVCpu)
11507{
11508 /* Simple. */
11509 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
11510 return rcStrictCommit;
11511
11512 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
11513 return rcStrict;
11514
11515 /* EM scheduling status codes. */
11516 if (RT_LIKELY( rcStrict >= VINF_EM_FIRST
11517 && rcStrict <= VINF_EM_LAST))
11518 {
11519 if (RT_LIKELY( rcStrictCommit >= VINF_EM_FIRST
11520 && rcStrictCommit <= VINF_EM_LAST))
11521 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
11522 }
11523
11524 /* Unlikely */
11525 return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
11526}
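
/*
 * Illustrative example (not compiled) of the precedence implemented above: a
 * pending ring-3 return does not mask the commit status, while two EM
 * scheduling codes merge to the numerically lower (higher priority) one.
 */
#if 0
    Assert(iemR3MergeStatus(VINF_EM_RAW_TO_R3, VINF_SUCCESS, 0 /*iMemMap*/, pVCpu) == VINF_SUCCESS);
#endif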
11527
11528
11529/**
11530 * Called by force-flag handling code when VMCPU_FF_IEM is set.
11531 *
11532 * @returns Merge between @a rcStrict and what the commit operation returned.
11533 * @param pVM The cross context VM structure.
11534 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11535 * @param rcStrict The status code returned by ring-0 or raw-mode.
11536 */
11537VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
11538{
11539 /*
11540 * Reset the pending commit.
11541 */
11542 AssertMsg( (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
11543 & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
11544 ("%#x %#x %#x\n",
11545 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
11546 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
11547
11548 /*
11549 * Commit the pending bounce buffers (usually just one).
11550 */
11551 unsigned cBufs = 0;
11552 unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
11553 while (iMemMap-- > 0)
11554 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
11555 {
11556 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
11557 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
11558 Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
11559
11560 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
11561 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
11562 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
11563
11564 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
11565 {
11566 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
11567 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
11568 pbBuf,
11569 cbFirst,
11570 PGMACCESSORIGIN_IEM);
11571 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
11572 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
11573 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
11574 VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
11575 }
11576
11577 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
11578 {
11579 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
11580 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
11581 pbBuf + cbFirst,
11582 cbSecond,
11583 PGMACCESSORIGIN_IEM);
11584 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
11585 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
11586 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
11587 VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
11588 }
11589 cBufs++;
11590 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
11591 }
11592
11593 AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
11594 ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
11595 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
11596 pVCpu->iem.s.cActiveMappings = 0;
11597 return rcStrict;
11598}
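
/*
 * Illustrative sketch (not compiled): the ring-3 force-flag processing path
 * that invokes the above when IEM has left a bounce-buffer write commit
 * pending.
 */
#if 0
    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
        rcStrict = IEMR3ProcessForceFlag(pVM, pVCpu, rcStrict);
#endif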
11599
11600#endif /* IN_RING3 */
11601