/* $Id: IEMAll.cpp 106061 2024-09-16 14:03:52Z vboxsync $ */
/** @file
 * IEM - Interpreted Execution Manager - All Contexts.
 */

/*
 * Copyright (C) 2011-2024 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * SPDX-License-Identifier: GPL-3.0-only
 */


/** @page pg_iem    IEM - Interpreted Execution Manager
 *
 * The interpreted execution manager (IEM) is for executing short guest code
 * sequences that are causing too many exits / virtualization traps.  It will
 * also be used to interpret single instructions, thus replacing the selective
 * interpreters in EM and IOM.
 *
 * Design goals:
 *      - Relatively small footprint, although we favour speed and correctness
 *        over size.
 *      - Reasonably fast.
 *      - Correctly handle lock prefixed instructions.
 *      - Complete instruction set - eventually.
 *      - Refactorable into a recompiler, maybe.
 *      - Replace EMInterpret*.
 *
 * Using the existing disassembler has been considered, however this is thought
 * to conflict with speed as the disassembler chews things a bit too much while
 * leaving us with a somewhat complicated state to interpret afterwards.
 *
 *
 * The current code is very much work in progress. You've been warned!
 *
 *
 * @section sec_iem_fpu_instr   FPU Instructions
 *
 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
 * same or equivalent instructions on the host FPU.  To make life easy, we also
 * let the FPU prioritize the unmasked exceptions for us.  This however, only
 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
 * can trigger spurious FPU exceptions.
 *
 * The guest FPU state is not loaded into the host CPU and kept there till we
 * leave IEM because the calling conventions have declared an all year open
 * season on much of the FPU state.  For instance an innocent looking call to
 * memcpy might end up using a whole bunch of XMM or MM registers if the
 * particular implementation finds it worthwhile.
 *
 *
 * @section sec_iem_logging     Logging
 *
 * The IEM code uses the "IEM" log group for the main logging.  The different
 * logging levels/flags are generally used for the following purposes:
 *      - Level 1  (Log)  : Errors, exceptions, interrupts and such major events.
 *      - Flow  (LogFlow) : Basic enter/exit IEM state info.
 *      - Level 2  (Log2) : ?
 *      - Level 3  (Log3) : More detailed enter/exit IEM state info.
 *      - Level 4  (Log4) : Decoding mnemonics w/ EIP.
 *      - Level 5  (Log5) : Decoding details.
 *      - Level 6  (Log6) : Enables/disables the lockstep comparison with REM.
 *      - Level 7  (Log7) : iret++ execution logging.
 *      - Level 8  (Log8) :
 *      - Level 9  (Log9) :
 *      - Level 10 (Log10): TLBs.
 *      - Level 11 (Log11): Unmasked FPU exceptions.
 *
 * The "IEM_MEM" log group covers most of memory related details logging,
 * except for errors and exceptions:
 *      - Level 1  (Log)  : Reads.
 *      - Level 2  (Log2) : Read fallbacks.
 *      - Level 3  (Log3) : MemMap read.
 *      - Level 4  (Log4) : MemMap read fallbacks.
 *      - Level 5  (Log5) : Writes.
 *      - Level 6  (Log6) : Write fallbacks.
 *      - Level 7  (Log7) : MemMap writes and read-writes.
 *      - Level 8  (Log8) : MemMap write and read-write fallbacks.
 *      - Level 9  (Log9) : Stack reads.
 *      - Level 10 (Log10): Stack read fallbacks.
 *      - Level 11 (Log11): Stack writes.
 *      - Level 12 (Log12): Stack write fallbacks.
 *      - Flow  (LogFlow) :
 *
 * The SVM (AMD-V) and VMX (VT-x) code has the following assignments:
 *      - Level 1  (Log)  : Errors and other major events.
 *      - Flow (LogFlow)  : Misc flow stuff (cleanup?)
 *      - Level 2  (Log2) : VM exits.
 *
 * The syscall logging level assignments:
 *      - Level 1: DOS and BIOS.
 *      - Level 2: Windows 3.x.
 *      - Level 3: Linux.
 */
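
/* A level-4 "IEM" group statement as described above might look like this
   (illustrative only; pszMnemonic is a hypothetical stand-in for whatever
   string the decoder produced):
        Log4(("decode %04x:%08RX64: %s\n",
              pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pszMnemonic));
 */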
112 |
|
---|
113 | /* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
|
---|
114 | #ifdef _MSC_VER
|
---|
115 | # pragma warning(disable:4505)
|
---|
116 | #endif
|
---|
117 |
|
---|
118 |
|
---|
119 | /*********************************************************************************************************************************
|
---|
120 | * Header Files *
|
---|
121 | *********************************************************************************************************************************/
|
---|
122 | #define LOG_GROUP LOG_GROUP_IEM
|
---|
123 | #define VMCPU_INCL_CPUM_GST_CTX
|
---|
124 | #include <VBox/vmm/iem.h>
|
---|
125 | #include <VBox/vmm/cpum.h>
|
---|
126 | #include <VBox/vmm/apic.h>
|
---|
127 | #include <VBox/vmm/pdm.h>
|
---|
128 | #include <VBox/vmm/pgm.h>
|
---|
129 | #include <VBox/vmm/iom.h>
|
---|
130 | #include <VBox/vmm/em.h>
|
---|
131 | #include <VBox/vmm/hm.h>
|
---|
132 | #include <VBox/vmm/nem.h>
|
---|
133 | #include <VBox/vmm/gcm.h>
|
---|
134 | #include <VBox/vmm/gim.h>
|
---|
135 | #ifdef VBOX_WITH_NESTED_HWVIRT_SVM
|
---|
136 | # include <VBox/vmm/em.h>
|
---|
137 | # include <VBox/vmm/hm_svm.h>
|
---|
138 | #endif
|
---|
139 | #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
|
---|
140 | # include <VBox/vmm/hmvmxinline.h>
|
---|
141 | #endif
|
---|
142 | #include <VBox/vmm/tm.h>
|
---|
143 | #include <VBox/vmm/dbgf.h>
|
---|
144 | #include <VBox/vmm/dbgftrace.h>
|
---|
145 | #include "IEMInternal.h"
|
---|
146 | #include <VBox/vmm/vmcc.h>
|
---|
147 | #include <VBox/log.h>
|
---|
148 | #include <VBox/err.h>
|
---|
149 | #include <VBox/param.h>
|
---|
150 | #include <VBox/dis.h>
|
---|
151 | #include <iprt/asm-math.h>
|
---|
152 | #if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
|
---|
153 | # include <iprt/asm-amd64-x86.h>
|
---|
154 | #elif defined(RT_ARCH_ARM64) || defined(RT_ARCH_ARM32)
|
---|
155 | # include <iprt/asm-arm.h>
|
---|
156 | #endif
|
---|
157 | #include <iprt/assert.h>
|
---|
158 | #include <iprt/string.h>
|
---|
159 | #include <iprt/x86.h>
|
---|
160 |
|
---|
161 | #include "IEMInline.h"
|
---|
162 |
|
---|

/*********************************************************************************************************************************
*   Structures and Typedefs                                                                                                      *
*********************************************************************************************************************************/
/**
 * CPU exception classes.
 */
typedef enum IEMXCPTCLASS
{
    IEMXCPTCLASS_BENIGN,
    IEMXCPTCLASS_CONTRIBUTORY,
    IEMXCPTCLASS_PAGE_FAULT,
    IEMXCPTCLASS_DOUBLE_FAULT
} IEMXCPTCLASS;


/*********************************************************************************************************************************
*   Global Variables                                                                                                             *
*********************************************************************************************************************************/
#if defined(IEM_LOG_MEMORY_WRITES)
/** What IEM just wrote. */
uint8_t g_abIemWrote[256];
/** How much IEM just wrote. */
size_t g_cbIemWrote;
#endif


/*********************************************************************************************************************************
*   Internal Functions                                                                                                           *
*********************************************************************************************************************************/
static VBOXSTRICTRC     iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,
                                                  uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT;

/**
 * Calculates IEM_F_BRK_PENDING_XXX (IEM_F_PENDING_BRK_MASK) flags, slow code
 * path.
 *
 * This will also invalidate TLB entries for any pages with active data
 * breakpoints on them.
 *
 * @returns IEM_F_BRK_PENDING_XXX or zero.
 * @param   pVCpu               The cross context virtual CPU structure of the
 *                              calling thread.
 *
 * @note    Don't call directly, use iemCalcExecDbgFlags instead.
 */
uint32_t iemCalcExecDbgFlagsSlow(PVMCPUCC pVCpu)
{
    uint32_t fExec = 0;

    /*
     * Helper for invalidating the data TLB entries for breakpoint addresses.
     *
     * This is to make sure any access to the page will always trigger a TLB
     * load for as long as the breakpoint is enabled.
     */
#ifdef IEM_WITH_DATA_TLB
# define INVALID_TLB_ENTRY_FOR_BP(a_uValue) do { \
        RTGCPTR uTagNoRev = (a_uValue); \
        uTagNoRev = IEMTLB_CALC_TAG_NO_REV(uTagNoRev); \
        /** @todo do large page accounting */ \
        uintptr_t const idxEven = IEMTLB_TAG_TO_EVEN_INDEX(uTagNoRev); \
        if (pVCpu->iem.s.DataTlb.aEntries[idxEven].uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision)) \
            pVCpu->iem.s.DataTlb.aEntries[idxEven].uTag = 0; \
        if (pVCpu->iem.s.DataTlb.aEntries[idxEven + 1].uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal)) \
            pVCpu->iem.s.DataTlb.aEntries[idxEven + 1].uTag = 0; \
    } while (0)
#else
# define INVALID_TLB_ENTRY_FOR_BP(a_uValue) do { } while (0)
#endif

    /*
     * Process guest breakpoints.
     */
#define PROCESS_ONE_BP(a_fDr7, a_iBp, a_uValue) do { \
        if (a_fDr7 & X86_DR7_L_G(a_iBp)) \
        { \
            switch (X86_DR7_GET_RW(a_fDr7, a_iBp)) \
            { \
                case X86_DR7_RW_EO: \
                    fExec |= IEM_F_PENDING_BRK_INSTR; \
                    break; \
                case X86_DR7_RW_WO: \
                case X86_DR7_RW_RW: \
                    fExec |= IEM_F_PENDING_BRK_DATA; \
                    INVALID_TLB_ENTRY_FOR_BP(a_uValue); \
                    break; \
                case X86_DR7_RW_IO: \
                    fExec |= IEM_F_PENDING_BRK_X86_IO; \
                    break; \
            } \
        } \
    } while (0)

    uint32_t const fGstDr7 = (uint32_t)pVCpu->cpum.GstCtx.dr[7];
    if (fGstDr7 & X86_DR7_ENABLED_MASK)
    {
        /** @todo extract more details here to simplify matching later. */
#ifdef IEM_WITH_DATA_TLB
        IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_DR0_DR3);
#endif
        PROCESS_ONE_BP(fGstDr7, 0, pVCpu->cpum.GstCtx.dr[0]);
        PROCESS_ONE_BP(fGstDr7, 1, pVCpu->cpum.GstCtx.dr[1]);
        PROCESS_ONE_BP(fGstDr7, 2, pVCpu->cpum.GstCtx.dr[2]);
        PROCESS_ONE_BP(fGstDr7, 3, pVCpu->cpum.GstCtx.dr[3]);
    }

    /*
     * Process hypervisor breakpoints.
     */
    PVMCC const    pVM       = pVCpu->CTX_SUFF(pVM);
    uint32_t const fHyperDr7 = DBGFBpGetDR7(pVM);
    if (fHyperDr7 & X86_DR7_ENABLED_MASK)
    {
        /** @todo extract more details here to simplify matching later. */
        PROCESS_ONE_BP(fHyperDr7, 0, DBGFBpGetDR0(pVM));
        PROCESS_ONE_BP(fHyperDr7, 1, DBGFBpGetDR1(pVM));
        PROCESS_ONE_BP(fHyperDr7, 2, DBGFBpGetDR2(pVM));
        PROCESS_ONE_BP(fHyperDr7, 3, DBGFBpGetDR3(pVM));
    }

    return fExec;
}
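
/* Note on the even/odd slot layout assumed by INVALID_TLB_ENTRY_FOR_BP above
   (illustrative sketch): each tag maps to a pair of TLB entries, the even slot
   holding the non-global translation (matched against uTlbRevision) and the
   odd slot the global one (matched against uTlbRevisionGlobal).  A minimal
   lookup under that assumption:
        uintptr_t const idxEven = IEMTLB_TAG_TO_EVEN_INDEX(uTagNoRev);
        bool const fHit = pTlb->aEntries[idxEven].uTag     == (uTagNoRev | pTlb->uTlbRevision)
                       || pTlb->aEntries[idxEven + 1].uTag == (uTagNoRev | pTlb->uTlbRevisionGlobal);
 */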


/**
 * Initializes the decoder state.
 *
 * iemReInitDecoder is mostly a copy of this function.
 *
 * @param   pVCpu               The cross context virtual CPU structure of the
 *                              calling thread.
 * @param   fExecOpts           Optional execution flags:
 *                                  - IEM_F_BYPASS_HANDLERS
 *                                  - IEM_F_X86_DISREGARD_LOCK
 */
DECLINLINE(void) iemInitDecoder(PVMCPUCC pVCpu, uint32_t fExecOpts)
{
    IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
    Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));

    /* Execution state: */
    uint32_t fExec;
    pVCpu->iem.s.fExec = fExec = iemCalcExecFlags(pVCpu) | fExecOpts;

    /* Decoder state: */
    pVCpu->iem.s.enmDefAddrMode     = fExec & IEM_F_MODE_CPUMODE_MASK;  /** @todo check if this is correct... */
    pVCpu->iem.s.enmEffAddrMode     = fExec & IEM_F_MODE_CPUMODE_MASK;
    if ((fExec & IEM_F_MODE_CPUMODE_MASK) != IEMMODE_64BIT)
    {
        pVCpu->iem.s.enmDefOpSize   = fExec & IEM_F_MODE_CPUMODE_MASK;  /** @todo check if this is correct... */
        pVCpu->iem.s.enmEffOpSize   = fExec & IEM_F_MODE_CPUMODE_MASK;
    }
    else
    {
        pVCpu->iem.s.enmDefOpSize   = IEMMODE_32BIT;
        pVCpu->iem.s.enmEffOpSize   = IEMMODE_32BIT;
    }
    pVCpu->iem.s.fPrefixes          = 0;
    pVCpu->iem.s.uRexReg            = 0;
    pVCpu->iem.s.uRexB              = 0;
    pVCpu->iem.s.uRexIndex          = 0;
    pVCpu->iem.s.idxPrefix          = 0;
    pVCpu->iem.s.uVex3rdReg         = 0;
    pVCpu->iem.s.uVexLength         = 0;
    pVCpu->iem.s.fEvexStuff         = 0;
    pVCpu->iem.s.iEffSeg            = X86_SREG_DS;
#ifdef IEM_WITH_CODE_TLB
    pVCpu->iem.s.pbInstrBuf         = NULL;
    pVCpu->iem.s.offInstrNextByte   = 0;
    pVCpu->iem.s.offCurInstrStart   = 0;
# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
    pVCpu->iem.s.offOpcode          = 0;
# endif
# ifdef VBOX_STRICT
    pVCpu->iem.s.GCPhysInstrBuf     = NIL_RTGCPHYS;
    pVCpu->iem.s.cbInstrBuf         = UINT16_MAX;
    pVCpu->iem.s.cbInstrBufTotal    = UINT16_MAX;
    pVCpu->iem.s.uInstrBufPc        = UINT64_C(0xc0ffc0ffcff0c0ff);
# endif
#else
    pVCpu->iem.s.offOpcode          = 0;
    pVCpu->iem.s.cbOpcode           = 0;
#endif
    pVCpu->iem.s.offModRm           = 0;
    pVCpu->iem.s.cActiveMappings    = 0;
    pVCpu->iem.s.iNextMapping       = 0;
    pVCpu->iem.s.rcPassUp           = VINF_SUCCESS;

#ifdef DBGFTRACE_ENABLED
    switch (IEM_GET_CPU_MODE(pVCpu))
    {
        case IEMMODE_64BIT:
            RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.rip);
            break;
        case IEMMODE_32BIT:
            RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
            break;
        case IEMMODE_16BIT:
            RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
            break;
    }
#endif
}


/**
 * Reinitializes the decoder state 2nd+ loop of IEMExecLots.
 *
 * This is mostly a copy of iemInitDecoder.
 *
 * @param   pVCpu               The cross context virtual CPU structure of the calling EMT.
 */
DECLINLINE(void) iemReInitDecoder(PVMCPUCC pVCpu)
{
    Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));

    /* ASSUMES: Anyone changing CPU state affecting the fExec bits will update them! */
    AssertMsg((pVCpu->iem.s.fExec & ~IEM_F_USER_OPTS) == iemCalcExecFlags(pVCpu),
              ("fExec=%#x iemCalcExecModeFlags=%#x\n", pVCpu->iem.s.fExec, iemCalcExecFlags(pVCpu)));

    IEMMODE const enmMode = IEM_GET_CPU_MODE(pVCpu);
    pVCpu->iem.s.enmDefAddrMode     = enmMode;  /** @todo check if this is correct... */
    pVCpu->iem.s.enmEffAddrMode     = enmMode;
    if (enmMode != IEMMODE_64BIT)
    {
        pVCpu->iem.s.enmDefOpSize   = enmMode;  /** @todo check if this is correct... */
        pVCpu->iem.s.enmEffOpSize   = enmMode;
    }
    else
    {
        pVCpu->iem.s.enmDefOpSize   = IEMMODE_32BIT;
        pVCpu->iem.s.enmEffOpSize   = IEMMODE_32BIT;
    }
    pVCpu->iem.s.fPrefixes          = 0;
    pVCpu->iem.s.uRexReg            = 0;
    pVCpu->iem.s.uRexB              = 0;
    pVCpu->iem.s.uRexIndex          = 0;
    pVCpu->iem.s.idxPrefix          = 0;
    pVCpu->iem.s.uVex3rdReg         = 0;
    pVCpu->iem.s.uVexLength         = 0;
    pVCpu->iem.s.fEvexStuff         = 0;
    pVCpu->iem.s.iEffSeg            = X86_SREG_DS;
#ifdef IEM_WITH_CODE_TLB
    if (pVCpu->iem.s.pbInstrBuf)
    {
        uint64_t off = (enmMode == IEMMODE_64BIT
                        ? pVCpu->cpum.GstCtx.rip
                        : pVCpu->cpum.GstCtx.eip + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base)
                     - pVCpu->iem.s.uInstrBufPc;
        if (off < pVCpu->iem.s.cbInstrBufTotal)
        {
            pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
            pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
            if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
                pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
            else
                pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
        }
        else
        {
            pVCpu->iem.s.pbInstrBuf       = NULL;
            pVCpu->iem.s.offInstrNextByte = 0;
            pVCpu->iem.s.offCurInstrStart = 0;
            pVCpu->iem.s.cbInstrBuf       = 0;
            pVCpu->iem.s.cbInstrBufTotal  = 0;
            pVCpu->iem.s.GCPhysInstrBuf   = NIL_RTGCPHYS;
        }
    }
    else
    {
        pVCpu->iem.s.offInstrNextByte = 0;
        pVCpu->iem.s.offCurInstrStart = 0;
        pVCpu->iem.s.cbInstrBuf       = 0;
        pVCpu->iem.s.cbInstrBufTotal  = 0;
# ifdef VBOX_STRICT
        pVCpu->iem.s.GCPhysInstrBuf   = NIL_RTGCPHYS;
# endif
    }
# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
    pVCpu->iem.s.offOpcode          = 0;
# endif
#else  /* !IEM_WITH_CODE_TLB */
    pVCpu->iem.s.cbOpcode           = 0;
    pVCpu->iem.s.offOpcode          = 0;
#endif /* !IEM_WITH_CODE_TLB */
    pVCpu->iem.s.offModRm           = 0;
    Assert(pVCpu->iem.s.cActiveMappings == 0);
    pVCpu->iem.s.iNextMapping       = 0;
    Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
    Assert(!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS));

#ifdef DBGFTRACE_ENABLED
    switch (enmMode)
    {
        case IEMMODE_64BIT:
            RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.rip);
            break;
        case IEMMODE_32BIT:
            RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
            break;
        case IEMMODE_16BIT:
            RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
            break;
    }
#endif
}



/**
 * Prefetch opcodes the first time when starting executing.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu               The cross context virtual CPU structure of the
 *                              calling thread.
 * @param   fExecOpts           Optional execution flags:
 *                                  - IEM_F_BYPASS_HANDLERS
 *                                  - IEM_F_X86_DISREGARD_LOCK
 */
static VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPUCC pVCpu, uint32_t fExecOpts) RT_NOEXCEPT
{
    iemInitDecoder(pVCpu, fExecOpts);

#ifndef IEM_WITH_CODE_TLB
    /*
     * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
     *
     * First translate CS:rIP to a physical address.
     *
     * Note! The iemOpcodeFetchMoreBytes code depends on this here code to fetch
     *       all relevant bytes from the first page, as it ASSUMES it's only ever
     *       called for dealing with CS.LIM, page crossing and instructions that
     *       are too long.
     */
    uint32_t    cbToTryRead;
    RTGCPTR     GCPtrPC;
    if (IEM_IS_64BIT_CODE(pVCpu))
    {
        cbToTryRead = GUEST_PAGE_SIZE;
        GCPtrPC     = pVCpu->cpum.GstCtx.rip;
        if (IEM_IS_CANONICAL(GCPtrPC))
            cbToTryRead = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
        else
            return iemRaiseGeneralProtectionFault0(pVCpu);
    }
    else
    {
        uint32_t GCPtrPC32 = pVCpu->cpum.GstCtx.eip;
        AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu), ("%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
        if (GCPtrPC32 <= pVCpu->cpum.GstCtx.cs.u32Limit)
            cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrPC32 + 1;
        else
            return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
        if (cbToTryRead) { /* likely */ }
        else /* overflowed */
        {
            Assert(GCPtrPC32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
            cbToTryRead = UINT32_MAX;
        }
        GCPtrPC = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrPC32;
        Assert(GCPtrPC <= UINT32_MAX);
    }

    PGMPTWALKFAST WalkFast;
    int rc = PGMGstQueryPageFast(pVCpu, GCPtrPC,
                                 IEM_GET_CPL(pVCpu) == 3 ? PGMQPAGE_F_EXECUTE | PGMQPAGE_F_USER_MODE : PGMQPAGE_F_EXECUTE,
                                 &WalkFast);
    if (RT_SUCCESS(rc))
        Assert(WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED);
    else
    {
        Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
        /** @todo This isn't quite right yet, as PGM_GST_SLAT_NAME_EPT(Walk) doesn't
         *        know about what kind of access we're making! See PGM_GST_NAME(WalkFast). */
        if (WalkFast.fFailed & PGM_WALKFAIL_EPT)
            IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
# endif
        return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, rc);
    }
#if 0
    if ((WalkFast.fEffective & X86_PTE_US) || IEM_GET_CPL(pVCpu) != 3) { /* likely */ }
    else
    {
        Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
        /** @todo this is completely wrong for EPT. WalkFast.fFailed is always zero here!*/
#  error completely wrong
        if (WalkFast.fFailed & PGM_WALKFAIL_EPT)
            IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
# endif
        return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
    }
    if (!(WalkFast.fEffective & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
    else
    {
        Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
        /** @todo this is completely wrong for EPT. WalkFast.fFailed is always zero here!*/
#  error completely wrong.
        if (WalkFast.fFailed & PGM_WALKFAIL_EPT)
            IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
# endif
        return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
    }
#else
    Assert((WalkFast.fEffective & X86_PTE_US) || IEM_GET_CPL(pVCpu) != 3);
    Assert(!(WalkFast.fEffective & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE));
#endif
    RTGCPHYS const GCPhys = WalkFast.GCPhys;

    /*
     * Read the bytes at this address.
     */
    uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
    if (cbToTryRead > cbLeftOnPage)
        cbToTryRead = cbLeftOnPage;
    if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
        cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);

    if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
    {
        VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
        if (RT_LIKELY(rcStrict == VINF_SUCCESS))
        { /* likely */ }
        else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
        {
            Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
                 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
            rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
        }
        else
        {
            Log((RT_SUCCESS(rcStrict)
                 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
                 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
                 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
            return rcStrict;
        }
    }
    else
    {
        rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
        if (RT_SUCCESS(rc))
        { /* likely */ }
        else
        {
            Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
                 GCPtrPC, GCPhys, cbToTryRead, rc));
            return rc;
        }
    }
    pVCpu->iem.s.cbOpcode = cbToTryRead;
#endif /* !IEM_WITH_CODE_TLB */
    return VINF_SUCCESS;
}


#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
/**
 * Helper for doing large page accounting at TLB load time.
 */
template<bool const a_fGlobal>
DECL_FORCE_INLINE(void) iemTlbLoadedLargePage(PVMCPUCC pVCpu, IEMTLB *pTlb, RTGCPTR uTagNoRev, bool f2MbLargePages)
{
    if (a_fGlobal)
        pTlb->cTlbGlobalLargePageCurLoads++;
    else
        pTlb->cTlbNonGlobalLargePageCurLoads++;

# ifdef IEMTLB_WITH_LARGE_PAGE_BITMAP
    RTGCPTR const idxBit = IEMTLB_TAG_TO_EVEN_INDEX(uTagNoRev) + a_fGlobal;
    ASMBitSet(pTlb->bmLargePage, idxBit);
# endif

    AssertCompile(IEMTLB_CALC_TAG_NO_REV((RTGCPTR)0x8731U << GUEST_PAGE_SHIFT) == 0x8731U);
    uint32_t const                 fMask  = (f2MbLargePages ? _2M - 1U : _4M - 1U) >> GUEST_PAGE_SHIFT;
    IEMTLB::LARGEPAGERANGE * const pRange = a_fGlobal
                                          ? &pTlb->GlobalLargePageRange
                                          : &pTlb->NonGlobalLargePageRange;
    uTagNoRev &= ~(RTGCPTR)fMask;
    if (uTagNoRev < pRange->uFirstTag)
        pRange->uFirstTag = uTagNoRev;

    uTagNoRev |= fMask;
    if (uTagNoRev > pRange->uLastTag)
        pRange->uLastTag = uTagNoRev;

    RT_NOREF_PV(pVCpu);
}
#endif
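
/* Worked example for the range tracking above (illustrative numbers): with
   2MB large pages, fMask = (_2M - 1) >> GUEST_PAGE_SHIFT = 0x1ff.  A 4K page
   at 0x40321000 has uTagNoRev = 0x40321; clearing the low 9 bits yields the
   first tag of the enclosing large page (0x40200) and OR'ing them back in
   yields the last (0x403ff), so [uFirstTag, uLastTag] ends up bracketing
   every 4K sub-page of the 2MB page. */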


#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
/**
 * Worker for iemTlbInvalidateAll.
 */
template<bool a_fGlobal>
DECL_FORCE_INLINE(void) iemTlbInvalidateOne(IEMTLB *pTlb)
{
    if (!a_fGlobal)
        pTlb->cTlsFlushes++;
    else
        pTlb->cTlsGlobalFlushes++;

    pTlb->uTlbRevision += IEMTLB_REVISION_INCR;
    if (RT_LIKELY(pTlb->uTlbRevision != 0))
    { /* very likely */ }
    else
    {
        pTlb->uTlbRevision = IEMTLB_REVISION_INCR;
        pTlb->cTlbRevisionRollovers++;
        unsigned i = RT_ELEMENTS(pTlb->aEntries) / 2;
        while (i-- > 0)
            pTlb->aEntries[i * 2].uTag = 0;
    }

    pTlb->cTlbNonGlobalLargePageCurLoads    = 0;
    pTlb->NonGlobalLargePageRange.uLastTag  = 0;
    pTlb->NonGlobalLargePageRange.uFirstTag = UINT64_MAX;

    if (a_fGlobal)
    {
        pTlb->uTlbRevisionGlobal += IEMTLB_REVISION_INCR;
        if (RT_LIKELY(pTlb->uTlbRevisionGlobal != 0))
        { /* very likely */ }
        else
        {
            pTlb->uTlbRevisionGlobal = IEMTLB_REVISION_INCR;
            pTlb->cTlbRevisionRollovers++;
            unsigned i = RT_ELEMENTS(pTlb->aEntries) / 2;
            while (i-- > 0)
                pTlb->aEntries[i * 2 + 1].uTag = 0;
        }

        pTlb->cTlbGlobalLargePageCurLoads    = 0;
        pTlb->GlobalLargePageRange.uLastTag  = 0;
        pTlb->GlobalLargePageRange.uFirstTag = UINT64_MAX;
    }
}
#endif


/**
 * Worker for IEMTlbInvalidateAll and IEMTlbInvalidateAllGlobal.
 */
template<bool a_fGlobal>
DECL_FORCE_INLINE(void) iemTlbInvalidateAll(PVMCPUCC pVCpu)
{
#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
    Log10(("IEMTlbInvalidateAll\n"));

# ifdef IEM_WITH_CODE_TLB
    pVCpu->iem.s.cbInstrBufTotal = 0;
    iemTlbInvalidateOne<a_fGlobal>(&pVCpu->iem.s.CodeTlb);
    if (a_fGlobal)
        IEMTLBTRACE_FLUSH_GLOBAL(pVCpu, pVCpu->iem.s.CodeTlb.uTlbRevision, pVCpu->iem.s.CodeTlb.uTlbRevisionGlobal, false);
    else
        IEMTLBTRACE_FLUSH(pVCpu, pVCpu->iem.s.CodeTlb.uTlbRevision, false);
# endif

# ifdef IEM_WITH_DATA_TLB
    iemTlbInvalidateOne<a_fGlobal>(&pVCpu->iem.s.DataTlb);
    if (a_fGlobal)
        IEMTLBTRACE_FLUSH_GLOBAL(pVCpu, pVCpu->iem.s.DataTlb.uTlbRevision, pVCpu->iem.s.DataTlb.uTlbRevisionGlobal, true);
    else
        IEMTLBTRACE_FLUSH(pVCpu, pVCpu->iem.s.DataTlb.uTlbRevision, true);
# endif
#else
    RT_NOREF(pVCpu);
#endif
}

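
/* Sketch of the revision trick used by iemTlbInvalidateOne (illustrative):
   entries are stored with the current revision folded into the tag, so a
   flush is O(1) because bumping the revision makes every live tag compare
   unequal on the next lookup:
        store:  pTlbe->uTag = uTagNoRev | pTlb->uTlbRevision;
        match:  pTlbe->uTag == (uTagNoRev | pTlb->uTlbRevision)
   Only when a revision counter wraps to zero must the corresponding TLB half
   actually be zeroed, which is the rollover path above. */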

/**
 * Invalidates the non-global IEM TLB entries.
 *
 * This is called internally as well as by PGM when moving GC mappings.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the calling
 *                      thread.
 */
VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPUCC pVCpu)
{
    iemTlbInvalidateAll<false>(pVCpu);
}


/**
 * Invalidates all the IEM TLB entries, global ones included.
 *
 * This is called internally as well as by PGM when moving GC mappings.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the calling
 *                      thread.
 */
VMM_INT_DECL(void) IEMTlbInvalidateAllGlobal(PVMCPUCC pVCpu)
{
    iemTlbInvalidateAll<true>(pVCpu);
}


#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)

/** @todo graduate this to cdefs.h or asm-mem.h. */
# ifdef RT_ARCH_ARM64           /** @todo RT_CACHELINE_SIZE is wrong for M1 */
#  undef RT_CACHELINE_SIZE
#  define RT_CACHELINE_SIZE 128
# endif

# if defined(_MM_HINT_T0) && (defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86))
#  define MY_PREFETCH(a_pvAddr)     _mm_prefetch((const char *)(a_pvAddr), _MM_HINT_T0)
# elif defined(_MSC_VER) && (defined(RT_ARCH_ARM64) || defined(RT_ARCH_ARM32))
#  define MY_PREFETCH(a_pvAddr)     __prefetch((a_pvAddr))
# elif defined(__GNUC__) || RT_CLANG_HAS_FEATURE(__builtin_prefetch)
#  define MY_PREFETCH(a_pvAddr)     __builtin_prefetch((a_pvAddr), 0 /*rw*/, 3 /*locality*/)
# else
#  define MY_PREFETCH(a_pvAddr)     ((void)0)
# endif
# if 0
#  undef  MY_PREFETCH
#  define MY_PREFETCH(a_pvAddr)     ((void)0)
# endif

/** @def MY_PREFETCH_64
 * 64 byte prefetch hint, could be more depending on cache line size. */
/** @def MY_PREFETCH_128
 * 128 byte prefetch hint. */
/** @def MY_PREFETCH_256
 * 256 byte prefetch hint. */
# if RT_CACHELINE_SIZE >= 128
    /* 128 byte cache lines */
#  define MY_PREFETCH_64(a_pvAddr)  MY_PREFETCH(a_pvAddr)
#  define MY_PREFETCH_128(a_pvAddr) MY_PREFETCH(a_pvAddr)
#  define MY_PREFETCH_256(a_pvAddr) do { \
        MY_PREFETCH(a_pvAddr); \
        MY_PREFETCH((uint8_t const *)a_pvAddr + 128); \
    } while (0)
# else
    /* 64 byte cache lines */
#  define MY_PREFETCH_64(a_pvAddr)  MY_PREFETCH(a_pvAddr)
#  define MY_PREFETCH_128(a_pvAddr) do { \
        MY_PREFETCH(a_pvAddr); \
        MY_PREFETCH((uint8_t const *)a_pvAddr + 64); \
    } while (0)
#  define MY_PREFETCH_256(a_pvAddr) do { \
        MY_PREFETCH(a_pvAddr); \
        MY_PREFETCH((uint8_t const *)a_pvAddr + 64); \
        MY_PREFETCH((uint8_t const *)a_pvAddr + 128); \
        MY_PREFETCH((uint8_t const *)a_pvAddr + 192); \
    } while (0)
# endif

template<bool const a_fDataTlb, bool const a_f2MbLargePage, bool const a_fGlobal, bool const a_fNonGlobal>
DECLINLINE(void) iemTlbInvalidateLargePageWorkerInner(PVMCPUCC pVCpu, IEMTLB *pTlb, RTGCPTR GCPtrTag,
                                                      RTGCPTR GCPtrInstrBufPcTag) RT_NOEXCEPT
{
    IEMTLBTRACE_LARGE_SCAN(pVCpu, a_fGlobal, a_fNonGlobal, a_fDataTlb);
    AssertCompile(IEMTLB_ENTRY_COUNT >= 16); /* prefetching + unroll assumption */

    if (a_fGlobal)
        pTlb->cTlbInvlPgLargeGlobal += 1;
    if (a_fNonGlobal)
        pTlb->cTlbInvlPgLargeNonGlobal += 1;

    /*
     * Set up the scan.
     *
     * GCPtrTagMask: A 2MB page consists of 512 4K pages, so a 256-entry TLB
     * will map offset zero and offset 1MB to the same slot pair.  Our
     * GCPtrTag[Glob] values are for the range 0-1MB, i.e. slots 0-255.  So,
     * we construct a mask that folds large page offsets in the 1MB-2MB range
     * into the 0-1MB range.
     *
     * For our example with 2MB pages and a 256 entry TLB: 0xfffffffffffffeff
     *
     * MY_PREFETCH: Hope that prefetching 256 bytes at a time is okay for
     * relevant host architectures.
     */
    /** @todo benchmark this code from the guest side. */
    bool const      fPartialScan = IEMTLB_ENTRY_COUNT > (a_f2MbLargePage ? 512 : 1024);
#ifdef IEMTLB_WITH_LARGE_PAGE_BITMAP
    uintptr_t       idxBitmap    = fPartialScan ? IEMTLB_TAG_TO_EVEN_INDEX(GCPtrTag) / 64 : 0;
    uintptr_t const idxBitmapEnd = fPartialScan ? idxBitmap + ((a_f2MbLargePage ? 512 : 1024) * 2) / 64
                                                : IEMTLB_ENTRY_COUNT * 2 / 64;
#else
    uintptr_t       idxEven      = fPartialScan ? IEMTLB_TAG_TO_EVEN_INDEX(GCPtrTag) : 0;
    MY_PREFETCH_256(&pTlb->aEntries[idxEven + !a_fNonGlobal]);
    uintptr_t const idxEvenEnd   = fPartialScan ? idxEven + ((a_f2MbLargePage ? 512 : 1024) * 2) : IEMTLB_ENTRY_COUNT * 2;
#endif
    RTGCPTR const   GCPtrTagMask = fPartialScan ? ~(RTGCPTR)0
                                 : ~(RTGCPTR)(  (RT_BIT_32(a_f2MbLargePage ? 9 : 10) - 1U)
                                              & ~(uint32_t)(RT_BIT_32(IEMTLB_ENTRY_COUNT_AS_POWER_OF_TWO) - 1U));
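
    /* Worked GCPtrTagMask example (illustrative): for a_f2MbLargePage with
       IEMTLB_ENTRY_COUNT_AS_POWER_OF_TWO == 8 (256 entry pairs), the full-scan
       mask is ~((RT_BIT_32(9) - 1) & ~(RT_BIT_32(8) - 1)) = ~0x100
       = 0xfffffffffffffeff, i.e. tag bit 8 is ignored in the compares so the
       upper 1MB half of a 2MB page, which aliases the same slot pairs, is
       matched by the same 0-1MB tag values. */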

    /*
     * Set cbInstrBufTotal to zero if GCPtrInstrBufPcTag is within any of the tag ranges.
     * We make ASSUMPTIONS about IEMTLB_CALC_TAG_NO_REV here.
     */
    AssertCompile(IEMTLB_CALC_TAG_NO_REV((RTGCPTR)0x8731U << GUEST_PAGE_SHIFT) == 0x8731U);
    if (   !a_fDataTlb
        && GCPtrInstrBufPcTag - GCPtrTag < (a_f2MbLargePage ? 512U : 1024U))
        pVCpu->iem.s.cbInstrBufTotal = 0;

    /*
     * Combine TAG values with the TLB revisions.
     */
    RTGCPTR GCPtrTagGlob = a_fGlobal ? GCPtrTag | pTlb->uTlbRevisionGlobal : 0;
    if (a_fNonGlobal)
        GCPtrTag |= pTlb->uTlbRevision;

    /*
     * Do the scanning.
     */
#ifdef IEMTLB_WITH_LARGE_PAGE_BITMAP
    uint64_t const bmMask = a_fGlobal && a_fNonGlobal ? UINT64_MAX
                          : a_fGlobal ? UINT64_C(0xaaaaaaaaaaaaaaaa) : UINT64_C(0x5555555555555555);
    /* Scan bitmap entries (64 bits at a time): */
    for (;;)
    {
# if 1
        uint64_t bmEntry = pTlb->bmLargePage[idxBitmap] & bmMask;
        if (bmEntry)
        {
            /* Scan the non-zero 64-bit value in groups of 8 bits: */
            uint64_t  bmToClear = 0;
            uintptr_t idxEven   = idxBitmap * 64;
            uint32_t  idxTag    = 0;
            for (;;)
            {
                if (bmEntry & 0xff)
                {
# define ONE_PAIR(a_idxTagIter, a_idxEvenIter, a_bmNonGlobal, a_bmGlobal) \
                    if (a_fNonGlobal) \
                    { \
                        if (bmEntry & a_bmNonGlobal) \
                        { \
                            Assert(pTlb->aEntries[a_idxEvenIter].fFlagsAndPhysRev & IEMTLBE_F_PT_LARGE_PAGE); \
                            if ((pTlb->aEntries[a_idxEvenIter].uTag & GCPtrTagMask) == (GCPtrTag + a_idxTagIter)) \
                            { \
                                IEMTLBTRACE_LARGE_EVICT_SLOT(pVCpu, GCPtrTag + a_idxTagIter, \
                                                             pTlb->aEntries[a_idxEvenIter].GCPhys, \
                                                             a_idxEvenIter, a_fDataTlb); \
                                pTlb->aEntries[a_idxEvenIter].uTag = 0; \
                                bmToClearSub8 |= a_bmNonGlobal; \
                            } \
                        } \
                        else \
                            Assert(   !(pTlb->aEntries[a_idxEvenIter].fFlagsAndPhysRev & IEMTLBE_F_PT_LARGE_PAGE)\
                                   ||    (pTlb->aEntries[a_idxEvenIter].uTag & IEMTLB_REVISION_MASK) \
                                      != (GCPtrTag & IEMTLB_REVISION_MASK)); \
                    } \
                    if (a_fGlobal) \
                    { \
                        if (bmEntry & a_bmGlobal) \
                        { \
                            Assert(pTlb->aEntries[a_idxEvenIter + 1].fFlagsAndPhysRev & IEMTLBE_F_PT_LARGE_PAGE); \
                            if ((pTlb->aEntries[a_idxEvenIter + 1].uTag & GCPtrTagMask) == (GCPtrTagGlob + a_idxTagIter)) \
                            { \
                                IEMTLBTRACE_LARGE_EVICT_SLOT(pVCpu, GCPtrTagGlob + a_idxTagIter, \
                                                             pTlb->aEntries[a_idxEvenIter + 1].GCPhys, \
                                                             a_idxEvenIter + 1, a_fDataTlb); \
                                pTlb->aEntries[a_idxEvenIter + 1].uTag = 0; \
                                bmToClearSub8 |= a_bmGlobal; \
                            } \
                        } \
                        else \
                            Assert(   !(pTlb->aEntries[a_idxEvenIter + 1].fFlagsAndPhysRev & IEMTLBE_F_PT_LARGE_PAGE)\
                                   ||    (pTlb->aEntries[a_idxEvenIter + 1].uTag & IEMTLB_REVISION_MASK) \
                                      != (GCPtrTagGlob & IEMTLB_REVISION_MASK)); \
                    }
                    uint64_t bmToClearSub8 = 0;
                    ONE_PAIR(idxTag + 0, idxEven + 0, 0x01, 0x02)
                    ONE_PAIR(idxTag + 1, idxEven + 2, 0x04, 0x08)
                    ONE_PAIR(idxTag + 2, idxEven + 4, 0x10, 0x20)
                    ONE_PAIR(idxTag + 3, idxEven + 6, 0x40, 0x80)
                    bmToClear |= bmToClearSub8 << (idxTag * 2);
# undef ONE_PAIR
                }

                /* advance to the next 8 bits. */
                bmEntry >>= 8;
                if (!bmEntry)
                    break;
                idxEven += 8;
                idxTag  += 4;
            }

            /* Clear the large page flags we covered. */
            pTlb->bmLargePage[idxBitmap] &= ~bmToClear;
        }
# else
        uint64_t const bmEntry = pTlb->bmLargePage[idxBitmap] & bmMask;
        if (bmEntry)
        {
            /* Scan the non-zero 64-bit value completely unrolled: */
            uintptr_t const idxEven   = idxBitmap * 64;
            uint64_t        bmToClear = 0;
# define ONE_PAIR(a_idxTagIter, a_idxEvenIter, a_bmNonGlobal, a_bmGlobal) \
            if (a_fNonGlobal) \
            { \
                if (bmEntry & a_bmNonGlobal) \
                { \
                    Assert(pTlb->aEntries[a_idxEvenIter].fFlagsAndPhysRev & IEMTLBE_F_PT_LARGE_PAGE); \
                    if ((pTlb->aEntries[a_idxEvenIter].uTag & GCPtrTagMask) == (GCPtrTag + a_idxTagIter)) \
                    { \
                        IEMTLBTRACE_LARGE_EVICT_SLOT(pVCpu, GCPtrTag + a_idxTagIter, \
                                                     pTlb->aEntries[a_idxEvenIter].GCPhys, \
                                                     a_idxEvenIter, a_fDataTlb); \
                        pTlb->aEntries[a_idxEvenIter].uTag = 0; \
                        bmToClear |= a_bmNonGlobal; \
                    } \
                } \
                else \
                    Assert(   !(pTlb->aEntries[a_idxEvenIter].fFlagsAndPhysRev & IEMTLBE_F_PT_LARGE_PAGE)\
                           ||    (pTlb->aEntries[a_idxEvenIter].uTag & IEMTLB_REVISION_MASK) \
                              != (GCPtrTag & IEMTLB_REVISION_MASK)); \
            } \
            if (a_fGlobal) \
            { \
                if (bmEntry & a_bmGlobal) \
                { \
                    Assert(pTlb->aEntries[a_idxEvenIter + 1].fFlagsAndPhysRev & IEMTLBE_F_PT_LARGE_PAGE); \
                    if ((pTlb->aEntries[a_idxEvenIter + 1].uTag & GCPtrTagMask) == (GCPtrTagGlob + a_idxTagIter)) \
                    { \
                        IEMTLBTRACE_LARGE_EVICT_SLOT(pVCpu, GCPtrTagGlob + a_idxTagIter, \
                                                     pTlb->aEntries[a_idxEvenIter + 1].GCPhys, \
                                                     a_idxEvenIter + 1, a_fDataTlb); \
                        pTlb->aEntries[a_idxEvenIter + 1].uTag = 0; \
                        bmToClear |= a_bmGlobal; \
                    } \
                } \
                else \
                    Assert(   !(pTlb->aEntries[a_idxEvenIter + 1].fFlagsAndPhysRev & IEMTLBE_F_PT_LARGE_PAGE)\
                           ||    (pTlb->aEntries[a_idxEvenIter + 1].uTag & IEMTLB_REVISION_MASK) \
                              != (GCPtrTagGlob & IEMTLB_REVISION_MASK)); \
            } ((void)0)
# define FOUR_PAIRS(a_iByte, a_cShift) \
            ONE_PAIR(0 + a_iByte * 4, idxEven + 0 + a_iByte * 8, UINT64_C(0x01) << a_cShift, UINT64_C(0x02) << a_cShift); \
            ONE_PAIR(1 + a_iByte * 4, idxEven + 2 + a_iByte * 8, UINT64_C(0x04) << a_cShift, UINT64_C(0x08) << a_cShift); \
            ONE_PAIR(2 + a_iByte * 4, idxEven + 4 + a_iByte * 8, UINT64_C(0x10) << a_cShift, UINT64_C(0x20) << a_cShift); \
            ONE_PAIR(3 + a_iByte * 4, idxEven + 6 + a_iByte * 8, UINT64_C(0x40) << a_cShift, UINT64_C(0x80) << a_cShift)
            if (bmEntry & (uint32_t)UINT16_MAX)
            {
                FOUR_PAIRS(0, 0);
                FOUR_PAIRS(1, 8);
            }
            if (bmEntry & ((uint32_t)UINT16_MAX << 16))
            {
                FOUR_PAIRS(2, 16);
                FOUR_PAIRS(3, 24);
            }
            if (bmEntry & ((uint64_t)UINT16_MAX << 32))
            {
                FOUR_PAIRS(4, 32);
                FOUR_PAIRS(5, 40);
            }
            if (bmEntry & ((uint64_t)UINT16_MAX << 48))
            {
                FOUR_PAIRS(6, 48);
                FOUR_PAIRS(7, 56);
            }
# undef FOUR_PAIRS

            /* Clear the large page flags we covered. */
            pTlb->bmLargePage[idxBitmap] &= ~bmToClear;
        }
# endif

        /* advance */
        idxBitmap++;
        if (idxBitmap >= idxBitmapEnd)
            break;
        if (a_fNonGlobal)
            GCPtrTag     += 32;
        if (a_fGlobal)
            GCPtrTagGlob += 32;
    }

#else  /* !IEMTLB_WITH_LARGE_PAGE_BITMAP */

    for (; idxEven < idxEvenEnd; idxEven += 8)
    {
# define ONE_ITERATION(a_idxEvenIter) \
        if (a_fNonGlobal) \
        { \
            if ((pTlb->aEntries[a_idxEvenIter].uTag & GCPtrTagMask) == GCPtrTag) \
            { \
                if (pTlb->aEntries[a_idxEvenIter].fFlagsAndPhysRev & IEMTLBE_F_PT_LARGE_PAGE) \
                { \
                    IEMTLBTRACE_LARGE_EVICT_SLOT(pVCpu, GCPtrTag, pTlb->aEntries[a_idxEvenIter].GCPhys, \
                                                 a_idxEvenIter, a_fDataTlb); \
                    pTlb->aEntries[a_idxEvenIter].uTag = 0; \
                } \
            } \
            GCPtrTag++; \
        } \
        \
        if (a_fGlobal) \
        { \
            if ((pTlb->aEntries[a_idxEvenIter + 1].uTag & GCPtrTagMask) == GCPtrTagGlob) \
            { \
                if (pTlb->aEntries[a_idxEvenIter + 1].fFlagsAndPhysRev & IEMTLBE_F_PT_LARGE_PAGE) \
                { \
                    IEMTLBTRACE_LARGE_EVICT_SLOT(pVCpu, GCPtrTag, pTlb->aEntries[a_idxEvenIter + 1].GCPhys, \
                                                 a_idxEvenIter + 1, a_fDataTlb); \
                    pTlb->aEntries[a_idxEvenIter + 1].uTag = 0; \
                } \
            } \
            GCPtrTagGlob++; \
        }
        if (idxEven < idxEvenEnd - 4)
            MY_PREFETCH_256(&pTlb->aEntries[idxEven + 8 + !a_fNonGlobal]);
        ONE_ITERATION(idxEven)
        ONE_ITERATION(idxEven + 2)
        ONE_ITERATION(idxEven + 4)
        ONE_ITERATION(idxEven + 6)
# undef ONE_ITERATION
    }
#endif /* !IEMTLB_WITH_LARGE_PAGE_BITMAP */
}

template<bool const a_fDataTlb, bool const a_f2MbLargePage>
DECLINLINE(void) iemTlbInvalidateLargePageWorker(PVMCPUCC pVCpu, IEMTLB *pTlb, RTGCPTR GCPtrTag,
                                                 RTGCPTR GCPtrInstrBufPcTag) RT_NOEXCEPT
{
    AssertCompile(IEMTLB_CALC_TAG_NO_REV((RTGCPTR)0x8731U << GUEST_PAGE_SHIFT) == 0x8731U);

    GCPtrTag &= ~(RTGCPTR)(RT_BIT_64((a_f2MbLargePage ? 21 : 22) - GUEST_PAGE_SHIFT) - 1U);
    if (   GCPtrTag >= pTlb->GlobalLargePageRange.uFirstTag
        && GCPtrTag <= pTlb->GlobalLargePageRange.uLastTag)
    {
        if (   GCPtrTag < pTlb->NonGlobalLargePageRange.uFirstTag
            || GCPtrTag > pTlb->NonGlobalLargePageRange.uLastTag)
            iemTlbInvalidateLargePageWorkerInner<a_fDataTlb, a_f2MbLargePage, true, false>(pVCpu, pTlb, GCPtrTag, GCPtrInstrBufPcTag);
        else
            iemTlbInvalidateLargePageWorkerInner<a_fDataTlb, a_f2MbLargePage, true, true>(pVCpu, pTlb, GCPtrTag, GCPtrInstrBufPcTag);
    }
    else if (   GCPtrTag < pTlb->NonGlobalLargePageRange.uFirstTag
             || GCPtrTag > pTlb->NonGlobalLargePageRange.uLastTag)
    {
        /* Large pages aren't as likely in the non-global TLB half. */
        IEMTLBTRACE_LARGE_SCAN(pVCpu, false, false, a_fDataTlb);
    }
    else
        iemTlbInvalidateLargePageWorkerInner<a_fDataTlb, a_f2MbLargePage, false, true>(pVCpu, pTlb, GCPtrTag, GCPtrInstrBufPcTag);
}
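
/* Worked example for the rounding in iemTlbInvalidateLargePageWorker
   (illustrative numbers): for a 2MB page, RT_BIT_64(21 - GUEST_PAGE_SHIFT) - 1
   = 0x1ff, so a tag such as 0x40321 is rounded down to 0x40200, the first 4K
   tag of the enclosing large page, before the [uFirstTag, uLastTag] range
   checks decide which TLB halves need scanning. */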

template<bool const a_fDataTlb>
DECLINLINE(void) iemTlbInvalidatePageWorker(PVMCPUCC pVCpu, IEMTLB *pTlb, RTGCPTR GCPtrTag, uintptr_t idxEven) RT_NOEXCEPT
{
    pTlb->cTlbInvlPg += 1;

    /*
     * Flush the entry pair.
     */
    if (pTlb->aEntries[idxEven].uTag == (GCPtrTag | pTlb->uTlbRevision))
    {
        IEMTLBTRACE_EVICT_SLOT(pVCpu, GCPtrTag, pTlb->aEntries[idxEven].GCPhys, idxEven, a_fDataTlb);
        pTlb->aEntries[idxEven].uTag = 0;
        if (!a_fDataTlb && GCPtrTag == IEMTLB_CALC_TAG_NO_REV(pVCpu->iem.s.uInstrBufPc))
            pVCpu->iem.s.cbInstrBufTotal = 0;
    }
    if (pTlb->aEntries[idxEven + 1].uTag == (GCPtrTag | pTlb->uTlbRevisionGlobal))
    {
        IEMTLBTRACE_EVICT_SLOT(pVCpu, GCPtrTag, pTlb->aEntries[idxEven + 1].GCPhys, idxEven + 1, a_fDataTlb);
        pTlb->aEntries[idxEven + 1].uTag = 0;
        if (!a_fDataTlb && GCPtrTag == IEMTLB_CALC_TAG_NO_REV(pVCpu->iem.s.uInstrBufPc))
            pVCpu->iem.s.cbInstrBufTotal = 0;
    }

    /*
     * If there are (or have been) large pages in the TLB, we must check if the
     * address being flushed may involve one of those, as then we'd have to
     * scan for entries relating to the same page and flush those as well.
     */
# if 0 /** @todo do accurate counts of currently loaded large pages and we can use those */
    if (pTlb->cTlbGlobalLargePageCurLoads || pTlb->cTlbNonGlobalLargePageCurLoads)
# else
    if (pTlb->GlobalLargePageRange.uLastTag || pTlb->NonGlobalLargePageRange.uLastTag)
# endif
    {
        RTGCPTR const GCPtrInstrBufPcTag = a_fDataTlb ? 0 : IEMTLB_CALC_TAG_NO_REV(pVCpu->iem.s.uInstrBufPc);
        if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
            iemTlbInvalidateLargePageWorker<a_fDataTlb, true>(pVCpu, pTlb, GCPtrTag, GCPtrInstrBufPcTag);
        else
            iemTlbInvalidateLargePageWorker<a_fDataTlb, false>(pVCpu, pTlb, GCPtrTag, GCPtrInstrBufPcTag);
    }
}

#endif /* defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB) */

/**
 * Invalidates a page in the TLBs.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the calling
 *                      thread.
 * @param   GCPtr       The address of the page to invalidate.
 * @thread EMT(pVCpu)
 */
VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPUCC pVCpu, RTGCPTR GCPtr)
{
    IEMTLBTRACE_INVLPG(pVCpu, GCPtr);
#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
    Log10(("IEMTlbInvalidatePage: GCPtr=%RGv\n", GCPtr));
    GCPtr = IEMTLB_CALC_TAG_NO_REV(GCPtr);
    Assert(!(GCPtr >> (48 - X86_PAGE_SHIFT)));
    uintptr_t const idxEven = IEMTLB_TAG_TO_EVEN_INDEX(GCPtr);

# ifdef IEM_WITH_CODE_TLB
    iemTlbInvalidatePageWorker<false>(pVCpu, &pVCpu->iem.s.CodeTlb, GCPtr, idxEven);
# endif
# ifdef IEM_WITH_DATA_TLB
    iemTlbInvalidatePageWorker<true>(pVCpu, &pVCpu->iem.s.DataTlb, GCPtr, idxEven);
# endif
#else
    NOREF(pVCpu); NOREF(GCPtr);
#endif
}
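
/* Minimal usage sketch (assumed caller context, not taken from this file):
   emulating a guest invlpg would be expected to funnel into the function
   above along the lines of
        IEMTlbInvalidatePage(pVCpu, GCPtrPage);
   where GCPtrPage is the instruction's effective address; the 4K entry pair
   is flushed directly and the large-page scan only runs when large pages are
   (or have been) loaded. */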


#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
/**
 * Invalidates both TLBs the slow way following a physical revision rollover.
 *
 * Worker for IEMTlbInvalidateAllPhysical,
 * IEMTlbInvalidateAllPhysicalAllCpus, iemOpcodeFetchBytesJmp, iemMemMap,
 * iemMemMapJmp and others.
 *
 * @thread EMT(pVCpu)
 */
static void IEMTlbInvalidateAllPhysicalSlow(PVMCPUCC pVCpu)
{
    Log10(("IEMTlbInvalidateAllPhysicalSlow\n"));
    ASMAtomicWriteU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
    ASMAtomicWriteU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);

    unsigned i;
# ifdef IEM_WITH_CODE_TLB
    i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
    while (i-- > 0)
    {
        pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3       = NULL;
        pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~(  IEMTLBE_F_PG_NO_WRITE   | IEMTLBE_F_PG_NO_READ
                                                               | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
    }
    pVCpu->iem.s.CodeTlb.cTlbPhysRevRollovers++;
    pVCpu->iem.s.CodeTlb.cTlbPhysRevFlushes++;
# endif
# ifdef IEM_WITH_DATA_TLB
    i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
    while (i-- > 0)
    {
        pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3       = NULL;
        pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~(  IEMTLBE_F_PG_NO_WRITE   | IEMTLBE_F_PG_NO_READ
                                                               | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
    }
    pVCpu->iem.s.DataTlb.cTlbPhysRevRollovers++;
    pVCpu->iem.s.DataTlb.cTlbPhysRevFlushes++;
# endif
}
#endif
1242 |
|
---|
1243 |
|
---|
1244 | /**
|
---|
1245 | * Invalidates the host physical aspects of the IEM TLBs.
|
---|
1246 | *
|
---|
1247 | * This is called internally as well as by PGM when moving GC mappings.
|
---|
1248 | *
|
---|
1249 | * @param pVCpu The cross context virtual CPU structure of the calling
|
---|
1250 | * thread.
|
---|
1251 | * @note Currently not used.
|
---|
1252 | */
|
---|
1253 | VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPUCC pVCpu)
|
---|
1254 | {
|
---|
1255 | #if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
|
---|
1256 | /* Note! This probably won't end up looking exactly like this, but it give an idea... */
|
---|
1257 | Log10(("IEMTlbInvalidateAllPhysical\n"));
|
---|
1258 |
|
---|
1259 | # ifdef IEM_WITH_CODE_TLB
|
---|
1260 | pVCpu->iem.s.cbInstrBufTotal = 0;
|
---|
1261 | # endif
|
---|
1262 | uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
|
---|
1263 | if (RT_LIKELY(uTlbPhysRev > IEMTLB_PHYS_REV_INCR * 2))
|
---|
1264 | {
|
---|
1265 | pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
|
---|
1266 | pVCpu->iem.s.CodeTlb.cTlbPhysRevFlushes++;
|
---|
1267 | pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
|
---|
1268 | pVCpu->iem.s.DataTlb.cTlbPhysRevFlushes++;
|
---|
1269 | }
|
---|
1270 | else
|
---|
1271 | IEMTlbInvalidateAllPhysicalSlow(pVCpu);
|
---|
1272 | #else
|
---|
1273 | NOREF(pVCpu);
|
---|
1274 | #endif
|
---|
1275 | }
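
/* The revision trick used by the fast path above, as a short sketch (the
   check mirrors the one the TLB lookup code performs; the comment text is
   editorial):

   @code
        // An entry's physical info is only considered valid while the
        // revision embedded in its flags matches the current revision:
        bool const fPhysValid = (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV)
                             == pVCpu->iem.s.CodeTlb.uTlbPhysRev;
        // Thus bumping uTlbPhysRev by IEMTLB_PHYS_REV_INCR invalidates every
        // entry at once; only on the rare wraparound does each entry have to
        // be visited, which is IEMTlbInvalidateAllPhysicalSlow's job.
   @endcode
*/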


/**
 * Invalidates the host physical aspects of the IEM TLBs.
 *
 * This is called internally as well as by PGM when moving GC mappings.
 *
 * @param   pVM         The cross context VM structure.
 * @param   idCpuCaller The ID of the calling EMT if available to the caller,
 *                      otherwise NIL_VMCPUID.
 * @param   enmReason   The reason we're called.
 *
 * @remarks Caller holds the PGM lock.
 */
VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVMCC pVM, VMCPUID idCpuCaller, IEMTLBPHYSFLUSHREASON enmReason)
{
#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
    PVMCPUCC const pVCpuCaller = idCpuCaller >= pVM->cCpus ? VMMGetCpu(pVM) : VMMGetCpuById(pVM, idCpuCaller);
    if (pVCpuCaller)
        VMCPU_ASSERT_EMT(pVCpuCaller);
    Log10(("IEMTlbInvalidateAllPhysicalAllCpus: %d\n", enmReason)); RT_NOREF(enmReason);

    VMCC_FOR_EACH_VMCPU(pVM)
    {
# ifdef IEM_WITH_CODE_TLB
        if (pVCpuCaller == pVCpu)
            pVCpu->iem.s.cbInstrBufTotal = 0;
# endif

        uint64_t const uTlbPhysRevPrev = ASMAtomicUoReadU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev);
        uint64_t       uTlbPhysRevNew  = uTlbPhysRevPrev + IEMTLB_PHYS_REV_INCR;
        if (RT_LIKELY(uTlbPhysRevNew > IEMTLB_PHYS_REV_INCR * 2))
        { /* likely */ }
        else if (pVCpuCaller != pVCpu)
            uTlbPhysRevNew = IEMTLB_PHYS_REV_INCR;
        else
        {
            IEMTlbInvalidateAllPhysicalSlow(pVCpu);
            continue;
        }
        if (ASMAtomicCmpXchgU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev))
            pVCpu->iem.s.CodeTlb.cTlbPhysRevFlushes++;

        if (ASMAtomicCmpXchgU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev))
            pVCpu->iem.s.DataTlb.cTlbPhysRevFlushes++;
    }
    VMCC_FOR_EACH_VMCPU_END(pVM);

#else
    RT_NOREF(pVM, idCpuCaller, enmReason);
#endif
}


/**
 * Flushes the prefetch buffer, light version.
 *
 * @param   pVCpu   The cross context virtual CPU structure of the calling
 *                  thread.
 * @param   cbInstr The length of the current instruction (only used when
 *                  there is no code TLB).
 */
void iemOpcodeFlushLight(PVMCPUCC pVCpu, uint8_t cbInstr)
{
#ifndef IEM_WITH_CODE_TLB
    pVCpu->iem.s.cbOpcode = cbInstr;
#else
    RT_NOREF(pVCpu, cbInstr);
#endif
}


/**
 * Flushes the prefetch buffer, heavy version.
 *
 * @param   pVCpu   The cross context virtual CPU structure of the calling
 *                  thread.
 * @param   cbInstr The length of the current instruction (only used when
 *                  there is no code TLB).
 */
void iemOpcodeFlushHeavy(PVMCPUCC pVCpu, uint8_t cbInstr)
{
#ifndef IEM_WITH_CODE_TLB
    pVCpu->iem.s.cbOpcode = cbInstr; /* Note! SVM and VT-x may set this to zero on exit, rather than the instruction length. */
#elif 1
    pVCpu->iem.s.cbInstrBufTotal = 0;
    RT_NOREF(cbInstr);
#else
    RT_NOREF(pVCpu, cbInstr);
#endif
}
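
/* Editorial note on the two flush flavours, as implemented above: with the
   code TLB enabled the light flush is a no-op, while the heavy flush zeroes
   cbInstrBufTotal so the next opcode fetch must re-establish pbInstrBuf via
   iemOpcodeFetchBytesJmp; without the code TLB both merely reset cbOpcode.
   A hypothetical call site (for illustration only; the macro use below is an
   assumption, not taken from the real callers):

   @code
        iemOpcodeFlushHeavy(pVCpu, IEM_GET_INSTR_LEN(pVCpu));  // assumed call site
   @endcode
*/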



#ifdef IEM_WITH_CODE_TLB

/**
 * Tries to fetch @a cbDst opcode bytes, raises the appropriate exception on
 * failure and jumps.
 *
 * We end up here for a number of reasons:
 *      - pbInstrBuf isn't yet initialized.
 *      - Advancing beyond the buffer boundary (e.g. crossing a page).
 *      - Advancing beyond the CS segment limit.
 *      - Fetching from a non-mappable page (e.g. MMIO).
 *      - TLB loading in the recompiler (@a pvDst = NULL, @a cbDst = 0).
 *
 * @param   pVCpu   The cross context virtual CPU structure of the
 *                  calling thread.
 * @param   cbDst   Number of bytes to read.  A value of zero is
 *                  allowed for initializing pbInstrBuf (the
 *                  recompiler does this).  In this case it is best
 *                  to set pbInstrBuf to NULL prior to the call.
 * @param   pvDst   Where to return the bytes.
 */
void iemOpcodeFetchBytesJmp(PVMCPUCC pVCpu, size_t cbDst, void *pvDst) IEM_NOEXCEPT_MAY_LONGJMP
{
# ifdef IN_RING3
    for (;;)
    {
        Assert(cbDst <= 8);
        uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;

        /*
         * We might have a partial buffer match, deal with that first to make the
         * rest simpler.  This is the first part of the cross page/buffer case.
         */
        uint8_t const * const pbInstrBuf = pVCpu->iem.s.pbInstrBuf;
        if (pbInstrBuf != NULL)
        {
            Assert(cbDst != 0); /* pbInstrBuf shall be NULL in case of a TLB load */
            uint32_t const cbInstrBuf = pVCpu->iem.s.cbInstrBuf;
            if (offBuf < cbInstrBuf)
            {
                Assert(offBuf + cbDst > cbInstrBuf);
                uint32_t const cbCopy = cbInstrBuf - offBuf;
                memcpy(pvDst, &pbInstrBuf[offBuf], cbCopy);

                cbDst  -= cbCopy;
                pvDst   = (uint8_t *)pvDst + cbCopy;
                offBuf += cbCopy;
            }
        }

        /*
         * Check segment limit, figuring how much we're allowed to access at this point.
         *
         * We will fault immediately if RIP is past the segment limit / in non-canonical
         * territory.  If we do continue, there are one or more bytes to read before we
         * end up in trouble and we need to do that first before faulting.
         */
        RTGCPTR  GCPtrFirst;
        uint32_t cbMaxRead;
        if (IEM_IS_64BIT_CODE(pVCpu))
        {
            GCPtrFirst = pVCpu->cpum.GstCtx.rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
            if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
            { /* likely */ }
            else
                iemRaiseGeneralProtectionFault0Jmp(pVCpu);
            cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
        }
        else
        {
            GCPtrFirst = pVCpu->cpum.GstCtx.eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
            /* Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu)); - this is allowed */
            if (RT_LIKELY((uint32_t)GCPtrFirst <= pVCpu->cpum.GstCtx.cs.u32Limit))
            { /* likely */ }
            else /** @todo For CPUs older than the 386, we should not necessarily generate \#GP here but wrap around! */
                iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
            cbMaxRead = pVCpu->cpum.GstCtx.cs.u32Limit - (uint32_t)GCPtrFirst + 1;
            if (cbMaxRead != 0)
            { /* likely */ }
            else
            {
                /* Overflowed because address is 0 and limit is max. */
                Assert(GCPtrFirst == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
                cbMaxRead = X86_PAGE_SIZE;
            }
            GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base;
            uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
            if (cbMaxRead2 < cbMaxRead)
                cbMaxRead = cbMaxRead2;
            /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
        }

        /*
         * Get the TLB entry for this piece of code.
         */
        uint64_t const uTagNoRev = IEMTLB_CALC_TAG_NO_REV(GCPtrFirst);
        PIEMTLBENTRY   pTlbe     = IEMTLB_TAG_TO_EVEN_ENTRY(&pVCpu->iem.s.CodeTlb, uTagNoRev);
        if (   pTlbe->uTag == (uTagNoRev | pVCpu->iem.s.CodeTlb.uTlbRevision)
            || (pTlbe = pTlbe + 1)->uTag == (uTagNoRev | pVCpu->iem.s.CodeTlb.uTlbRevisionGlobal))
        {
            /* likely when executing lots of code, otherwise unlikely */
# ifdef IEM_WITH_TLB_STATISTICS
            pVCpu->iem.s.CodeTlb.cTlbCoreHits++;
# endif
            Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_ACCESSED));

            /* Check TLB page table level access flags. */
            if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
            {
                if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && IEM_GET_CPL(pVCpu) == 3)
                {
                    Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
                    iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
                }
                if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
                {
                    Log(("iemOpcodeFetchBytesJmp: %RGv - NX\n", GCPtrFirst));
                    iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
                }
            }

            /* Look up the physical page info if necessary. */
            if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
            { /* not necessary */ }
            else
            {
                if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
                { /* likely */ }
                else
                    IEMTlbInvalidateAllPhysicalSlow(pVCpu);
                pTlbe->fFlagsAndPhysRev &= ~IEMTLBE_GCPHYS2PTR_MASK;
                int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
                                                    &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
                AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
            }
        }
        else
        {
            pVCpu->iem.s.CodeTlb.cTlbCoreMisses++;

            /* This page table walking will set A bits as required by the access while performing the walk.
               ASSUMES these are set when the address is translated rather than on commit... */
            /** @todo testcase: check when A bits are actually set by the CPU for code. */
            PGMPTWALKFAST WalkFast;
            int rc = PGMGstQueryPageFast(pVCpu, GCPtrFirst,
                                         IEM_GET_CPL(pVCpu) == 3 ? PGMQPAGE_F_EXECUTE | PGMQPAGE_F_USER_MODE : PGMQPAGE_F_EXECUTE,
                                         &WalkFast);
            if (RT_SUCCESS(rc))
                Assert((WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED) && WalkFast.fFailed == PGM_WALKFAIL_SUCCESS);
            else
            {
# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
                /** @todo Nested VMX: Need to handle EPT violation/misconfig here?  OF COURSE! */
                Assert(!(WalkFast.fFailed & PGM_WALKFAIL_EPT));
# endif
                Log(("iemOpcodeFetchBytesJmp: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
                iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, rc);
            }

            AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
            if (   !(WalkFast.fEffective & PGM_PTATTRS_G_MASK)
                || IEM_GET_CPL(pVCpu) != 0) /* optimization: Only use the PTE.G=1 entries in ring-0. */
            {
                pTlbe--;
                pTlbe->uTag = uTagNoRev | pVCpu->iem.s.CodeTlb.uTlbRevision;
                if (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE)
                    iemTlbLoadedLargePage<false>(pVCpu, &pVCpu->iem.s.CodeTlb, uTagNoRev, RT_BOOL(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE));
# ifdef IEMTLB_WITH_LARGE_PAGE_BITMAP
                else
                    ASMBitClear(pVCpu->iem.s.CodeTlb.bmLargePage, IEMTLB_TAG_TO_EVEN_INDEX(uTagNoRev));
# endif
            }
            else
            {
                pVCpu->iem.s.CodeTlb.cTlbCoreGlobalLoads++;
                pTlbe->uTag = uTagNoRev | pVCpu->iem.s.CodeTlb.uTlbRevisionGlobal;
                if (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE)
                    iemTlbLoadedLargePage<true>(pVCpu, &pVCpu->iem.s.CodeTlb, uTagNoRev, RT_BOOL(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE));
# ifdef IEMTLB_WITH_LARGE_PAGE_BITMAP
                else
                    ASMBitClear(pVCpu->iem.s.CodeTlb.bmLargePage, IEMTLB_TAG_TO_EVEN_INDEX(uTagNoRev) + 1);
# endif
            }
            pTlbe->fFlagsAndPhysRev = (~WalkFast.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A))
                                    | (WalkFast.fEffective >> X86_PTE_PAE_BIT_NX) /*IEMTLBE_F_PT_NO_EXEC*/
                                    | (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE);
            RTGCPHYS const GCPhysPg = WalkFast.GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
            pTlbe->GCPhys      = GCPhysPg;
            pTlbe->pbMappingR3 = NULL;
            Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE));
            Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) || IEM_GET_CPL(pVCpu) != 3);
            Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_ACCESSED));

            if (!((uintptr_t)pTlbe & (sizeof(*pTlbe) * 2 - 1)))
                IEMTLBTRACE_LOAD(       pVCpu, GCPtrFirst, pTlbe->GCPhys, (uint32_t)pTlbe->fFlagsAndPhysRev, false);
            else
                IEMTLBTRACE_LOAD_GLOBAL(pVCpu, GCPtrFirst, pTlbe->GCPhys, (uint32_t)pTlbe->fFlagsAndPhysRev, false);

            /* Resolve the physical address. */
            if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
            { /* likely */ }
            else
                IEMTlbInvalidateAllPhysicalSlow(pVCpu);
            Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_GCPHYS2PTR_MASK));
            rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, GCPhysPg, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
                                            &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
            AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
        }

# if defined(IN_RING3) || defined(IN_RING0) /** @todo fixme */
        /*
         * Try do a direct read using the pbMappingR3 pointer.
         * Note!  Do not recheck the physical TLB revision number here as we have the
         *        wrong response to changes in the else case.  If someone is updating
         *        pVCpu->iem.s.CodeTlb.uTlbPhysRev in parallel to us, we should be fine
         *        pretending we always won the race.
         */
        if (    (pTlbe->fFlagsAndPhysRev & (/*IEMTLBE_F_PHYS_REV |*/ IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
             == /*pVCpu->iem.s.CodeTlb.uTlbPhysRev*/ 0U)
        {
            uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
            pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
            if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
            {
                pVCpu->iem.s.cbInstrBuf       = offPg + RT_MIN(15, cbMaxRead);
                pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
            }
            else
            {
                uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
                if (cbInstr + (uint32_t)cbDst <= 15)
                {
                    pVCpu->iem.s.cbInstrBuf       = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
                    pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
                }
                else
                {
                    Log(("iemOpcodeFetchBytesJmp: %04x:%08RX64 LB %#x + %#zx -> #GP(0)\n",
                         pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, cbDst));
                    iemRaiseGeneralProtectionFault0Jmp(pVCpu);
                }
            }
            if (cbDst <= cbMaxRead)
            {
                pVCpu->iem.s.fTbCrossedPage |= offPg == 0 || pVCpu->iem.s.fTbBranched != 0; /** @todo Spurious load effect on branch handling? */
# if 0 /* unused */
                pVCpu->iem.s.GCPhysInstrBufPrev = pVCpu->iem.s.GCPhysInstrBuf;
# endif
                pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
                pVCpu->iem.s.uInstrBufPc      = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
                pVCpu->iem.s.GCPhysInstrBuf   = pTlbe->GCPhys;
                pVCpu->iem.s.pbInstrBuf       = pTlbe->pbMappingR3;
                if (cbDst > 0) /* To make ASAN happy in the TLB load case. */
                    memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
                else
                    Assert(!pvDst);
                return;
            }
            pVCpu->iem.s.pbInstrBuf = NULL;

            memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
            pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
        }
# else
# error "refactor as needed"
        /*
         * If there is no special read handling, we can read a bit more and
         * put it in the prefetch buffer.
         */
        if (   cbDst < cbMaxRead
            && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
        {
            VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
                                                &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
            if (RT_LIKELY(rcStrict == VINF_SUCCESS))
            { /* likely */ }
            else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
            {
                Log(("iemOpcodeFetchBytesJmp: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
                     GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
                rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
                AssertStmt(rcStrict == VINF_SUCCESS, IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict)));
            }
            else
            {
                Log((RT_SUCCESS(rcStrict)
                     ? "iemOpcodeFetchBytesJmp: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
                     : "iemOpcodeFetchBytesJmp: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
                     GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
                IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
            }
        }
# endif
        /*
         * Special read handling, so only read exactly what's needed.
         * This is a highly unlikely scenario.
         */
        else
        {
            pVCpu->iem.s.CodeTlb.cTlbSlowCodeReadPath++;

            /* Check instruction length. */
            uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
            if (RT_LIKELY(cbInstr + cbDst <= 15))
            { /* likely */ }
            else
            {
                Log(("iemOpcodeFetchBytesJmp: %04x:%08RX64 LB %#x + %#zx -> #GP(0) [slow]\n",
                     pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, cbDst));
                iemRaiseGeneralProtectionFault0Jmp(pVCpu);
            }

            /* Do the reading. */
            uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
            if (cbToRead > 0)
            {
                VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
                                                    pvDst, cbToRead, PGMACCESSORIGIN_IEM);
                if (RT_LIKELY(rcStrict == VINF_SUCCESS))
                { /* likely */ }
                else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
                {
                    Log(("iemOpcodeFetchBytesJmp: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
                         GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), VBOXSTRICTRC_VAL(rcStrict), cbToRead));
                    rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
                    AssertStmt(rcStrict == VINF_SUCCESS, IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict)));
                }
                else
                {
                    Log((RT_SUCCESS(rcStrict)
                         ? "iemOpcodeFetchBytesJmp: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
                         : "iemOpcodeFetchBytesJmp: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
                         GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), VBOXSTRICTRC_VAL(rcStrict), cbToRead));
                    IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
                }
            }

            /* Update the state and probably return. */
            uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
            pVCpu->iem.s.fTbCrossedPage  |= offPg == 0 || pVCpu->iem.s.fTbBranched != 0;
# if 0 /* unused */
            pVCpu->iem.s.GCPhysInstrBufPrev = pVCpu->iem.s.GCPhysInstrBuf;
# endif
            pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
            pVCpu->iem.s.offInstrNextByte = offPg + cbInstr + cbToRead;
            pVCpu->iem.s.cbInstrBuf       = offPg + RT_MIN(15, cbMaxRead + cbInstr) - cbToRead - cbInstr;
            pVCpu->iem.s.cbInstrBufTotal  = X86_PAGE_SIZE; /** @todo ??? */
            pVCpu->iem.s.GCPhysInstrBuf   = pTlbe->GCPhys;
            pVCpu->iem.s.uInstrBufPc      = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
            pVCpu->iem.s.pbInstrBuf       = NULL;
            if (cbToRead == cbDst)
                return;
            Assert(cbToRead == cbMaxRead);
        }

        /*
         * More to read, loop.
         */
        cbDst -= cbMaxRead;
        pvDst  = (uint8_t *)pvDst + cbMaxRead;
    }
# else  /* !IN_RING3 */
    RT_NOREF(pvDst, cbDst);
    if (pvDst || cbDst)
        IEM_DO_LONGJMP(pVCpu, VERR_INTERNAL_ERROR);
# endif /* !IN_RING3 */
}
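
/* Sketch of the special TLB-load invocation described in the function
   comment above (illustrative; the surrounding recompiler logic is omitted):

   @code
        pVCpu->iem.s.pbInstrBuf = NULL;          // best practice per the docs above
        iemOpcodeFetchBytesJmp(pVCpu, 0, NULL);  // no bytes copied, just (re)loads
                                                 // the code TLB for the current PC
        // On return pbInstrBuf/cbInstrBuf/GCPhysInstrBuf describe the page.
   @endcode
*/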

#else /* !IEM_WITH_CODE_TLB */

/**
 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
 * exception if it fails.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure of the
 *                  calling thread.
 * @param   cbMin   The minimum number of bytes relative to offOpcode
 *                  that must be read.
 */
VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPUCC pVCpu, size_t cbMin) RT_NOEXCEPT
{
    /*
     * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
     *
     * First translate CS:rIP to a physical address.
     */
    uint8_t const   cbOpcode  = pVCpu->iem.s.cbOpcode;
    uint8_t const   offOpcode = pVCpu->iem.s.offOpcode;
    uint8_t const   cbLeft    = cbOpcode - offOpcode;
    Assert(cbLeft < cbMin);
    Assert(cbOpcode <= sizeof(pVCpu->iem.s.abOpcode));

    uint32_t        cbToTryRead;
    RTGCPTR         GCPtrNext;
    if (IEM_IS_64BIT_CODE(pVCpu))
    {
        GCPtrNext = pVCpu->cpum.GstCtx.rip + cbOpcode;
        if (!IEM_IS_CANONICAL(GCPtrNext))
            return iemRaiseGeneralProtectionFault0(pVCpu);
        cbToTryRead = GUEST_PAGE_SIZE - (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
    }
    else
    {
        uint32_t GCPtrNext32 = pVCpu->cpum.GstCtx.eip;
        /* Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu)); - this is allowed */
        GCPtrNext32 += cbOpcode;
        if (GCPtrNext32 > pVCpu->cpum.GstCtx.cs.u32Limit)
            /** @todo For CPUs older than the 386, we should not generate \#GP here but wrap around! */
            return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
        cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrNext32 + 1;
        if (!cbToTryRead) /* overflowed */
        {
            Assert(GCPtrNext32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
            cbToTryRead = UINT32_MAX;
            /** @todo check out wrapping around the code segment. */
        }
        if (cbToTryRead < cbMin - cbLeft)
            return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
        GCPtrNext = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrNext32;

        uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
        if (cbToTryRead > cbLeftOnPage)
            cbToTryRead = cbLeftOnPage;
    }

    /* Restrict to opcode buffer space.

       We're making ASSUMPTIONS here based on work done previously in
       iemInitDecoderAndPrefetchOpcodes, where bytes from the first page will
       be fetched in case of an instruction crossing two pages. */
    if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - cbOpcode)
        cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - cbOpcode;
    if (RT_LIKELY(cbToTryRead + cbLeft >= cbMin))
    { /* likely */ }
    else
    {
        Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0)\n",
             pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, offOpcode, cbMin));
        return iemRaiseGeneralProtectionFault0(pVCpu);
    }

    PGMPTWALKFAST WalkFast;
    int rc = PGMGstQueryPageFast(pVCpu, GCPtrNext,
                                 IEM_GET_CPL(pVCpu) == 3 ? PGMQPAGE_F_EXECUTE | PGMQPAGE_F_USER_MODE : PGMQPAGE_F_EXECUTE,
                                 &WalkFast);
    if (RT_SUCCESS(rc))
        Assert((WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED) && WalkFast.fFailed == PGM_WALKFAIL_SUCCESS);
    else
    {
        Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
        if (WalkFast.fFailed & PGM_WALKFAIL_EPT)
            IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
#endif
        return iemRaisePageFault(pVCpu, GCPtrNext, 1, IEM_ACCESS_INSTRUCTION, rc);
    }
    Assert((WalkFast.fEffective & X86_PTE_US) || IEM_GET_CPL(pVCpu) != 3);
    Assert(!(WalkFast.fEffective & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE));

    RTGCPHYS const GCPhys = WalkFast.GCPhys;
    Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, cbOpcode));

    /*
     * Read the bytes at this address.
     *
     * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
     * and since PATM should only patch the start of an instruction there
     * should be no need to check again here.
     */
    if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
    {
        VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[cbOpcode],
                                            cbToTryRead, PGMACCESSORIGIN_IEM);
        if (RT_LIKELY(rcStrict == VINF_SUCCESS))
        { /* likely */ }
        else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
        {
            Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
                 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
            rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
        }
        else
        {
            Log((RT_SUCCESS(rcStrict)
                 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
                 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
                 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
            return rcStrict;
        }
    }
    else
    {
        rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[cbOpcode], GCPhys, cbToTryRead);
        if (RT_SUCCESS(rc))
        { /* likely */ }
        else
        {
            Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
            return rc;
        }
    }
    pVCpu->iem.s.cbOpcode = cbOpcode + cbToTryRead;
    Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));

    return VINF_SUCCESS;
}

#endif /* !IEM_WITH_CODE_TLB */
#ifndef IEM_WITH_SETJMP

/**
 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure of the
 *                  calling thread.
 * @param   pb      Where to return the opcode byte.
 */
VBOXSTRICTRC iemOpcodeGetNextU8Slow(PVMCPUCC pVCpu, uint8_t *pb) RT_NOEXCEPT
{
    VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
    if (rcStrict == VINF_SUCCESS)
    {
        uint8_t offOpcode = pVCpu->iem.s.offOpcode;
        *pb = pVCpu->iem.s.abOpcode[offOpcode];
        pVCpu->iem.s.offOpcode = offOpcode + 1;
    }
    else
        *pb = 0;
    return rcStrict;
}

#else  /* IEM_WITH_SETJMP */

/**
 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
 *
 * @returns The opcode byte.
 * @param   pVCpu   The cross context virtual CPU structure of the calling thread.
 */
uint8_t iemOpcodeGetNextU8SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
{
# ifdef IEM_WITH_CODE_TLB
    uint8_t u8;
    iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
    return u8;
# else
    VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
    if (rcStrict == VINF_SUCCESS)
        return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
    IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
# endif
}

#endif /* IEM_WITH_SETJMP */

#ifndef IEM_WITH_SETJMP

/**
 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure of the calling thread.
 * @param   pu16    Where to return the sign-extended opcode byte as a word.
 */
VBOXSTRICTRC iemOpcodeGetNextS8SxU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
{
    uint8_t      u8;
    VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
    if (rcStrict == VINF_SUCCESS)
        *pu16 = (int8_t)u8;
    return rcStrict;
}


/**
 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure of the calling thread.
 * @param   pu32    Where to return the sign-extended opcode byte as a dword.
 */
VBOXSTRICTRC iemOpcodeGetNextS8SxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
{
    uint8_t      u8;
    VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
    if (rcStrict == VINF_SUCCESS)
        *pu32 = (int8_t)u8;
    return rcStrict;
}


/**
 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure of the calling thread.
 * @param   pu64    Where to return the sign-extended opcode byte as a qword.
 */
VBOXSTRICTRC iemOpcodeGetNextS8SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
{
    uint8_t      u8;
    VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
    if (rcStrict == VINF_SUCCESS)
        *pu64 = (int8_t)u8;
    return rcStrict;
}

#endif /* !IEM_WITH_SETJMP */


#ifndef IEM_WITH_SETJMP

/**
 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure of the calling thread.
 * @param   pu16    Where to return the opcode word.
 */
VBOXSTRICTRC iemOpcodeGetNextU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
{
    VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
    if (rcStrict == VINF_SUCCESS)
    {
        uint8_t offOpcode = pVCpu->iem.s.offOpcode;
# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
        *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
# else
        *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
# endif
        pVCpu->iem.s.offOpcode = offOpcode + 2;
    }
    else
        *pu16 = 0;
    return rcStrict;
}
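
/* Worked example for the RT_MAKE_U16 path above: x86 opcodes are little
   endian, so with abOpcode[offOpcode] = 0x34 and abOpcode[offOpcode + 1] = 0x12
   we get RT_MAKE_U16(0x34, 0x12) = 0x1234, matching what the unaligned
   uint16_t read produces on a little-endian host. */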

#else  /* IEM_WITH_SETJMP */

/**
 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error.
 *
 * @returns The opcode word.
 * @param   pVCpu   The cross context virtual CPU structure of the calling thread.
 */
uint16_t iemOpcodeGetNextU16SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
{
# ifdef IEM_WITH_CODE_TLB
    uint16_t u16;
    iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
    return u16;
# else
    VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
    if (rcStrict == VINF_SUCCESS)
    {
        uint8_t offOpcode = pVCpu->iem.s.offOpcode;
        pVCpu->iem.s.offOpcode += 2;
#  ifdef IEM_USE_UNALIGNED_DATA_ACCESS
        return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
#  else
        return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
#  endif
    }
    IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
# endif
}

#endif /* IEM_WITH_SETJMP */

#ifndef IEM_WITH_SETJMP

/**
 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure of the calling thread.
 * @param   pu32    Where to return the zero-extended opcode word as a dword.
 */
VBOXSTRICTRC iemOpcodeGetNextU16ZxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
{
    VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
    if (rcStrict == VINF_SUCCESS)
    {
        uint8_t offOpcode = pVCpu->iem.s.offOpcode;
        *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
        pVCpu->iem.s.offOpcode = offOpcode + 2;
    }
    else
        *pu32 = 0;
    return rcStrict;
}


/**
 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure of the calling thread.
 * @param   pu64    Where to return the zero-extended opcode word as a qword.
 */
VBOXSTRICTRC iemOpcodeGetNextU16ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
{
    VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
    if (rcStrict == VINF_SUCCESS)
    {
        uint8_t offOpcode = pVCpu->iem.s.offOpcode;
        *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
        pVCpu->iem.s.offOpcode = offOpcode + 2;
    }
    else
        *pu64 = 0;
    return rcStrict;
}

#endif /* !IEM_WITH_SETJMP */

#ifndef IEM_WITH_SETJMP

/**
 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure of the calling thread.
 * @param   pu32    Where to return the opcode dword.
 */
VBOXSTRICTRC iemOpcodeGetNextU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
{
    VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
    if (rcStrict == VINF_SUCCESS)
    {
        uint8_t offOpcode = pVCpu->iem.s.offOpcode;
# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
        *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
# else
        *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
                                    pVCpu->iem.s.abOpcode[offOpcode + 1],
                                    pVCpu->iem.s.abOpcode[offOpcode + 2],
                                    pVCpu->iem.s.abOpcode[offOpcode + 3]);
# endif
        pVCpu->iem.s.offOpcode = offOpcode + 4;
    }
    else
        *pu32 = 0;
    return rcStrict;
}

#else  /* IEM_WITH_SETJMP */

/**
 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
 *
 * @returns The opcode dword.
 * @param   pVCpu   The cross context virtual CPU structure of the calling thread.
 */
uint32_t iemOpcodeGetNextU32SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
{
# ifdef IEM_WITH_CODE_TLB
    uint32_t u32;
    iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
    return u32;
# else
    VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
    if (rcStrict == VINF_SUCCESS)
    {
        uint8_t offOpcode = pVCpu->iem.s.offOpcode;
        pVCpu->iem.s.offOpcode = offOpcode + 4;
#  ifdef IEM_USE_UNALIGNED_DATA_ACCESS
        return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
#  else
        return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
                                   pVCpu->iem.s.abOpcode[offOpcode + 1],
                                   pVCpu->iem.s.abOpcode[offOpcode + 2],
                                   pVCpu->iem.s.abOpcode[offOpcode + 3]);
#  endif
    }
    IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
# endif
}

#endif /* IEM_WITH_SETJMP */

#ifndef IEM_WITH_SETJMP

/**
 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure of the calling thread.
 * @param   pu64    Where to return the zero-extended opcode dword as a qword.
 */
VBOXSTRICTRC iemOpcodeGetNextU32ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
{
    VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
    if (rcStrict == VINF_SUCCESS)
    {
        uint8_t offOpcode = pVCpu->iem.s.offOpcode;
        *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
                                    pVCpu->iem.s.abOpcode[offOpcode + 1],
                                    pVCpu->iem.s.abOpcode[offOpcode + 2],
                                    pVCpu->iem.s.abOpcode[offOpcode + 3]);
        pVCpu->iem.s.offOpcode = offOpcode + 4;
    }
    else
        *pu64 = 0;
    return rcStrict;
}


/**
 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure of the calling thread.
 * @param   pu64    Where to return the sign-extended opcode dword as a qword.
 */
VBOXSTRICTRC iemOpcodeGetNextS32SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
{
    VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
    if (rcStrict == VINF_SUCCESS)
    {
        uint8_t offOpcode = pVCpu->iem.s.offOpcode;
        *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
                                             pVCpu->iem.s.abOpcode[offOpcode + 1],
                                             pVCpu->iem.s.abOpcode[offOpcode + 2],
                                             pVCpu->iem.s.abOpcode[offOpcode + 3]);
        pVCpu->iem.s.offOpcode = offOpcode + 4;
    }
    else
        *pu64 = 0;
    return rcStrict;
}

#endif /* !IEM_WITH_SETJMP */

#ifndef IEM_WITH_SETJMP

/**
 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure of the calling thread.
 * @param   pu64    Where to return the opcode qword.
 */
VBOXSTRICTRC iemOpcodeGetNextU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
{
    VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
    if (rcStrict == VINF_SUCCESS)
    {
        uint8_t offOpcode = pVCpu->iem.s.offOpcode;
# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
        *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
# else
        *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
                                    pVCpu->iem.s.abOpcode[offOpcode + 1],
                                    pVCpu->iem.s.abOpcode[offOpcode + 2],
                                    pVCpu->iem.s.abOpcode[offOpcode + 3],
                                    pVCpu->iem.s.abOpcode[offOpcode + 4],
                                    pVCpu->iem.s.abOpcode[offOpcode + 5],
                                    pVCpu->iem.s.abOpcode[offOpcode + 6],
                                    pVCpu->iem.s.abOpcode[offOpcode + 7]);
# endif
        pVCpu->iem.s.offOpcode = offOpcode + 8;
    }
    else
        *pu64 = 0;
    return rcStrict;
}

#else  /* IEM_WITH_SETJMP */

/**
 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
 *
 * @returns The opcode qword.
 * @param   pVCpu   The cross context virtual CPU structure of the calling thread.
 */
uint64_t iemOpcodeGetNextU64SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
{
# ifdef IEM_WITH_CODE_TLB
    uint64_t u64;
    iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
    return u64;
# else
    VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
    if (rcStrict == VINF_SUCCESS)
    {
        uint8_t offOpcode = pVCpu->iem.s.offOpcode;
        pVCpu->iem.s.offOpcode = offOpcode + 8;
#  ifdef IEM_USE_UNALIGNED_DATA_ACCESS
        return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
#  else
        return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
                                   pVCpu->iem.s.abOpcode[offOpcode + 1],
                                   pVCpu->iem.s.abOpcode[offOpcode + 2],
                                   pVCpu->iem.s.abOpcode[offOpcode + 3],
                                   pVCpu->iem.s.abOpcode[offOpcode + 4],
                                   pVCpu->iem.s.abOpcode[offOpcode + 5],
                                   pVCpu->iem.s.abOpcode[offOpcode + 6],
                                   pVCpu->iem.s.abOpcode[offOpcode + 7]);
#  endif
    }
    IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
# endif
}

#endif /* IEM_WITH_SETJMP */



/** @name   Misc Worker Functions.
 * @{
 */

/**
 * Gets the exception class for the specified exception vector.
 *
 * @returns The class of the specified exception.
 * @param   uVector     The exception vector.
 */
static IEMXCPTCLASS iemGetXcptClass(uint8_t uVector) RT_NOEXCEPT
{
    Assert(uVector <= X86_XCPT_LAST);
    switch (uVector)
    {
        case X86_XCPT_DE:
        case X86_XCPT_TS:
        case X86_XCPT_NP:
        case X86_XCPT_SS:
        case X86_XCPT_GP:
        case X86_XCPT_SX:   /* AMD only */
            return IEMXCPTCLASS_CONTRIBUTORY;

        case X86_XCPT_PF:
        case X86_XCPT_VE:   /* Intel only */
            return IEMXCPTCLASS_PAGE_FAULT;

        case X86_XCPT_DF:
            return IEMXCPTCLASS_DOUBLE_FAULT;
    }
    return IEMXCPTCLASS_BENIGN;
}
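
/* For instance, per the switch above (these hold by construction):

   @code
        Assert(iemGetXcptClass(X86_XCPT_GP) == IEMXCPTCLASS_CONTRIBUTORY);
        Assert(iemGetXcptClass(X86_XCPT_PF) == IEMXCPTCLASS_PAGE_FAULT);
        Assert(iemGetXcptClass(X86_XCPT_NM) == IEMXCPTCLASS_BENIGN);
   @endcode

   The classes feed the double/triple fault decisions made by
   IEMEvaluateRecursiveXcpt below. */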


/**
 * Evaluates how to handle an exception caused during delivery of another event
 * (exception / interrupt).
 *
 * @returns How to handle the recursive exception.
 * @param   pVCpu               The cross context virtual CPU structure of the
 *                              calling thread.
 * @param   fPrevFlags          The flags of the previous event.
 * @param   uPrevVector         The vector of the previous event.
 * @param   fCurFlags           The flags of the current exception.
 * @param   uCurVector          The vector of the current exception.
 * @param   pfXcptRaiseInfo     Where to store additional information about the
 *                              exception condition.  Optional.
 */
VMM_INT_DECL(IEMXCPTRAISE) IEMEvaluateRecursiveXcpt(PVMCPUCC pVCpu, uint32_t fPrevFlags, uint8_t uPrevVector, uint32_t fCurFlags,
                                                    uint8_t uCurVector, PIEMXCPTRAISEINFO pfXcptRaiseInfo)
{
    /*
     * Only CPU exceptions can be raised while delivering other events; software-interrupt
     * (INTn/INT3/INTO/ICEBP) generated exceptions cannot occur as the current (second) exception.
     */
    AssertReturn(fCurFlags & IEM_XCPT_FLAGS_T_CPU_XCPT, IEMXCPTRAISE_INVALID);
    Assert(pVCpu); RT_NOREF(pVCpu);
    Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x\n", uPrevVector, uCurVector));

    IEMXCPTRAISE     enmRaise   = IEMXCPTRAISE_CURRENT_XCPT;
    IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
    if (fPrevFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
    {
        IEMXCPTCLASS enmPrevXcptClass = iemGetXcptClass(uPrevVector);
        if (enmPrevXcptClass != IEMXCPTCLASS_BENIGN)
        {
            IEMXCPTCLASS enmCurXcptClass = iemGetXcptClass(uCurVector);
            if (   enmPrevXcptClass == IEMXCPTCLASS_PAGE_FAULT
                && (   enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT
                    || enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY))
            {
                enmRaise   = IEMXCPTRAISE_DOUBLE_FAULT;
                fRaiseInfo = enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT ? IEMXCPTRAISEINFO_PF_PF
                                                                        : IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT;
                Log2(("IEMEvaluateRecursiveXcpt: Vectoring page fault. uPrevVector=%#x uCurVector=%#x uCr2=%#RX64\n", uPrevVector,
                      uCurVector, pVCpu->cpum.GstCtx.cr2));
            }
            else if (   enmPrevXcptClass == IEMXCPTCLASS_CONTRIBUTORY
                     && enmCurXcptClass  == IEMXCPTCLASS_CONTRIBUTORY)
            {
                enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
                Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x -> #DF\n", uPrevVector, uCurVector));
            }
            else if (   enmPrevXcptClass == IEMXCPTCLASS_DOUBLE_FAULT
                     && (   enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY
                         || enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT))
            {
                enmRaise = IEMXCPTRAISE_TRIPLE_FAULT;
                Log2(("IEMEvaluateRecursiveXcpt: #DF handler raised a %#x exception -> triple fault\n", uCurVector));
            }
        }
        else
        {
            if (uPrevVector == X86_XCPT_NMI)
            {
                fRaiseInfo = IEMXCPTRAISEINFO_NMI_XCPT;
                if (uCurVector == X86_XCPT_PF)
                {
                    fRaiseInfo |= IEMXCPTRAISEINFO_NMI_PF;
                    Log2(("IEMEvaluateRecursiveXcpt: NMI delivery caused a page fault\n"));
                }
            }
            else if (   uPrevVector == X86_XCPT_AC
                     && uCurVector  == X86_XCPT_AC)
            {
                enmRaise   = IEMXCPTRAISE_CPU_HANG;
                fRaiseInfo = IEMXCPTRAISEINFO_AC_AC;
                Log2(("IEMEvaluateRecursiveXcpt: Recursive #AC - Bad guest\n"));
            }
        }
    }
    else if (fPrevFlags & IEM_XCPT_FLAGS_T_EXT_INT)
    {
        fRaiseInfo = IEMXCPTRAISEINFO_EXT_INT_XCPT;
        if (uCurVector == X86_XCPT_PF)
            fRaiseInfo |= IEMXCPTRAISEINFO_EXT_INT_PF;
    }
    else
    {
        Assert(fPrevFlags & IEM_XCPT_FLAGS_T_SOFT_INT);
        fRaiseInfo = IEMXCPTRAISEINFO_SOFT_INT_XCPT;
    }

    if (pfXcptRaiseInfo)
        *pfXcptRaiseInfo = fRaiseInfo;
    return enmRaise;
}
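
/* Usage sketch (hypothetical call, mirroring the #PF-during-#PF case; the
   real call sites live in the event delivery / nested-VM exit code):

   @code
        IEMXCPTRAISEINFO fRaiseInfo;
        IEMXCPTRAISE const enmRaise = IEMEvaluateRecursiveXcpt(pVCpu,
                                                               IEM_XCPT_FLAGS_T_CPU_XCPT, X86_XCPT_PF,
                                                               IEM_XCPT_FLAGS_T_CPU_XCPT, X86_XCPT_PF,
                                                               &fRaiseInfo);
        Assert(enmRaise == IEMXCPTRAISE_DOUBLE_FAULT);
        Assert(fRaiseInfo & IEMXCPTRAISEINFO_PF_PF);
   @endcode
*/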
|
---|
2398 |
|
---|
2399 |
|
---|
2400 | /**
|
---|
2401 | * Enters the CPU shutdown state initiated by a triple fault or other
|
---|
2402 | * unrecoverable conditions.
|
---|
2403 | *
|
---|
2404 | * @returns Strict VBox status code.
|
---|
2405 | * @param pVCpu The cross context virtual CPU structure of the
|
---|
2406 | * calling thread.
|
---|
2407 | */
|
---|
2408 | static VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPUCC pVCpu) RT_NOEXCEPT
|
---|
2409 | {
|
---|
2410 | if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
|
---|
2411 | IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(pVCpu, VMX_EXIT_TRIPLE_FAULT, 0 /* u64ExitQual */);
|
---|
2412 |
|
---|
2413 | if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
|
---|
2414 | {
|
---|
2415 | Log2(("shutdown: Guest intercept -> #VMEXIT\n"));
|
---|
2416 | IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
|
---|
2417 | }
|
---|
2418 |
|
---|
2419 | RT_NOREF(pVCpu);
|
---|
2420 | return VINF_EM_TRIPLE_FAULT;
|
---|
2421 | }
|
---|
2422 |
|
---|
2423 |
|
---|
/**
 * Validates a new SS segment.
 *
 * @returns VBox strict status code.
 * @param   pVCpu   The cross context virtual CPU structure of the
 *                  calling thread.
 * @param   NewSS   The new SS selector.
 * @param   uCpl    The CPL to load the stack for.
 * @param   pDesc   Where to return the descriptor.
 */
static VBOXSTRICTRC iemMiscValidateNewSS(PVMCPUCC pVCpu, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc) RT_NOEXCEPT
{
    /* Null selectors are not allowed (we're not called for dispatching
       interrupts with SS=0 in long mode). */
    if (!(NewSS & X86_SEL_MASK_OFF_RPL))
    {
        Log(("iemMiscValidateNewSS: %#x - null selector -> #TS(0)\n", NewSS));
        return iemRaiseTaskSwitchFault0(pVCpu);
    }

    /** @todo testcase: check that the TSS.ssX RPL is checked.  Also check when. */
    if ((NewSS & X86_SEL_RPL) != uCpl)
    {
        Log(("iemMiscValidateNewSS: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl));
        return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
    }

    /*
     * Read the descriptor.
     */
    VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;

    /*
     * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
     */
    if (!pDesc->Legacy.Gen.u1DescType)
    {
        Log(("iemMiscValidateNewSS: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
        return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
    }

    if (   (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
        || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
    {
        Log(("iemMiscValidateNewSS: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
        return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
    }
    if (pDesc->Legacy.Gen.u2Dpl != uCpl)
    {
        Log(("iemMiscValidateNewSS: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
        return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
    }

    /* Is it there? */
    /** @todo testcase: Is this checked before the canonical / limit check below? */
    if (!pDesc->Legacy.Gen.u1Present)
    {
        Log(("iemMiscValidateNewSS: %#x - segment not present -> #NP\n", NewSS));
        return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
    }

    return VINF_SUCCESS;
}

/** @} */


/** @name Raising Exceptions.
 *
 * @{
 */

/**
 * Loads the specified stack far pointer from the TSS.
 *
 * @returns VBox strict status code.
 * @param   pVCpu   The cross context virtual CPU structure of the calling thread.
 * @param   uCpl    The CPL to load the stack for.
 * @param   pSelSS  Where to return the new stack segment.
 * @param   puEsp   Where to return the new stack pointer.
 */
static VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPUCC pVCpu, uint8_t uCpl, PRTSEL pSelSS, uint32_t *puEsp) RT_NOEXCEPT
{
    VBOXSTRICTRC rcStrict;
    Assert(uCpl < 4);

    IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
    switch (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type)
    {
        /*
         * 16-bit TSS (X86TSS16).
         */
        case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
        case X86_SEL_TYPE_SYS_286_TSS_BUSY:
        {
            uint32_t off = uCpl * 4 + 2;
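            /* In the 16-bit TSS the {sp,ss} pairs for CPL 0..2 follow directly
               after the 16-bit back-link field (X86TSS16: sp0 at offset 2, ss0
               at 4, sp1 at 6, ...), hence 4 bytes per CPL with a 2 byte bias. */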
            if (off + 4 <= pVCpu->cpum.GstCtx.tr.u32Limit)
            {
                /** @todo check actual access pattern here. */
                uint32_t u32Tmp = 0; /* gcc maybe... */
                rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
                if (rcStrict == VINF_SUCCESS)
                {
                    *puEsp  = RT_LOWORD(u32Tmp);
                    *pSelSS = RT_HIWORD(u32Tmp);
                    return VINF_SUCCESS;
                }
            }
            else
            {
                Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
                rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
            }
            break;
        }

        /*
         * 32-bit TSS (X86TSS32).
         */
        case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
        case X86_SEL_TYPE_SYS_386_TSS_BUSY:
        {
            uint32_t off = uCpl * 8 + 4;
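            /* The 32-bit TSS widens everything to dwords: esp0 at offset 4, ss0
               at 8, esp1 at 12, ..., i.e. 8 bytes per CPL with a 4 byte bias. */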
            if (off + 7 <= pVCpu->cpum.GstCtx.tr.u32Limit)
            {
                /** @todo check actual access pattern here. */
                uint64_t u64Tmp;
                rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
                if (rcStrict == VINF_SUCCESS)
                {
                    *puEsp  = u64Tmp & UINT32_MAX;
                    *pSelSS = (RTSEL)(u64Tmp >> 32);
                    return VINF_SUCCESS;
                }
            }
            else
            {
                Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
                rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
            }
            break;
        }

        default:
            AssertFailed();
            rcStrict = VERR_IEM_IPE_4;
            break;
    }

    *puEsp  = 0; /* make gcc happy */
    *pSelSS = 0; /* make gcc happy */
    return rcStrict;
}

/**
 * Loads the specified stack pointer from the 64-bit TSS.
 *
 * @returns VBox strict status code.
 * @param   pVCpu   The cross context virtual CPU structure of the calling thread.
 * @param   uCpl    The CPL to load the stack for.
 * @param   uIst    The interrupt stack table index, 0 if to use uCpl.
 * @param   puRsp   Where to return the new stack pointer.
 */
static VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPUCC pVCpu, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp) RT_NOEXCEPT
{
    Assert(uCpl < 4);
    Assert(uIst < 8);
    *puRsp = 0; /* make gcc happy */

    IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
    AssertReturn(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);

    uint32_t off;
    if (uIst)
        off = (uIst - 1) * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, ist1);
    else
        off = uCpl * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, rsp0);
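    /* A non-zero IST index in the IDT gate unconditionally selects one of
       ist1..ist7; only with IST=0 does the target privilege level pick one of
       the rsp0..rsp2 fields. */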
    if (off + sizeof(uint64_t) > pVCpu->cpum.GstCtx.tr.u32Limit)
    {
        Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pVCpu->cpum.GstCtx.tr.u32Limit));
        return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
    }

    return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
}

/**
 * Adjust the CPU state according to the exception being raised.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   u8Vector    The exception that has been raised.
 */
DECLINLINE(void) iemRaiseXcptAdjustState(PVMCPUCC pVCpu, uint8_t u8Vector)
{
    switch (u8Vector)
    {
        case X86_XCPT_DB:
            IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);
            pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
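            /* Clearing DR7.GD on #DB delivery lets the handler access the debug
               registers without instantly re-triggering a general-detect #DB. */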
            break;
        /** @todo Read the AMD and Intel exception reference... */
    }
}


/**
 * Implements exceptions and interrupts for real mode.
 *
 * @returns VBox strict status code.
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   cbInstr     The number of bytes to offset rIP by in the return
 *                      address.
 * @param   u8Vector    The interrupt / exception vector number.
 * @param   fFlags      The flags.
 * @param   uErr        The error value if IEM_XCPT_FLAGS_ERR is set.
 * @param   uCr2        The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
 */
static VBOXSTRICTRC
iemRaiseXcptOrIntInRealMode(PVMCPUCC pVCpu,
                            uint8_t cbInstr,
                            uint8_t u8Vector,
                            uint32_t fFlags,
                            uint16_t uErr,
                            uint64_t uCr2) RT_NOEXCEPT
{
    NOREF(uErr); NOREF(uCr2);
    IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);

    /*
     * Read the IDT entry.
     */
    if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
    {
        Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
        return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
    }
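    /* Each real-mode IVT entry is 4 bytes: a 16-bit offset followed by a 16-bit
       segment, which is exactly the RTFAR16 layout fetched below. */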
    RTFAR16 Idte;
    VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(4) * u8Vector);
    if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
    {
        Log(("iemRaiseXcptOrIntInRealMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
        return rcStrict;
    }

#ifdef LOG_ENABLED
    /* If software interrupt, try decode it if logging is enabled and such. */
    if (   (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
        && LogIsItEnabled(RTLOGGRPFLAGS_ENABLED, LOG_GROUP_IEM_SYSCALL))
        iemLogSyscallRealModeInt(pVCpu, u8Vector, cbInstr);
#endif

    /*
     * Push the stack frame.
     */
    uint8_t   bUnmapInfo;
    uint16_t *pu16Frame;
    uint64_t  uNewRsp;
    rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, 3, (void **)&pu16Frame, &bUnmapInfo, &uNewRsp);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;

    uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
    AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
    if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
        fEfl |= UINT16_C(0xf000);
#endif
    pu16Frame[2] = (uint16_t)fEfl;
    pu16Frame[1] = (uint16_t)pVCpu->cpum.GstCtx.cs.Sel;
    pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
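    /* The 6 byte frame matches what IRET pops: IP at the lowest address, then
       CS, then FLAGS (with the 8086/80186 quirk above forcing bits 12-15 set). */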
    rcStrict = iemMemStackPushCommitSpecial(pVCpu, bUnmapInfo, uNewRsp);
    if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
        return rcStrict;

    /*
     * Load the vector address into cs:ip and make exception specific state
     * adjustments.
     */
    pVCpu->cpum.GstCtx.cs.Sel      = Idte.sel;
    pVCpu->cpum.GstCtx.cs.ValidSel = Idte.sel;
    pVCpu->cpum.GstCtx.cs.fFlags   = CPUMSELREG_FLAGS_VALID;
    pVCpu->cpum.GstCtx.cs.u64Base  = (uint32_t)Idte.sel << 4;
    /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
    pVCpu->cpum.GstCtx.rip = Idte.off;
    fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC);
    IEMMISC_SET_EFL(pVCpu, fEfl);

    /** @todo do we actually do this in real mode? */
    if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
        iemRaiseXcptAdjustState(pVCpu, u8Vector);

    /*
     * Deal with debug events that follow the exception and clear inhibit flags.
     */
    if (   !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
        || !(pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK))
        pVCpu->cpum.GstCtx.eflags.uBoth &= ~(CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_INHIBIT_SHADOW);
    else
    {
        Log(("iemRaiseXcptOrIntInRealMode: Raising #DB after %#x; pending=%#x\n",
             u8Vector, pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK));
        IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR6);
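        /* Fold the pending (non-silent) DRx hit flags into DR6.B0-B3 before
           raising the #DB that trails the event just dispatched. */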
        pVCpu->cpum.GstCtx.dr[6] |= (pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK_NONSILENT)
                                 >> CPUMCTX_DBG_HIT_DRX_SHIFT;
        pVCpu->cpum.GstCtx.eflags.uBoth &= ~(CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_INHIBIT_SHADOW);
        return iemRaiseDebugException(pVCpu);
    }

    /* The IEM_F_MODE_XXX and IEM_F_X86_CPL_MASK don't really change here,
       so best leave them alone in case we're in a weird kind of real mode... */

    return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
}


/**
 * Loads a NULL data selector into a segment register when coming from V8086
 * mode.
 *
 * @param   pVCpu   The cross context virtual CPU structure of the calling thread.
 * @param   pSReg   Pointer to the segment register.
 */
DECLINLINE(void) iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPUCC pVCpu, PCPUMSELREG pSReg)
{
    pSReg->Sel      = 0;
    pSReg->ValidSel = 0;
    if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
    {
        /* VT-x (Intel 3960x) doesn't change the base and limit, clears and sets the following attributes */
        pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
        pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
    }
    else
    {
        pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
        /** @todo check this on AMD-V */
        pSReg->u64Base  = 0;
        pSReg->u32Limit = 0;
    }
}


/**
 * Loads a segment selector during a task switch in V8086 mode.
 *
 * @param   pSReg   Pointer to the segment register.
 * @param   uSel    The selector value to load.
 */
DECLINLINE(void) iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
{
    /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
    pSReg->Sel      = uSel;
    pSReg->ValidSel = uSel;
    pSReg->fFlags   = CPUMSELREG_FLAGS_VALID;
    pSReg->u64Base  = uSel << 4;
    pSReg->u32Limit = 0xffff;
    pSReg->Attr.u   = 0xf3;
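    /* 0xf3 decodes to: accessed read/write data segment (type 3), S=1, DPL=3,
       present - i.e. the fixed attributes V8086 segments always carry. */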
}


/**
 * Loads a segment selector during a task switch in protected mode.
 *
 * In this task switch scenario, we would throw \#TS exceptions rather than
 * \#GPs.
 *
 * @returns VBox strict status code.
 * @param   pVCpu   The cross context virtual CPU structure of the calling thread.
 * @param   pSReg   Pointer to the segment register.
 * @param   uSel    The new selector value.
 *
 * @remarks This does _not_ handle CS or SS.
 * @remarks This expects IEM_GET_CPL(pVCpu) to return an up to date value.
 */
static VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPUCC pVCpu, PCPUMSELREG pSReg, uint16_t uSel) RT_NOEXCEPT
{
    Assert(!IEM_IS_64BIT_CODE(pVCpu));

    /* Null data selector. */
    if (!(uSel & X86_SEL_MASK_OFF_RPL))
    {
        iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
        Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
        CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
        return VINF_SUCCESS;
    }

    /* Fetch the descriptor. */
    IEMSELDESC Desc;
    VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
    if (rcStrict != VINF_SUCCESS)
    {
        Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
             VBOXSTRICTRC_VAL(rcStrict)));
        return rcStrict;
    }

    /* Must be a data segment or readable code segment. */
    if (   !Desc.Legacy.Gen.u1DescType
        || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
    {
        Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
             Desc.Legacy.Gen.u4Type));
        return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
    }

    /* Check privileges for data segments and non-conforming code segments. */
    if (   (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
        != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
    {
        /* The RPL and the new CPL must be less than or equal to the DPL. */
        if (   (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
            || (IEM_GET_CPL(pVCpu) > Desc.Legacy.Gen.u2Dpl))
        {
            Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
                 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
            return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
        }
    }

    /* Is it there? */
    if (!Desc.Legacy.Gen.u1Present)
    {
        Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
        return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
    }

    /* The base and limit. */
    uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
    uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);

    /*
     * Ok, everything checked out fine. Now set the accessed bit before
     * committing the result into the registers.
     */
    if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
    {
        rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
        if (rcStrict != VINF_SUCCESS)
            return rcStrict;
        Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
    }

    /* Commit */
    pSReg->Sel      = uSel;
    pSReg->Attr.u   = X86DESC_GET_HID_ATTR(&Desc.Legacy);
    pSReg->u32Limit = cbLimit;
    pSReg->u64Base  = u64Base;  /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
    pSReg->ValidSel = uSel;
    pSReg->fFlags   = CPUMSELREG_FLAGS_VALID;
    if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
        pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;

    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
    CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
    return VINF_SUCCESS;
}

/**
 * Performs a task switch.
 *
 * If the task switch is the result of a JMP, CALL or IRET instruction, the
 * caller is responsible for performing the necessary checks (like DPL, TSS
 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
 * reference for JMP, CALL, IRET.
 *
 * If the task switch is due to a software interrupt or hardware exception,
 * the caller is responsible for validating the TSS selector and descriptor. See
 * Intel Instruction reference for INT n.
 *
 * @returns VBox strict status code.
 * @param   pVCpu           The cross context virtual CPU structure of the calling thread.
 * @param   enmTaskSwitch   The cause of the task switch.
 * @param   uNextEip        The EIP effective after the task switch.
 * @param   fFlags          The flags, see IEM_XCPT_FLAGS_XXX.
 * @param   uErr            The error value if IEM_XCPT_FLAGS_ERR is set.
 * @param   uCr2            The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
 * @param   SelTss          The TSS selector of the new task.
 * @param   pNewDescTss     Pointer to the new TSS descriptor.
 */
VBOXSTRICTRC
iemTaskSwitch(PVMCPUCC pVCpu,
              IEMTASKSWITCH enmTaskSwitch,
              uint32_t uNextEip,
              uint32_t fFlags,
              uint16_t uErr,
              uint64_t uCr2,
              RTSEL SelTss,
              PIEMSELDESC pNewDescTss) RT_NOEXCEPT
{
    Assert(!IEM_IS_REAL_MODE(pVCpu));
    Assert(!IEM_IS_64BIT_CODE(pVCpu));
    IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);

    uint32_t const uNewTssType = pNewDescTss->Legacy.Gate.u4Type;
    Assert(   uNewTssType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
           || uNewTssType == X86_SEL_TYPE_SYS_286_TSS_BUSY
           || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
           || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_BUSY);

    bool const fIsNewTss386 = (   uNewTssType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
                               || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_BUSY);

    Log(("iemTaskSwitch: enmTaskSwitch=%u NewTss=%#x fIsNewTss386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTss,
         fIsNewTss386, pVCpu->cpum.GstCtx.eip, uNextEip));

    /* Update CR2 in case it's a page-fault. */
    /** @todo This should probably be done much earlier in IEM/PGM. See
     *        @bugref{5653#c49}. */
    if (fFlags & IEM_XCPT_FLAGS_CR2)
        pVCpu->cpum.GstCtx.cr2 = uCr2;

    /*
     * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
     * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
     */
    uint32_t const uNewTssLimit    = pNewDescTss->Legacy.Gen.u16LimitLow | (pNewDescTss->Legacy.Gen.u4LimitHigh << 16);
    uint32_t const uNewTssLimitMin = fIsNewTss386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
    if (uNewTssLimit < uNewTssLimitMin)
    {
        Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTssLimit=%#x uNewTssLimitMin=%#x -> #TS\n",
             enmTaskSwitch, uNewTssLimit, uNewTssLimitMin));
        return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTss & X86_SEL_MASK_OFF_RPL);
    }

    /*
     * Task switches in VMX non-root mode always cause task-switch VM-exits.
     * The new TSS must have been read and validated (DPL, limits etc.) before a
     * task-switch VM-exit commences.
     *
     * See Intel spec. 25.4.2 "Treatment of Task Switches".
     */
    if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
    {
        Log(("iemTaskSwitch: Guest intercept (source=%u, sel=%#x) -> VM-exit.\n", enmTaskSwitch, SelTss));
        IEM_VMX_VMEXIT_TASK_SWITCH_RET(pVCpu, enmTaskSwitch, SelTss, uNextEip - pVCpu->cpum.GstCtx.eip);
    }

    /*
     * The SVM nested-guest intercept for task-switch takes priority over all exceptions
     * after validating the incoming (new) TSS, see AMD spec. 15.14.1 "Task Switch Intercept".
     */
    if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TASK_SWITCH))
    {
        uint64_t const uExitInfo1 = SelTss;
        uint64_t       uExitInfo2 = uErr;
        switch (enmTaskSwitch)
        {
            case IEMTASKSWITCH_JUMP: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_JUMP; break;
            case IEMTASKSWITCH_IRET: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_IRET; break;
            default: break;
        }
        if (fFlags & IEM_XCPT_FLAGS_ERR)
            uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_HAS_ERROR_CODE;
        if (pVCpu->cpum.GstCtx.eflags.Bits.u1RF)
            uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_EFLAGS_RF;

        Log(("iemTaskSwitch: Guest intercept -> #VMEXIT. uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uExitInfo1, uExitInfo2));
        IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_TASK_SWITCH, uExitInfo1, uExitInfo2);
        RT_NOREF2(uExitInfo1, uExitInfo2);
    }

    /*
     * Check the current TSS limit. The last written byte to the current TSS during the
     * task switch will be 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
     * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
     *
     * The AMD docs don't mention anything about limit checks with LTR which suggests you can
     * end up with smaller than "legal" TSS limits.
     */
    uint32_t const uCurTssLimit    = pVCpu->cpum.GstCtx.tr.u32Limit;
    uint32_t const uCurTssLimitMin = fIsNewTss386 ? 0x5F : 0x29;
    if (uCurTssLimit < uCurTssLimitMin)
    {
        Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTssLimit=%#x uCurTssLimitMin=%#x -> #TS\n",
             enmTaskSwitch, uCurTssLimit, uCurTssLimitMin));
        return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTss & X86_SEL_MASK_OFF_RPL);
    }

    /*
     * Verify that the new TSS can be accessed and map it. Map only the required contents
     * and not the entire TSS.
     */
    uint8_t        bUnmapInfoNewTss;
    void          *pvNewTss;
    uint32_t const cbNewTss    = uNewTssLimitMin + 1;
    RTGCPTR  const GCPtrNewTss = X86DESC_BASE(&pNewDescTss->Legacy);
    AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
    /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
     *        not perform correct translation if this happens. See Intel spec. 7.2.1
     *        "Task-State Segment". */
    VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTss, &bUnmapInfoNewTss, cbNewTss, UINT8_MAX, GCPtrNewTss, IEM_ACCESS_SYS_RW, 0);
    /** @todo Not cleaning up bUnmapInfoNewTss mapping in any early exits here.
     *        Consider wrapping the remainder into a function for simpler cleanup. */
    if (rcStrict != VINF_SUCCESS)
    {
        Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTss=%u uNewTssLimit=%u rc=%Rrc\n", enmTaskSwitch,
             cbNewTss, uNewTssLimit, VBOXSTRICTRC_VAL(rcStrict)));
        return rcStrict;
    }

    /*
     * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
     */
    uint32_t fEFlags = pVCpu->cpum.GstCtx.eflags.u;
    if (   enmTaskSwitch == IEMTASKSWITCH_JUMP
        || enmTaskSwitch == IEMTASKSWITCH_IRET)
    {
        uint8_t  bUnmapInfoDescCurTss;
        PX86DESC pDescCurTss;
        rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTss, &bUnmapInfoDescCurTss, sizeof(*pDescCurTss), UINT8_MAX,
                             pVCpu->cpum.GstCtx.gdtr.pGdt + (pVCpu->cpum.GstCtx.tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0);
        if (rcStrict != VINF_SUCCESS)
        {
            Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
                 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
            return rcStrict;
        }

        pDescCurTss->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
        rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoDescCurTss);
        if (rcStrict != VINF_SUCCESS)
        {
            Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
                 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
            return rcStrict;
        }

        /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
        if (enmTaskSwitch == IEMTASKSWITCH_IRET)
        {
            Assert(   uNewTssType == X86_SEL_TYPE_SYS_286_TSS_BUSY
                   || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
            fEFlags &= ~X86_EFL_NT;
        }
    }
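    /* Note: for CALL and INT/XCPT the outgoing task stays busy (and is linked
       back to via the new TSS's selPrev field), so only JMP and IRET clear the
       busy bit of the old TSS descriptor here. */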

    /*
     * Save the CPU state into the current TSS.
     */
    RTGCPTR const GCPtrCurTss = pVCpu->cpum.GstCtx.tr.u64Base;
    if (GCPtrNewTss == GCPtrCurTss)
    {
        Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTss));
        Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
             pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.u, pVCpu->cpum.GstCtx.eax,
             pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel,
             pVCpu->cpum.GstCtx.ldtr.Sel));
    }
    if (fIsNewTss386)
    {
        /*
         * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
         * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
         */
        uint8_t        bUnmapInfoCurTss32;
        void          *pvCurTss32;
        uint32_t const offCurTss = RT_UOFFSETOF(X86TSS32, eip);
        uint32_t const cbCurTss  = RT_UOFFSETOF(X86TSS32, selLdt) - RT_UOFFSETOF(X86TSS32, eip);
        AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
        rcStrict = iemMemMap(pVCpu, &pvCurTss32, &bUnmapInfoCurTss32, cbCurTss, UINT8_MAX,
                             GCPtrCurTss + offCurTss, IEM_ACCESS_SYS_RW, 0);
        if (rcStrict != VINF_SUCCESS)
        {
            Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTss=%#RGv cb=%u rc=%Rrc\n",
                 enmTaskSwitch, GCPtrCurTss, cbCurTss, VBOXSTRICTRC_VAL(rcStrict)));
            return rcStrict;
        }

        /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. interval [offCurTss..cbCurTss). */
        PX86TSS32 pCurTss32 = (PX86TSS32)((uintptr_t)pvCurTss32 - offCurTss);
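        /* The mapping starts at the eip field, so bias the structure pointer
           back by offCurTss; members before eip lie outside the mapped window. */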
        pCurTss32->eip    = uNextEip;
        pCurTss32->eflags = fEFlags;
        pCurTss32->eax    = pVCpu->cpum.GstCtx.eax;
        pCurTss32->ecx    = pVCpu->cpum.GstCtx.ecx;
        pCurTss32->edx    = pVCpu->cpum.GstCtx.edx;
        pCurTss32->ebx    = pVCpu->cpum.GstCtx.ebx;
        pCurTss32->esp    = pVCpu->cpum.GstCtx.esp;
        pCurTss32->ebp    = pVCpu->cpum.GstCtx.ebp;
        pCurTss32->esi    = pVCpu->cpum.GstCtx.esi;
        pCurTss32->edi    = pVCpu->cpum.GstCtx.edi;
        pCurTss32->es     = pVCpu->cpum.GstCtx.es.Sel;
        pCurTss32->cs     = pVCpu->cpum.GstCtx.cs.Sel;
        pCurTss32->ss     = pVCpu->cpum.GstCtx.ss.Sel;
        pCurTss32->ds     = pVCpu->cpum.GstCtx.ds.Sel;
        pCurTss32->fs     = pVCpu->cpum.GstCtx.fs.Sel;
        pCurTss32->gs     = pVCpu->cpum.GstCtx.gs.Sel;

        rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoCurTss32);
        if (rcStrict != VINF_SUCCESS)
        {
            Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
                 VBOXSTRICTRC_VAL(rcStrict)));
            return rcStrict;
        }
    }
    else
    {
        /*
         * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
         */
        uint8_t        bUnmapInfoCurTss16;
        void          *pvCurTss16;
        uint32_t const offCurTss = RT_UOFFSETOF(X86TSS16, ip);
        uint32_t const cbCurTss  = RT_UOFFSETOF(X86TSS16, selLdt) - RT_UOFFSETOF(X86TSS16, ip);
        AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
        rcStrict = iemMemMap(pVCpu, &pvCurTss16, &bUnmapInfoCurTss16, cbCurTss, UINT8_MAX,
                             GCPtrCurTss + offCurTss, IEM_ACCESS_SYS_RW, 0);
        if (rcStrict != VINF_SUCCESS)
        {
            Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTss=%#RGv cb=%u rc=%Rrc\n",
                 enmTaskSwitch, GCPtrCurTss, cbCurTss, VBOXSTRICTRC_VAL(rcStrict)));
            return rcStrict;
        }

        /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. interval [offCurTss..cbCurTss). */
        PX86TSS16 pCurTss16 = (PX86TSS16)((uintptr_t)pvCurTss16 - offCurTss);
        pCurTss16->ip    = uNextEip;
        pCurTss16->flags = (uint16_t)fEFlags;
        pCurTss16->ax    = pVCpu->cpum.GstCtx.ax;
        pCurTss16->cx    = pVCpu->cpum.GstCtx.cx;
        pCurTss16->dx    = pVCpu->cpum.GstCtx.dx;
        pCurTss16->bx    = pVCpu->cpum.GstCtx.bx;
        pCurTss16->sp    = pVCpu->cpum.GstCtx.sp;
        pCurTss16->bp    = pVCpu->cpum.GstCtx.bp;
        pCurTss16->si    = pVCpu->cpum.GstCtx.si;
        pCurTss16->di    = pVCpu->cpum.GstCtx.di;
        pCurTss16->es    = pVCpu->cpum.GstCtx.es.Sel;
        pCurTss16->cs    = pVCpu->cpum.GstCtx.cs.Sel;
        pCurTss16->ss    = pVCpu->cpum.GstCtx.ss.Sel;
        pCurTss16->ds    = pVCpu->cpum.GstCtx.ds.Sel;

        rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoCurTss16);
        if (rcStrict != VINF_SUCCESS)
        {
            Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
                 VBOXSTRICTRC_VAL(rcStrict)));
            return rcStrict;
        }
    }

    /*
     * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
     */
    if (   enmTaskSwitch == IEMTASKSWITCH_CALL
        || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
    {
        /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */
        PX86TSS32 pNewTSS = (PX86TSS32)pvNewTss;
        pNewTSS->selPrev  = pVCpu->cpum.GstCtx.tr.Sel;
    }
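    /* Together with EFLAGS.NT (set further down), this back-link is what makes
       a later IRET return to the outgoing task: IRET with NT=1 reloads TR from
       selPrev instead of popping a return address. */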

    /*
     * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky,
     * it's done further below with error handling (e.g. CR3 changes will go through PGM).
     */
    uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
    uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
    bool     fNewDebugTrap;
    if (fIsNewTss386)
    {
        PCX86TSS32 pNewTss32 = (PCX86TSS32)pvNewTss;
        uNewCr3       = (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG) ? pNewTss32->cr3 : 0;
        uNewEip       = pNewTss32->eip;
        uNewEflags    = pNewTss32->eflags;
        uNewEax       = pNewTss32->eax;
        uNewEcx       = pNewTss32->ecx;
        uNewEdx       = pNewTss32->edx;
        uNewEbx       = pNewTss32->ebx;
        uNewEsp       = pNewTss32->esp;
        uNewEbp       = pNewTss32->ebp;
        uNewEsi       = pNewTss32->esi;
        uNewEdi       = pNewTss32->edi;
        uNewES        = pNewTss32->es;
        uNewCS        = pNewTss32->cs;
        uNewSS        = pNewTss32->ss;
        uNewDS        = pNewTss32->ds;
        uNewFS        = pNewTss32->fs;
        uNewGS        = pNewTss32->gs;
        uNewLdt       = pNewTss32->selLdt;
        fNewDebugTrap = RT_BOOL(pNewTss32->fDebugTrap);
    }
    else
    {
        PCX86TSS16 pNewTss16 = (PCX86TSS16)pvNewTss;
        uNewCr3       = 0;
        uNewEip       = pNewTss16->ip;
        uNewEflags    = pNewTss16->flags;
        uNewEax       = UINT32_C(0xffff0000) | pNewTss16->ax;
        uNewEcx       = UINT32_C(0xffff0000) | pNewTss16->cx;
        uNewEdx       = UINT32_C(0xffff0000) | pNewTss16->dx;
        uNewEbx       = UINT32_C(0xffff0000) | pNewTss16->bx;
        uNewEsp       = UINT32_C(0xffff0000) | pNewTss16->sp;
        uNewEbp       = UINT32_C(0xffff0000) | pNewTss16->bp;
        uNewEsi       = UINT32_C(0xffff0000) | pNewTss16->si;
        uNewEdi       = UINT32_C(0xffff0000) | pNewTss16->di;
        uNewES        = pNewTss16->es;
        uNewCS        = pNewTss16->cs;
        uNewSS        = pNewTss16->ss;
        uNewDS        = pNewTss16->ds;
        uNewFS        = 0;
        uNewGS        = 0;
        uNewLdt       = pNewTss16->selLdt;
        fNewDebugTrap = false;
    }

    if (GCPtrNewTss == GCPtrCurTss)
        Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
             uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));

    /*
     * We're done accessing the new TSS.
     */
    rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoNewTss);
    if (rcStrict != VINF_SUCCESS)
    {
        Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
        return rcStrict;
    }

    /*
     * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
     */
    if (enmTaskSwitch != IEMTASKSWITCH_IRET)
    {
        rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTss, &bUnmapInfoNewTss, sizeof(*pNewDescTss), UINT8_MAX,
                             pVCpu->cpum.GstCtx.gdtr.pGdt + (SelTss & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0);
        if (rcStrict != VINF_SUCCESS)
        {
            Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
                 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
            return rcStrict;
        }

        /* Check that the descriptor indicates the new TSS is available (not busy). */
        AssertMsg(   pNewDescTss->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
                  || pNewDescTss->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
                  ("Invalid TSS descriptor type=%#x", pNewDescTss->Legacy.Gate.u4Type));

        pNewDescTss->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
        rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoNewTss);
        if (rcStrict != VINF_SUCCESS)
        {
            Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
                 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
            return rcStrict;
        }
    }

    /*
     * From this point on, we're technically in the new task. We will defer exceptions
     * until the completion of the task switch but before executing any instructions in the new task.
     */
    pVCpu->cpum.GstCtx.tr.Sel      = SelTss;
    pVCpu->cpum.GstCtx.tr.ValidSel = SelTss;
    pVCpu->cpum.GstCtx.tr.fFlags   = CPUMSELREG_FLAGS_VALID;
    pVCpu->cpum.GstCtx.tr.Attr.u   = X86DESC_GET_HID_ATTR(&pNewDescTss->Legacy);
    pVCpu->cpum.GstCtx.tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTss->Legacy);
    pVCpu->cpum.GstCtx.tr.u64Base  = X86DESC_BASE(&pNewDescTss->Legacy);
    CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);

    /* Set the busy bit in TR. */
    pVCpu->cpum.GstCtx.tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;

    /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
    if (   enmTaskSwitch == IEMTASKSWITCH_CALL
        || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
    {
        uNewEflags |= X86_EFL_NT;
    }

    pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
    pVCpu->cpum.GstCtx.cr0 |= X86_CR0_TS;
    CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);

    pVCpu->cpum.GstCtx.eip = uNewEip;
    pVCpu->cpum.GstCtx.eax = uNewEax;
    pVCpu->cpum.GstCtx.ecx = uNewEcx;
    pVCpu->cpum.GstCtx.edx = uNewEdx;
    pVCpu->cpum.GstCtx.ebx = uNewEbx;
    pVCpu->cpum.GstCtx.esp = uNewEsp;
    pVCpu->cpum.GstCtx.ebp = uNewEbp;
    pVCpu->cpum.GstCtx.esi = uNewEsi;
    pVCpu->cpum.GstCtx.edi = uNewEdi;

    uNewEflags &= X86_EFL_LIVE_MASK;
    uNewEflags |= X86_EFL_RA1_MASK;
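    /* X86_EFL_LIVE_MASK keeps only implemented flag bits; X86_EFL_RA1_MASK is
       the reserved, always-one bit 1 that every EFLAGS value must have set. */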
    IEMMISC_SET_EFL(pVCpu, uNewEflags);

    /*
     * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
     * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
     * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
     */
    pVCpu->cpum.GstCtx.es.Sel = uNewES;
    pVCpu->cpum.GstCtx.es.Attr.u &= ~X86DESCATTR_P;

    pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
    pVCpu->cpum.GstCtx.cs.Attr.u &= ~X86DESCATTR_P;

    pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
    pVCpu->cpum.GstCtx.ss.Attr.u &= ~X86DESCATTR_P;

    pVCpu->cpum.GstCtx.ds.Sel = uNewDS;
    pVCpu->cpum.GstCtx.ds.Attr.u &= ~X86DESCATTR_P;

    pVCpu->cpum.GstCtx.fs.Sel = uNewFS;
    pVCpu->cpum.GstCtx.fs.Attr.u &= ~X86DESCATTR_P;

    pVCpu->cpum.GstCtx.gs.Sel = uNewGS;
    pVCpu->cpum.GstCtx.gs.Attr.u &= ~X86DESCATTR_P;
    CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);

    pVCpu->cpum.GstCtx.ldtr.Sel = uNewLdt;
    pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
    pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_P;
    CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);

    if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
    {
        pVCpu->cpum.GstCtx.es.Attr.u   |= X86DESCATTR_UNUSABLE;
        pVCpu->cpum.GstCtx.cs.Attr.u   |= X86DESCATTR_UNUSABLE;
        pVCpu->cpum.GstCtx.ss.Attr.u   |= X86DESCATTR_UNUSABLE;
        pVCpu->cpum.GstCtx.ds.Attr.u   |= X86DESCATTR_UNUSABLE;
        pVCpu->cpum.GstCtx.fs.Attr.u   |= X86DESCATTR_UNUSABLE;
        pVCpu->cpum.GstCtx.gs.Attr.u   |= X86DESCATTR_UNUSABLE;
        pVCpu->cpum.GstCtx.ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
    }

    /*
     * Switch CR3 for the new task.
     */
    if (   fIsNewTss386
        && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG))
    {
        /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
        int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
        AssertRCSuccessReturn(rc, rc);

        /* Inform PGM. */
        /** @todo Should we raise \#GP(0) here when PAE PDPEs are invalid? */
        rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PGE));
        AssertRCReturn(rc, rc);
        /* ignore informational status codes */

        CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
    }

    /*
     * Switch LDTR for the new task.
     */
    if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
        iemHlpLoadNullDataSelectorProt(pVCpu, &pVCpu->cpum.GstCtx.ldtr, uNewLdt);
    else
    {
        Assert(!pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */

        IEMSELDESC DescNewLdt;
        rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
        if (rcStrict != VINF_SUCCESS)
        {
            Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
                 uNewLdt, pVCpu->cpum.GstCtx.gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
            return rcStrict;
        }
        if (   !DescNewLdt.Legacy.Gen.u1Present
            || DescNewLdt.Legacy.Gen.u1DescType
            || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
        {
            Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
                 uNewLdt, DescNewLdt.Legacy.u));
            return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
        }

        pVCpu->cpum.GstCtx.ldtr.ValidSel = uNewLdt;
        pVCpu->cpum.GstCtx.ldtr.fFlags   = CPUMSELREG_FLAGS_VALID;
        pVCpu->cpum.GstCtx.ldtr.u64Base  = X86DESC_BASE(&DescNewLdt.Legacy);
        pVCpu->cpum.GstCtx.ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
        pVCpu->cpum.GstCtx.ldtr.Attr.u   = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
        if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
            pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
        Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
    }

    IEMSELDESC DescSS;
    if (IEM_IS_V86_MODE(pVCpu))
    {
        IEM_SET_CPL(pVCpu, 3);
        iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.es, uNewES);
        iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.cs, uNewCS);
        iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ss, uNewSS);
        iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ds, uNewDS);
        iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.fs, uNewFS);
        iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.gs, uNewGS);

        /* Quick fix: fake DescSS. */ /** @todo fix the code further down? */
        DescSS.Legacy.u = 0;
        DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u32Limit;
        DescSS.Legacy.Gen.u4LimitHigh = pVCpu->cpum.GstCtx.ss.u32Limit >> 16;
        DescSS.Legacy.Gen.u16BaseLow  = (uint16_t)pVCpu->cpum.GstCtx.ss.u64Base;
        DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 16);
        DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 24);
        DescSS.Legacy.Gen.u4Type      = X86_SEL_TYPE_RW_ACC;
        DescSS.Legacy.Gen.u2Dpl       = 3;
    }
    else
    {
        uint8_t const uNewCpl = (uNewCS & X86_SEL_RPL);

        /*
         * Load the stack segment for the new task.
         */
        if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
        {
            Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
            return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
        }

        /* Fetch the descriptor. */
        rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
        if (rcStrict != VINF_SUCCESS)
        {
            Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
                 VBOXSTRICTRC_VAL(rcStrict)));
            return rcStrict;
        }

        /* SS must be a data segment and writable. */
        if (    !DescSS.Legacy.Gen.u1DescType
            ||  (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
            || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
        {
            Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
                 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
            return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
        }

        /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
        if (   (uNewSS & X86_SEL_RPL) != uNewCpl
            || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
        {
            Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
                 uNewCpl));
            return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
        }

        /* Is it there? */
        if (!DescSS.Legacy.Gen.u1Present)
        {
            Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
            return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
        }

        uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
        uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);

        /* Set the accessed bit before committing the result into SS. */
        if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
        {
            rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
            if (rcStrict != VINF_SUCCESS)
                return rcStrict;
            DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
        }

        /* Commit SS. */
        pVCpu->cpum.GstCtx.ss.Sel      = uNewSS;
        pVCpu->cpum.GstCtx.ss.ValidSel = uNewSS;
        pVCpu->cpum.GstCtx.ss.Attr.u   = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
        pVCpu->cpum.GstCtx.ss.u32Limit = cbLimit;
        pVCpu->cpum.GstCtx.ss.u64Base  = u64Base;
        pVCpu->cpum.GstCtx.ss.fFlags   = CPUMSELREG_FLAGS_VALID;
        Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));

        /* CPL has changed, update IEM before loading rest of segments. */
        IEM_SET_CPL(pVCpu, uNewCpl);

        /*
         * Load the data segments for the new task.
         */
        rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.es, uNewES);
        if (rcStrict != VINF_SUCCESS)
            return rcStrict;
        rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.ds, uNewDS);
        if (rcStrict != VINF_SUCCESS)
            return rcStrict;
        rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.fs, uNewFS);
        if (rcStrict != VINF_SUCCESS)
            return rcStrict;
        rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.gs, uNewGS);
        if (rcStrict != VINF_SUCCESS)
            return rcStrict;

        /*
         * Load the code segment for the new task.
         */
        if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
        {
            Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
            return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
        }

        /* Fetch the descriptor. */
        IEMSELDESC DescCS;
        rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
        if (rcStrict != VINF_SUCCESS)
        {
            Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
            return rcStrict;
        }

        /* CS must be a code segment. */
        if (   !DescCS.Legacy.Gen.u1DescType
            || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
        {
            Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
                 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
            return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
        }

        /* For conforming CS, DPL must be less than or equal to the RPL. */
        if (   (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
            && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
        {
            Log(("iemTaskSwitch: conforming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
                 DescCS.Legacy.Gen.u2Dpl));
            return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
        }

        /* For non-conforming CS, DPL must match RPL. */
        if (   !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
            && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
        {
            Log(("iemTaskSwitch: non-conforming CS DPL RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
                 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
            return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
        }

        /* Is it there? */
        if (!DescCS.Legacy.Gen.u1Present)
        {
            Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
            return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
        }

        cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
        u64Base = X86DESC_BASE(&DescCS.Legacy);

        /* Set the accessed bit before committing the result into CS. */
        if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
        {
            rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
            if (rcStrict != VINF_SUCCESS)
                return rcStrict;
            DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
        }

        /* Commit CS. */
        pVCpu->cpum.GstCtx.cs.Sel      = uNewCS;
        pVCpu->cpum.GstCtx.cs.ValidSel = uNewCS;
        pVCpu->cpum.GstCtx.cs.Attr.u   = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
        pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
        pVCpu->cpum.GstCtx.cs.u64Base  = u64Base;
        pVCpu->cpum.GstCtx.cs.fFlags   = CPUMSELREG_FLAGS_VALID;
        Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
    }

    /* Make sure the CPU mode is correct. */
    uint32_t const fExecNew = iemCalcExecFlags(pVCpu) | (pVCpu->iem.s.fExec & IEM_F_USER_OPTS);
    if (fExecNew != pVCpu->iem.s.fExec)
        Log(("iemTaskSwitch: fExec %#x -> %#x (xor %#x)\n", pVCpu->iem.s.fExec, fExecNew, pVCpu->iem.s.fExec ^ fExecNew));
    pVCpu->iem.s.fExec = fExecNew;

    /** @todo Debug trap. */
    if (fIsNewTss386 && fNewDebugTrap)
        Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));

    /*
     * Construct the error code masks based on what caused this task switch.
     * See Intel Instruction reference for INT.
     */
    uint16_t uExt;
    if (   enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
        && (   !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
            || (fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)))
        uExt = 1;
    else
        uExt = 0;
3617 |
|
---|
3618 | /*
|
---|
3619 | * Push any error code on to the new stack.
|
---|
3620 | */
|
---|
3621 | if (fFlags & IEM_XCPT_FLAGS_ERR)
|
---|
3622 | {
|
---|
3623 | Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
|
---|
3624 | uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
|
---|
3625 | uint8_t const cbStackFrame = fIsNewTss386 ? 4 : 2;
|
---|
3626 |
|
---|
3627 | /* Check that there is sufficient space on the stack. */
|
---|
3628 | /** @todo Factor out segment limit checking for normal/expand down segments
|
---|
3629 | * into a separate function. */
|
---|
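        /* Note: for a normal (expand-up) segment the valid offsets are
           [0, limit], whereas for an expand-down segment they are (limit,
           0xFFFF] or (limit, 0xFFFFFFFF] depending on the D/B bit, hence
           the two differing checks below. */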
        if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
        {
            if (   pVCpu->cpum.GstCtx.esp - 1 > cbLimitSS
                || pVCpu->cpum.GstCtx.esp < cbStackFrame)
            {
                /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
                Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
                     pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
                return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
            }
        }
        else
        {
            if (   pVCpu->cpum.GstCtx.esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
                || pVCpu->cpum.GstCtx.esp - cbStackFrame < cbLimitSS + UINT32_C(1))
            {
                Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
                     pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
                return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
            }
        }

        if (fIsNewTss386)
            rcStrict = iemMemStackPushU32(pVCpu, uErr);
        else
            rcStrict = iemMemStackPushU16(pVCpu, uErr);
        if (rcStrict != VINF_SUCCESS)
        {
            Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
                 fIsNewTss386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
            return rcStrict;
        }
    }

    /* Check the new EIP against the new CS limit. */
    if (pVCpu->cpum.GstCtx.eip > pVCpu->cpum.GstCtx.cs.u32Limit)
    {
        Log(("iemTaskSwitch: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
             pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.cs.u32Limit));
        /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
        return iemRaiseGeneralProtectionFault(pVCpu, uExt);
    }

    Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip,
         pVCpu->cpum.GstCtx.ss.Sel));
    return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
}


/**
 * Implements exceptions and interrupts for protected mode.
 *
 * @returns VBox strict status code.
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   cbInstr     The number of bytes to offset rIP by in the return
 *                      address.
 * @param   u8Vector    The interrupt / exception vector number.
 * @param   fFlags      The flags.
 * @param   uErr        The error value if IEM_XCPT_FLAGS_ERR is set.
 * @param   uCr2        The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
 */
static VBOXSTRICTRC
iemRaiseXcptOrIntInProtMode(PVMCPUCC    pVCpu,
                            uint8_t     cbInstr,
                            uint8_t     u8Vector,
                            uint32_t    fFlags,
                            uint16_t    uErr,
                            uint64_t    uCr2) RT_NOEXCEPT
{
    IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);

    /*
     * Read the IDT entry.
     */
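    /* Note: protected-mode IDT entries are 8 bytes each, so the last byte of
       the entry for vector N is at offset N * 8 + 7; the check below rejects
       vectors whose entry would extend beyond IDTR.LIMIT. */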
    if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
    {
        Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
        return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
    }
    X86DESC Idte;
    VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
                                              pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(8) * u8Vector);
    if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
    {
        Log(("iemRaiseXcptOrIntInProtMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
        return rcStrict;
    }
    Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x - from %04x:%08RX64 efl=%#x depth=%d\n",
         u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
         Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow,
         pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.eflags.u, pVCpu->iem.s.cXcptRecursions));

    /*
     * Check the descriptor type, DPL and such.
     * ASSUMES this is done in the same order as described for call-gate calls.
     */
    if (Idte.Gate.u1DescType)
    {
        Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
        return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
    }
    bool    fTaskGate  = false;
    uint8_t f32BitGate = true;
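    /* For interrupt and trap gates TF, NT, RF and VM are always cleared on
       dispatch; interrupt gates additionally clear IF (added in the switch
       below).  See the Intel SDM on handler protection for the details. */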
    uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
    switch (Idte.Gate.u4Type)
    {
        case X86_SEL_TYPE_SYS_UNDEFINED:
        case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
        case X86_SEL_TYPE_SYS_LDT:
        case X86_SEL_TYPE_SYS_286_TSS_BUSY:
        case X86_SEL_TYPE_SYS_286_CALL_GATE:
        case X86_SEL_TYPE_SYS_UNDEFINED2:
        case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
        case X86_SEL_TYPE_SYS_UNDEFINED3:
        case X86_SEL_TYPE_SYS_386_TSS_BUSY:
        case X86_SEL_TYPE_SYS_386_CALL_GATE:
        case X86_SEL_TYPE_SYS_UNDEFINED4:
        {
            /** @todo check what actually happens when the type is wrong...
             *        esp. call gates. */
            Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
            return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
        }

        case X86_SEL_TYPE_SYS_286_INT_GATE:
            f32BitGate = false;
            RT_FALL_THRU();
        case X86_SEL_TYPE_SYS_386_INT_GATE:
            fEflToClear |= X86_EFL_IF;
            break;

        case X86_SEL_TYPE_SYS_TASK_GATE:
            fTaskGate = true;
#ifndef IEM_IMPLEMENTS_TASKSWITCH
            IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
#endif
            break;

        case X86_SEL_TYPE_SYS_286_TRAP_GATE:
            f32BitGate = false;
            break;
        case X86_SEL_TYPE_SYS_386_TRAP_GATE:
            break;

        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }

    /* Check DPL against CPL if applicable. */
    if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
    {
        if (IEM_GET_CPL(pVCpu) > Idte.Gate.u2Dpl)
        {
            Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, IEM_GET_CPL(pVCpu), Idte.Gate.u2Dpl));
            return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
        }
    }

    /* Is it there? */
    if (!Idte.Gate.u1Present)
    {
        Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
        return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
    }

    /* Is it a task-gate? */
    if (fTaskGate)
    {
        /*
         * Construct the error code masks based on what caused this task switch.
         * See Intel Instruction reference for INT.
         */
        uint16_t const uExt     = (    (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
                                   && !(fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)) ? 0 : 1;
        uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
        RTSEL          SelTss   = Idte.Gate.u16Sel;

        /*
         * Fetch the TSS descriptor in the GDT.
         */
        IEMSELDESC DescTSS;
        rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTss, X86_XCPT_GP, (SelTss & uSelMask) | uExt);
        if (rcStrict != VINF_SUCCESS)
        {
            Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTss,
                 VBOXSTRICTRC_VAL(rcStrict)));
            return rcStrict;
        }

        /* The TSS descriptor must be a system segment and be available (not busy). */
        if (   DescTSS.Legacy.Gen.u1DescType
            || (   DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
                && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
        {
            Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
                 u8Vector, SelTss, DescTSS.Legacy.au64));
            return iemRaiseGeneralProtectionFault(pVCpu, (SelTss & uSelMask) | uExt);
        }

        /* The TSS must be present. */
        if (!DescTSS.Legacy.Gen.u1Present)
        {
            Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTss, DescTSS.Legacy.au64));
            return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTss & uSelMask) | uExt);
        }

        /* Do the actual task switch. */
        return iemTaskSwitch(pVCpu, IEMTASKSWITCH_INT_XCPT,
                             (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip,
                             fFlags, uErr, uCr2, SelTss, &DescTSS);
    }

    /* A null CS is bad. */
    RTSEL NewCS = Idte.Gate.u16Sel;
    if (!(NewCS & X86_SEL_MASK_OFF_RPL))
    {
        Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
        return iemRaiseGeneralProtectionFault0(pVCpu);
    }

    /* Fetch the descriptor for the new CS. */
    IEMSELDESC DescCS;
    rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
    if (rcStrict != VINF_SUCCESS)
    {
        Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
        return rcStrict;
    }

    /* Must be a code segment. */
    if (!DescCS.Legacy.Gen.u1DescType)
    {
        Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
        return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
    }
    if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
    {
        Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
        return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
    }

    /* Don't allow lowering the privilege level. */
    /** @todo Does the lowering of privileges apply to software interrupts
     *        only?  This has bearings on the more-privileged or
     *        same-privilege stack behavior further down.  A testcase would
     *        be nice. */
    if (DescCS.Legacy.Gen.u2Dpl > IEM_GET_CPL(pVCpu))
    {
        Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
             u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
        return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
    }

    /* Make sure the selector is present. */
    if (!DescCS.Legacy.Gen.u1Present)
    {
        Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
        return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
    }

#ifdef LOG_ENABLED
    /* If software interrupt, try decode it if logging is enabled and such. */
    if (   (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
        && LogIsItEnabled(RTLOGGRPFLAGS_ENABLED, LOG_GROUP_IEM_SYSCALL))
        iemLogSyscallProtModeInt(pVCpu, u8Vector, cbInstr);
#endif

    /* Check the new EIP against the new CS limit. */
    uint32_t const uNewEip =    Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
                             || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
                           ? Idte.Gate.u16OffsetLow
                           : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
    uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
    if (uNewEip > cbLimitCS)
    {
        Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
             u8Vector, uNewEip, cbLimitCS, NewCS));
        return iemRaiseGeneralProtectionFault(pVCpu, 0);
    }
    Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));

    /* Calc the flag image to push. */
    uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
    if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
        fEfl &= ~X86_EFL_RF;
    else
        fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */

    /* From V8086 mode only go to CPL 0. */
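    /* For a conforming CS the CPL is retained; for a non-conforming CS the
       handler runs at CS.DPL.  That is what the expression below encodes. */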
    uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
                          ? IEM_GET_CPL(pVCpu) : DescCS.Legacy.Gen.u2Dpl;
    if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
    {
        Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
        return iemRaiseGeneralProtectionFault(pVCpu, 0);
    }

    /*
     * If the privilege level changes, we need to get a new stack from the TSS.
     * This in turn means validating the new SS and ESP...
     */
    if (uNewCpl != IEM_GET_CPL(pVCpu))
    {
        RTSEL    NewSS;
        uint32_t uNewEsp;
        rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, uNewCpl, &NewSS, &uNewEsp);
        if (rcStrict != VINF_SUCCESS)
            return rcStrict;

        IEMSELDESC DescSS;
        rcStrict = iemMiscValidateNewSS(pVCpu, NewSS, uNewCpl, &DescSS);
        if (rcStrict != VINF_SUCCESS)
            return rcStrict;
        /* If the new SS is 16-bit, we are only going to use SP, not ESP. */
        if (!DescSS.Legacy.Gen.u1DefBig)
        {
            Log(("iemRaiseXcptOrIntInProtMode: Forcing ESP=%#x to 16 bits\n", uNewEsp));
            uNewEsp = (uint16_t)uNewEsp;
        }

        Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));

        /* Check that there is sufficient space for the stack frame. */
        uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
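        /* The frame holds IP/CS/FLAGS (+ error code) plus SP/SS for the stack
           switch; coming from V8086 mode it additionally holds ES/DS/FS/GS.
           Members are 2 bytes for a 16-bit gate and 4 bytes for a 32-bit one,
           which is what the shift by f32BitGate accounts for below. */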
        uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
                                   ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
                                   : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;

        if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
        {
            if (   uNewEsp - 1 > cbLimitSS
                || uNewEsp < cbStackFrame)
            {
                Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
                     u8Vector, NewSS, uNewEsp, cbStackFrame));
                return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
            }
        }
        else
        {
            if (   uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT16_MAX)
                || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
            {
                Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
                     u8Vector, NewSS, uNewEsp, cbStackFrame));
                return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
            }
        }

        /*
         * Start making changes.
         */

        /* Set the new CPL so that stack accesses use it. */
        uint8_t const uOldCpl = IEM_GET_CPL(pVCpu);
        IEM_SET_CPL(pVCpu, uNewCpl);

        /* Create the stack frame. */
        uint8_t    bUnmapInfoStackFrame;
        RTPTRUNION uStackFrame;
        rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, &bUnmapInfoStackFrame, cbStackFrame, UINT8_MAX,
                             uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy),
                             IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */
        if (rcStrict != VINF_SUCCESS)
            return rcStrict;
        if (f32BitGate)
        {
            if (fFlags & IEM_XCPT_FLAGS_ERR)
                *uStackFrame.pu32++ = uErr;
            uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
            uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
            uStackFrame.pu32[2] = fEfl;
            uStackFrame.pu32[3] = pVCpu->cpum.GstCtx.esp;
            uStackFrame.pu32[4] = pVCpu->cpum.GstCtx.ss.Sel;
            Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
            if (fEfl & X86_EFL_VM)
            {
                uStackFrame.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel;
                uStackFrame.pu32[5] = pVCpu->cpum.GstCtx.es.Sel;
                uStackFrame.pu32[6] = pVCpu->cpum.GstCtx.ds.Sel;
                uStackFrame.pu32[7] = pVCpu->cpum.GstCtx.fs.Sel;
                uStackFrame.pu32[8] = pVCpu->cpum.GstCtx.gs.Sel;
            }
        }
        else
        {
            if (fFlags & IEM_XCPT_FLAGS_ERR)
                *uStackFrame.pu16++ = uErr;
            uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
            uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
            uStackFrame.pu16[2] = fEfl;
            uStackFrame.pu16[3] = pVCpu->cpum.GstCtx.sp;
            uStackFrame.pu16[4] = pVCpu->cpum.GstCtx.ss.Sel;
            Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.sp));
            if (fEfl & X86_EFL_VM)
            {
                uStackFrame.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
                uStackFrame.pu16[5] = pVCpu->cpum.GstCtx.es.Sel;
                uStackFrame.pu16[6] = pVCpu->cpum.GstCtx.ds.Sel;
                uStackFrame.pu16[7] = pVCpu->cpum.GstCtx.fs.Sel;
                uStackFrame.pu16[8] = pVCpu->cpum.GstCtx.gs.Sel;
            }
        }
        rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoStackFrame);
        if (rcStrict != VINF_SUCCESS)
            return rcStrict;

        /* Mark the selectors 'accessed' (hope this is the correct time). */
        /** @todo testcase: exactly _when_ are the accessed bits set - before or
         *        after pushing the stack frame? (Write protect the gdt + stack to
         *        find out.) */
        if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
        {
            rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
            if (rcStrict != VINF_SUCCESS)
                return rcStrict;
            DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
        }

        if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
        {
            rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
            if (rcStrict != VINF_SUCCESS)
                return rcStrict;
            DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
        }

        /*
         * Start committing the register changes (joins with the DPL=CPL branch).
         */
        pVCpu->cpum.GstCtx.ss.Sel      = NewSS;
        pVCpu->cpum.GstCtx.ss.ValidSel = NewSS;
        pVCpu->cpum.GstCtx.ss.fFlags   = CPUMSELREG_FLAGS_VALID;
        pVCpu->cpum.GstCtx.ss.u32Limit = cbLimitSS;
        pVCpu->cpum.GstCtx.ss.u64Base  = X86DESC_BASE(&DescSS.Legacy);
        pVCpu->cpum.GstCtx.ss.Attr.u   = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
        /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
         *        16-bit handler, the high word of ESP remains unchanged (i.e. only
         *        SP is loaded).
         *        Need to check the other combinations too:
         *        - 16-bit TSS, 32-bit handler
         *        - 32-bit TSS, 16-bit handler */
        if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
            pVCpu->cpum.GstCtx.sp  = (uint16_t)(uNewEsp - cbStackFrame);
        else
            pVCpu->cpum.GstCtx.rsp = uNewEsp - cbStackFrame;

        if (fEfl & X86_EFL_VM)
        {
            iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.gs);
            iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.fs);
            iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.es);
            iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.ds);
        }
    }
    /*
     * Same privilege, no stack change and smaller stack frame.
     */
    else
    {
        uint64_t   uNewRsp;
        uint8_t    bUnmapInfoStackFrame;
        RTPTRUNION uStackFrame;
        uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
        rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, f32BitGate ? 3 : 1,
                                               &uStackFrame.pv, &bUnmapInfoStackFrame, &uNewRsp);
        if (rcStrict != VINF_SUCCESS)
            return rcStrict;

        if (f32BitGate)
        {
            if (fFlags & IEM_XCPT_FLAGS_ERR)
                *uStackFrame.pu32++ = uErr;
            uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
            uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | IEM_GET_CPL(pVCpu);
            uStackFrame.pu32[2] = fEfl;
        }
        else
        {
            if (fFlags & IEM_XCPT_FLAGS_ERR)
                *uStackFrame.pu16++ = uErr;
            uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
            uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | IEM_GET_CPL(pVCpu);
            uStackFrame.pu16[2] = fEfl;
        }
        rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoStackFrame); /* don't use the commit here */
        if (rcStrict != VINF_SUCCESS)
            return rcStrict;

        /* Mark the CS selector as 'accessed'. */
        if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
        {
            rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
            if (rcStrict != VINF_SUCCESS)
                return rcStrict;
            DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
        }

        /*
         * Start committing the register changes (joins with the other branch).
         */
        pVCpu->cpum.GstCtx.rsp = uNewRsp;
    }

    /* ... register committing continues. */
    pVCpu->cpum.GstCtx.cs.Sel      = (NewCS & ~X86_SEL_RPL) | uNewCpl;
    pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
    pVCpu->cpum.GstCtx.cs.fFlags   = CPUMSELREG_FLAGS_VALID;
    pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCS;
    pVCpu->cpum.GstCtx.cs.u64Base  = X86DESC_BASE(&DescCS.Legacy);
    pVCpu->cpum.GstCtx.cs.Attr.u   = X86DESC_GET_HID_ATTR(&DescCS.Legacy);

    pVCpu->cpum.GstCtx.rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
    fEfl &= ~fEflToClear;
    IEMMISC_SET_EFL(pVCpu, fEfl);

    if (fFlags & IEM_XCPT_FLAGS_CR2)
        pVCpu->cpum.GstCtx.cr2 = uCr2;

    if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
        iemRaiseXcptAdjustState(pVCpu, u8Vector);

    /* Make sure the execution flags are correct. */
    uint32_t const fExecNew = iemCalcExecFlags(pVCpu) | (pVCpu->iem.s.fExec & IEM_F_USER_OPTS);
    if (fExecNew != pVCpu->iem.s.fExec)
        Log(("iemRaiseXcptOrIntInProtMode: fExec %#x -> %#x (xor %#x)\n",
             pVCpu->iem.s.fExec, fExecNew, pVCpu->iem.s.fExec ^ fExecNew));
    pVCpu->iem.s.fExec = fExecNew;
    Assert(IEM_GET_CPL(pVCpu) == uNewCpl);

    /*
     * Deal with debug events that follow the exception and clear inhibit flags.
     */
    if (   !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
        || !(pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK))
        pVCpu->cpum.GstCtx.eflags.uBoth &= ~(CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_INHIBIT_SHADOW);
    else
    {
        Log(("iemRaiseXcptOrIntInProtMode: Raising #DB after %#x; pending=%#x\n",
             u8Vector, pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK));
        IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR6);
        pVCpu->cpum.GstCtx.dr[6] |= (pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK_NONSILENT)
                                    >> CPUMCTX_DBG_HIT_DRX_SHIFT;
        pVCpu->cpum.GstCtx.eflags.uBoth &= ~(CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_INHIBIT_SHADOW);
        return iemRaiseDebugException(pVCpu);
    }

    return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
}


/**
 * Implements exceptions and interrupts for long mode.
 *
 * @returns VBox strict status code.
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   cbInstr     The number of bytes to offset rIP by in the return
 *                      address.
 * @param   u8Vector    The interrupt / exception vector number.
 * @param   fFlags      The flags.
 * @param   uErr        The error value if IEM_XCPT_FLAGS_ERR is set.
 * @param   uCr2        The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
 */
static VBOXSTRICTRC
iemRaiseXcptOrIntInLongMode(PVMCPUCC    pVCpu,
                            uint8_t     cbInstr,
                            uint8_t     u8Vector,
                            uint32_t    fFlags,
                            uint16_t    uErr,
                            uint64_t    uCr2) RT_NOEXCEPT
{
    IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);

    /*
     * Read the IDT entry.
     */
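    /* Note: long-mode IDT entries are 16 bytes each (64-bit offset plus an
       IST field), hence the shift by 4; the entry is fetched below as two
       8-byte halves. */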
    uint16_t offIdt = (uint16_t)u8Vector << 4;
    if (pVCpu->cpum.GstCtx.idtr.cbIdt < offIdt + 7)
    {
        Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
        return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
    }
    X86DESC64 Idte;
#ifdef _MSC_VER /* Shut up silly compiler warning. */
    Idte.au64[0] = 0;
    Idte.au64[1] = 0;
#endif
    VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt);
    if (RT_LIKELY(rcStrict == VINF_SUCCESS))
        rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt + 8);
    if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
    {
        Log(("iemRaiseXcptOrIntInLongMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
        return rcStrict;
    }
    Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
         u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
         Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));

    /*
     * Check the descriptor type, DPL and such.
     * ASSUMES this is done in the same order as described for call-gate calls.
     */
    if (Idte.Gate.u1DescType)
    {
        Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
        return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
    }
    uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
    switch (Idte.Gate.u4Type)
    {
        case AMD64_SEL_TYPE_SYS_INT_GATE:
            fEflToClear |= X86_EFL_IF;
            break;
        case AMD64_SEL_TYPE_SYS_TRAP_GATE:
            break;

        default:
            Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
            return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
    }

    /* Check DPL against CPL if applicable. */
    if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
    {
        if (IEM_GET_CPL(pVCpu) > Idte.Gate.u2Dpl)
        {
            Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, IEM_GET_CPL(pVCpu), Idte.Gate.u2Dpl));
            return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
        }
    }

    /* Is it there? */
    if (!Idte.Gate.u1Present)
    {
        Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
        return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
    }

    /* A null CS is bad. */
    RTSEL NewCS = Idte.Gate.u16Sel;
    if (!(NewCS & X86_SEL_MASK_OFF_RPL))
    {
        Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
        return iemRaiseGeneralProtectionFault0(pVCpu);
    }

    /* Fetch the descriptor for the new CS. */
    IEMSELDESC DescCS;
    rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
    if (rcStrict != VINF_SUCCESS)
    {
        Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
        return rcStrict;
    }

    /* Must be a 64-bit code segment. */
    if (!DescCS.Long.Gen.u1DescType)
    {
        Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
        return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
    }
    if (   !DescCS.Long.Gen.u1Long
        || DescCS.Long.Gen.u1DefBig
        || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
    {
        Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
             u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
        return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
    }

    /* Don't allow lowering the privilege level.  For non-conforming CS
       selectors, the CS.DPL sets the privilege level the trap/interrupt
       handler runs at.  For conforming CS selectors, the CPL remains
       unchanged, but the CS.DPL must be <= CPL. */
    /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
     *        when CPU in Ring-0. Result \#GP? */
    if (DescCS.Legacy.Gen.u2Dpl > IEM_GET_CPL(pVCpu))
    {
        Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
             u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
        return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
    }

    /* Make sure the selector is present. */
    if (!DescCS.Legacy.Gen.u1Present)
    {
        Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
        return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
    }

    /* Check that the new RIP is canonical. */
    uint64_t const uNewRip = Idte.Gate.u16OffsetLow
                           | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
                           | ((uint64_t)Idte.Gate.u32OffsetTop  << 32);
    if (!IEM_IS_CANONICAL(uNewRip))
    {
        Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
        return iemRaiseGeneralProtectionFault0(pVCpu);
    }

    /*
     * If the privilege level changes or if the IST isn't zero, we need to get
     * a new stack from the TSS.
     */
    uint64_t      uNewRsp;
    uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
                          ? IEM_GET_CPL(pVCpu) : DescCS.Legacy.Gen.u2Dpl;
    if (   uNewCpl != IEM_GET_CPL(pVCpu)
        || Idte.Gate.u3IST != 0)
    {
        rcStrict = iemRaiseLoadStackFromTss64(pVCpu, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
        if (rcStrict != VINF_SUCCESS)
            return rcStrict;
    }
    else
        uNewRsp = pVCpu->cpum.GstCtx.rsp;
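    /* In 64-bit mode the CPU aligns the new RSP on a 16-byte boundary before
       pushing the interrupt stack frame (see the Intel/AMD docs on 64-bit
       mode interrupt handling). */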
    uNewRsp &= ~(uint64_t)0xf;

    /*
     * Calc the flag image to push.
     */
    uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
    if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
        fEfl &= ~X86_EFL_RF;
    else
        fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */

    /*
     * Start making changes.
     */
    /* Set the new CPL so that stack accesses use it. */
    uint8_t const uOldCpl = IEM_GET_CPL(pVCpu);
    IEM_SET_CPL(pVCpu, uNewCpl);
    /** @todo Setting CPL this early seems wrong as it would affect any errors we
     *        raise accessing the stack and (?) GDT/LDT... */

    /* Create the stack frame. */
    uint8_t bUnmapInfoStackFrame;
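    /* The 64-bit frame is always five qwords - RIP, CS, RFLAGS, RSP and SS -
       plus a sixth qword when an error code is pushed. */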
    uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
    RTPTRUNION uStackFrame;
    rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, &bUnmapInfoStackFrame, cbStackFrame, UINT8_MAX,
                         uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;

    if (fFlags & IEM_XCPT_FLAGS_ERR)
        *uStackFrame.pu64++ = uErr;
    uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.rip + cbInstr : pVCpu->cpum.GstCtx.rip;
    uStackFrame.pu64[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
    uStackFrame.pu64[2] = fEfl;
    uStackFrame.pu64[3] = pVCpu->cpum.GstCtx.rsp;
    uStackFrame.pu64[4] = pVCpu->cpum.GstCtx.ss.Sel;
    rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoStackFrame);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;

    /* Mark the CS selector as 'accessed' (hope this is the correct time). */
    /** @todo testcase: exactly _when_ are the accessed bits set - before or
     *        after pushing the stack frame? (Write protect the gdt + stack to
     *        find out.) */
    if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
    {
        rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
        if (rcStrict != VINF_SUCCESS)
            return rcStrict;
        DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
    }

    /*
     * Start committing the register changes.
     */
    /** @todo research/testcase: Figure out what VT-x and AMD-V loads into the
     *        hidden registers when interrupting 32-bit or 16-bit code! */
    if (uNewCpl != uOldCpl)
    {
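        /* On an inter-privilege interrupt in 64-bit mode, SS is loaded with a
           NULL selector carrying the new CPL as its RPL; the handler runs on
           the flat stack fetched from the TSS above. */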
        pVCpu->cpum.GstCtx.ss.Sel      = 0 | uNewCpl;
        pVCpu->cpum.GstCtx.ss.ValidSel = 0 | uNewCpl;
        pVCpu->cpum.GstCtx.ss.fFlags   = CPUMSELREG_FLAGS_VALID;
        pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
        pVCpu->cpum.GstCtx.ss.u64Base  = 0;
        pVCpu->cpum.GstCtx.ss.Attr.u   = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
    }
    pVCpu->cpum.GstCtx.rsp = uNewRsp - cbStackFrame;
    pVCpu->cpum.GstCtx.cs.Sel      = (NewCS & ~X86_SEL_RPL) | uNewCpl;
    pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
    pVCpu->cpum.GstCtx.cs.fFlags   = CPUMSELREG_FLAGS_VALID;
    pVCpu->cpum.GstCtx.cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
    pVCpu->cpum.GstCtx.cs.u64Base  = X86DESC_BASE(&DescCS.Legacy);
    pVCpu->cpum.GstCtx.cs.Attr.u   = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
    pVCpu->cpum.GstCtx.rip = uNewRip;

    fEfl &= ~fEflToClear;
    IEMMISC_SET_EFL(pVCpu, fEfl);

    if (fFlags & IEM_XCPT_FLAGS_CR2)
        pVCpu->cpum.GstCtx.cr2 = uCr2;

    if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
        iemRaiseXcptAdjustState(pVCpu, u8Vector);

    iemRecalcExecModeAndCplAndAcFlags(pVCpu);

    /*
     * Deal with debug events that follow the exception and clear inhibit flags.
     */
    if (   !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
        || !(pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK))
        pVCpu->cpum.GstCtx.eflags.uBoth &= ~(CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_INHIBIT_SHADOW);
    else
    {
        Log(("iemRaiseXcptOrIntInLongMode: Raising #DB after %#x; pending=%#x\n",
             u8Vector, pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK));
        IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR6);
        pVCpu->cpum.GstCtx.dr[6] |= (pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK_NONSILENT)
                                    >> CPUMCTX_DBG_HIT_DRX_SHIFT;
        pVCpu->cpum.GstCtx.eflags.uBoth &= ~(CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_INHIBIT_SHADOW);
        return iemRaiseDebugException(pVCpu);
    }

    return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
}


/**
 * Implements exceptions and interrupts.
 *
 * All exceptions and interrupts go through this function!
 *
 * @returns VBox strict status code.
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   cbInstr     The number of bytes to offset rIP by in the return
 *                      address.
 * @param   u8Vector    The interrupt / exception vector number.
 * @param   fFlags      The flags.
 * @param   uErr        The error value if IEM_XCPT_FLAGS_ERR is set.
 * @param   uCr2        The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
 */
VBOXSTRICTRC
iemRaiseXcptOrInt(PVMCPUCC    pVCpu,
                  uint8_t     cbInstr,
                  uint8_t     u8Vector,
                  uint32_t    fFlags,
                  uint16_t    uErr,
                  uint64_t    uCr2) RT_NOEXCEPT
{
    /*
     * Get all the state that we might need here.
     */
    IEM_CTX_IMPORT_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
    IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);

#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
    /*
     * Flush prefetch buffer
     */
    pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
#endif

    /*
     * Perform the V8086 IOPL check and upgrade the fault without nesting.
     */
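    /* Note: in virtual-8086 mode with IOPL < 3, a software INT n (but not
       INT3, INTO or ICEBP) is supposed to raise #GP(0) instead of being
       dispatched via the IDT - see the Intel SDM on interrupt handling in
       virtual-8086 mode - which is what the check below implements. */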
    if (   pVCpu->cpum.GstCtx.eflags.Bits.u1VM
        && pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL != 3
        && (fFlags & (  IEM_XCPT_FLAGS_T_SOFT_INT
                      | IEM_XCPT_FLAGS_BP_INSTR
                      | IEM_XCPT_FLAGS_ICEBP_INSTR
                      | IEM_XCPT_FLAGS_OF_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
        && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) )
    {
        Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
        fFlags   = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
        u8Vector = X86_XCPT_GP;
        uErr     = 0;
    }

    PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
#ifdef DBGFTRACE_ENABLED
    RTTraceBufAddMsgF(pVM->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
                      pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
                      pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp);
#endif

    /*
     * Check if DBGF wants to intercept the exception.
     */
    if (   (fFlags & (IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_T_SOFT_INT))
        || !DBGF_IS_EVENT_ENABLED(pVM, (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + u8Vector)) )
    { /* likely */ }
    else
    {
        VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + u8Vector),
                                                         DBGFEVENTCTX_INVALID, 1, (uint64_t)uErr);
        if (rcStrict != VINF_SUCCESS)
            return rcStrict;
    }

    /*
     * Evaluate whether NMI blocking should be in effect.
     * Normally, NMI blocking is in effect whenever we inject an NMI.
     */
    bool fBlockNmi = u8Vector == X86_XCPT_NMI
                  && (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT);

#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
    {
        VBOXSTRICTRC rcStrict0 = iemVmxVmexitEvent(pVCpu, u8Vector, fFlags, uErr, uCr2, cbInstr);
        if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
            return rcStrict0;

        /* If virtual-NMI blocking is in effect for the nested-guest, guest NMIs are not blocked. */
        if (pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking)
        {
            Assert(CPUMIsGuestVmxPinCtlsSet(&pVCpu->cpum.GstCtx, VMX_PIN_CTLS_VIRT_NMI));
            fBlockNmi = false;
        }
    }
#endif

#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
    if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
    {
        /*
         * If the event is being injected as part of VMRUN, it isn't subject to event
         * intercepts in the nested-guest. However, secondary exceptions that occur
         * during injection of any event -are- subject to exception intercepts.
         *
         * See AMD spec. 15.20 "Event Injection".
         */
        if (!pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents)
            pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents = true;
        else
        {
            /*
             * Check and handle if the event being raised is intercepted.
             */
            VBOXSTRICTRC rcStrict0 = iemHandleSvmEventIntercept(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
            if (rcStrict0 != VINF_SVM_INTERCEPT_NOT_ACTIVE)
                return rcStrict0;
        }
    }
#endif

    /*
     * Set NMI blocking if necessary.
     */
    if (fBlockNmi)
        CPUMSetInterruptInhibitingByNmi(&pVCpu->cpum.GstCtx);

    /*
     * Do recursion accounting.
     */
    uint8_t const  uPrevXcpt = pVCpu->iem.s.uCurXcpt;
    uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
    if (pVCpu->iem.s.cXcptRecursions == 0)
        Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
             u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2));
    else
    {
        Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
             u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt,
             pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));

        if (pVCpu->iem.s.cXcptRecursions >= 4)
        {
#ifdef DEBUG_bird
            AssertFailed();
#endif
            IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
        }

        /*
         * Evaluate the sequence of recurring events.
         */
        IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevXcpt, uPrevXcpt, fFlags, u8Vector,
                                                         NULL /* pXcptRaiseInfo */);
        if (enmRaise == IEMXCPTRAISE_CURRENT_XCPT)
        { /* likely */ }
        else if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
        {
            Log2(("iemRaiseXcptOrInt: Raising double fault. uPrevXcpt=%#x\n", uPrevXcpt));
            fFlags   = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
            u8Vector = X86_XCPT_DF;
            uErr     = 0;
#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
            /* VMX nested-guest #DF intercept needs to be checked here. */
            if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
            {
                VBOXSTRICTRC rcStrict0 = iemVmxVmexitEventDoubleFault(pVCpu);
                if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
                    return rcStrict0;
            }
#endif
            /* SVM nested-guest #DF intercepts need to be checked now. See AMD spec. 15.12 "Exception Intercepts". */
            if (IEM_SVM_IS_XCPT_INTERCEPT_SET(pVCpu, X86_XCPT_DF))
                IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
        }
        else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)
        {
            Log2(("iemRaiseXcptOrInt: Raising triple fault. uPrevXcpt=%#x\n", uPrevXcpt));
            return iemInitiateCpuShutdown(pVCpu);
        }
        else if (enmRaise == IEMXCPTRAISE_CPU_HANG)
        {
            /* If a nested-guest enters an endless CPU loop condition, we'll emulate it; otherwise guru. */
            Log2(("iemRaiseXcptOrInt: CPU hang condition detected\n"));
            if (   !CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu))
                && !CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
                return VERR_EM_GUEST_CPU_HANG;
        }
        else
        {
            AssertMsgFailed(("Unexpected condition! enmRaise=%#x uPrevXcpt=%#x fPrevXcpt=%#x, u8Vector=%#x fFlags=%#x\n",
                             enmRaise, uPrevXcpt, fPrevXcpt, u8Vector, fFlags));
            return VERR_IEM_IPE_9;
        }

        /*
         * The 'EXT' bit is set when an exception occurs during delivery of an
         * external event (such as an interrupt or an earlier exception)[1]. The
         * privileged software exception (INT1) also sets the EXT bit[2]. For
         * exceptions generated by software interrupts and the INTO and INT3
         * instructions, the 'EXT' bit is not set[3].
         *
         * [1] - Intel spec. 6.13 "Error Code"
         * [2] - Intel spec. 26.5.1.1 "Details of Vectored-Event Injection".
         * [3] - Intel Instruction reference for INT n.
         */
        if (   (fPrevXcpt & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR))
            && (fFlags & IEM_XCPT_FLAGS_ERR)
            && u8Vector != X86_XCPT_PF
            && u8Vector != X86_XCPT_DF)
        {
            uErr |= X86_TRAP_ERR_EXTERNAL;
        }
    }

    pVCpu->iem.s.cXcptRecursions++;
    pVCpu->iem.s.uCurXcpt    = u8Vector;
    pVCpu->iem.s.fCurXcpt    = fFlags;
    pVCpu->iem.s.uCurXcptErr = uErr;
    pVCpu->iem.s.uCurXcptCr2 = uCr2;

    /*
     * Extensive logging.
     */
#if defined(LOG_ENABLED) && defined(IN_RING3)
    if (LogIs3Enabled())
    {
        IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR_MASK);
        char szRegs[4096];
        DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
                        "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
                        "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
                        "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
                        "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
                        "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
                        "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
                        "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
                        "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
                        "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
                        "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
                        "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
                        "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
                        "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
                        "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
                        "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
                        "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
                        "    sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
                        "        efer=%016VR{efer}\n"
                        "         pat=%016VR{pat}\n"
                        "     sf_mask=%016VR{sf_mask}\n"
                        "krnl_gs_base=%016VR{krnl_gs_base}\n"
                        "       lstar=%016VR{lstar}\n"
                        "        star=%016VR{star} cstar=%016VR{cstar}\n"
                        "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
                        );

        char szInstr[256];
        DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
                           DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
                           szInstr, sizeof(szInstr), NULL);
        Log3(("%s%s\n", szRegs, szInstr));
    }
#endif /* LOG_ENABLED */

    /*
     * Stats.
     */
    uint64_t const uTimestamp = ASMReadTSC();
    if (!(fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT))
    {
        STAM_REL_STATS({ pVCpu->iem.s.aStatInts[u8Vector] += 1; });
        EMHistoryAddExit(pVCpu,
                         fFlags & IEM_XCPT_FLAGS_T_EXT_INT
                         ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_IEM, u8Vector)
                         : EMEXIT_MAKE_FT(EMEXIT_F_KIND_IEM, u8Vector | 0x100),
                         pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base, uTimestamp);
        IEMTLBTRACE_IRQ(pVCpu, u8Vector, fFlags, pVCpu->cpum.GstCtx.rflags.uBoth);
    }
    else
    {
        if (u8Vector < RT_ELEMENTS(pVCpu->iem.s.aStatXcpts))
            STAM_REL_COUNTER_INC(&pVCpu->iem.s.aStatXcpts[u8Vector]);
        EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_XCPT, u8Vector),
                         pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base, uTimestamp);
        if (fFlags & IEM_XCPT_FLAGS_ERR)
            EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_XCPT, u8Vector | EMEXIT_F_XCPT_ERRCD), uErr, uTimestamp);
        if (fFlags & IEM_XCPT_FLAGS_CR2)
            EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_XCPT, u8Vector | EMEXIT_F_XCPT_CR2), uCr2, uTimestamp);
        IEMTLBTRACE_XCPT(pVCpu, u8Vector, fFlags & IEM_XCPT_FLAGS_ERR ? uErr : 0, fFlags & IEM_XCPT_FLAGS_CR2 ? uCr2 : 0, fFlags);
    }

    /*
     * Hack alert! Convert incoming debug events to silent ones on Intel.
     * See the dbg+inhibit+ringxfer test in bs3-cpu-weird-1.
     */
    if (   !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
        || !(pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK_NONSILENT)
        || !IEM_IS_GUEST_CPU_INTEL(pVCpu))
    { /* ignore */ }
    else
    {
        Log(("iemRaiseXcptOrInt: Converting pending %#x debug events to a silent one (intel hack); vec=%#x\n",
             pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK, u8Vector));
        pVCpu->cpum.GstCtx.eflags.uBoth = (pVCpu->cpum.GstCtx.eflags.uBoth & ~CPUMCTX_DBG_HIT_DRX_MASK)
                                        | CPUMCTX_DBG_HIT_DRX_SILENT;
    }

    /*
     * A #PF implies an INVLPG for the CR2 value (see 4.10.1.1 in Intel SDM Vol 3)
     * to ensure that a stale TLB or paging cache entry will only cause one
     * spurious #PF.
     */
    if (   u8Vector == X86_XCPT_PF
        && (fFlags & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_CR2)) == (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_CR2))
        IEMTlbInvalidatePage(pVCpu, uCr2);

    /*
     * Call the mode specific worker function.
     */
4772 | VBOXSTRICTRC rcStrict;
|
---|
4773 | if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
|
---|
4774 | rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
|
---|
4775 | else if (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LMA)
|
---|
4776 | rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
|
---|
4777 | else
|
---|
4778 | rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
|
---|
4779 |
|
---|
4780 | /* Flush the prefetch buffer. */
|
---|
4781 | iemOpcodeFlushHeavy(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
|
---|
4782 |
|
---|
4783 | /*
|
---|
4784 | * Unwind.
|
---|
4785 | */
|
---|
4786 | pVCpu->iem.s.cXcptRecursions--;
|
---|
4787 | pVCpu->iem.s.uCurXcpt = uPrevXcpt;
|
---|
4788 | pVCpu->iem.s.fCurXcpt = fPrevXcpt;
|
---|
4789 | Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u depth=%d\n",
|
---|
4790 | VBOXSTRICTRC_VAL(rcStrict), u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel,
|
---|
4791 | pVCpu->cpum.GstCtx.esp, IEM_GET_CPL(pVCpu), pVCpu->iem.s.cXcptRecursions + 1));
|
---|
4792 | return rcStrict;
|
---|
4793 | }
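
/*
 * Illustration only -- a minimal sketch (not part of the build) of the typical
 * call pattern: instruction workers do not invoke iemRaiseXcptOrInt directly
 * but return through the convenience raisers defined below.  The worker name
 * iemExampleDivWorker and its zero check are hypothetical.
 *
 *      static VBOXSTRICTRC iemExampleDivWorker(PVMCPUCC pVCpu, uint8_t bDivisor)
 *      {
 *          if (!bDivisor)
 *              return iemRaiseDivideError(pVCpu);
 *              // equivalent to: iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE,
 *              //                                  IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
 *          // ... do the division and commit the result ...
 *          return VINF_SUCCESS;
 *      }
 */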

#ifdef IEM_WITH_SETJMP
/**
 * See iemRaiseXcptOrInt.  Will not return.
 */
DECL_NO_RETURN(void)
iemRaiseXcptOrIntJmp(PVMCPUCC pVCpu,
                     uint8_t  cbInstr,
                     uint8_t  u8Vector,
                     uint32_t fFlags,
                     uint16_t uErr,
                     uint64_t uCr2) IEM_NOEXCEPT_MAY_LONGJMP
{
    VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
    IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
}
#endif
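
/*
 * Illustration only -- with IEM_WITH_SETJMP the *Jmp raisers above and below
 * never return, so throw-style callers need no VBOXSTRICTRC plumbing on their
 * error paths.  A hypothetical fetch helper (iemExampleIsReadable is assumed,
 * not a real IEM function):
 *
 *      static uint8_t iemExampleFetchU8Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
 *      {
 *          if (!iemExampleIsReadable(pVCpu))
 *              iemRaiseGeneralProtectionFault0Jmp(pVCpu);  // longjmps; no return
 *          return 0x90;                                    // dummy payload
 *      }
 */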


/** \#DE - 00.  */
VBOXSTRICTRC iemRaiseDivideError(PVMCPUCC pVCpu) RT_NOEXCEPT
{
    if (GCMIsInterceptingXcptDE(pVCpu))
    {
        int rc = GCMXcptDE(pVCpu, &pVCpu->cpum.GstCtx);
        if (rc == VINF_SUCCESS)
        {
            Log(("iemRaiseDivideError: Restarting instruction because of GCMXcptDE\n"));
            return VINF_IEM_RAISED_XCPT; /* must return non-zero status here to cause an instruction restart */
        }
    }
    return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
}


#ifdef IEM_WITH_SETJMP
/** \#DE - 00.  */
DECL_NO_RETURN(void) iemRaiseDivideErrorJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
{
    iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
}
#endif


/** \#DB - 01.
 * @note This automatically clears DR7.GD. */
VBOXSTRICTRC iemRaiseDebugException(PVMCPUCC pVCpu) RT_NOEXCEPT
{
    /* This always clears RF (via IEM_XCPT_FLAGS_DRx_INSTR_BP). */
    pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
    return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_DRx_INSTR_BP, 0, 0);
}


/** \#BR - 05.  */
VBOXSTRICTRC iemRaiseBoundRangeExceeded(PVMCPUCC pVCpu) RT_NOEXCEPT
{
    return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_BR, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
}


/** \#UD - 06.  */
VBOXSTRICTRC iemRaiseUndefinedOpcode(PVMCPUCC pVCpu) RT_NOEXCEPT
{
    return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
}


#ifdef IEM_WITH_SETJMP
/** \#UD - 06.  */
DECL_NO_RETURN(void) iemRaiseUndefinedOpcodeJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
{
    iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
}
#endif


/** \#NM - 07.  */
VBOXSTRICTRC iemRaiseDeviceNotAvailable(PVMCPUCC pVCpu) RT_NOEXCEPT
{
    return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
}


#ifdef IEM_WITH_SETJMP
/** \#NM - 07.  */
DECL_NO_RETURN(void) iemRaiseDeviceNotAvailableJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
{
    iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
}
#endif


/** \#TS(err) - 0a.  */
VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
{
    return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
}


/** \#TS(tr) - 0a.  */
VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPUCC pVCpu) RT_NOEXCEPT
{
    return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
                             pVCpu->cpum.GstCtx.tr.Sel, 0);
}


/** \#TS(0) - 0a.  */
VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPUCC pVCpu) RT_NOEXCEPT
{
    return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
                             0, 0);
}


/** \#TS(err) - 0a.  */
VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
{
    return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
                             uSel & X86_SEL_MASK_OFF_RPL, 0);
}
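
/*
 * Note on the selector masking used by the raisers in this file: an x86
 * selector error code carries the table index and TI bit but never the RPL,
 * so the low two bits are cleared.  Since X86_SEL_RPL is 3, the two spellings
 * used below are equivalent (worked example, values only):
 *
 *      uint16_t const uSel  = UINT16_C(0x002b);            // index 5, GDT, RPL=3
 *      uint16_t const uErr1 = uSel & X86_SEL_MASK_OFF_RPL; // 0x0028
 *      uint16_t const uErr2 = uSel & ~X86_SEL_RPL;         // 0x0028, same thing
 */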


/** \#NP(err) - 0b.  */
VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
{
    return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
}


/** \#NP(sel) - 0b.  */
VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
{
    Log(("iemRaiseSelectorNotPresentBySelector: cs:rip=%04x:%RX64 uSel=%#x\n",
         pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uSel));
    return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
                             uSel & ~X86_SEL_RPL, 0);
}


/** \#SS(seg) - 0c.  */
VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
{
    Log(("iemRaiseStackSelectorNotPresentBySelector: cs:rip=%04x:%RX64 uSel=%#x\n",
         pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uSel));
    return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
                             uSel & ~X86_SEL_RPL, 0);
}


/** \#SS(err) - 0c.  */
VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
{
    Log(("iemRaiseStackSelectorNotPresentWithErr: cs:rip=%04x:%RX64 uErr=%#x\n",
         pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uErr));
    return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
}


/** \#GP(n) - 0d.  */
VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
{
    Log(("iemRaiseGeneralProtectionFault: cs:rip=%04x:%RX64 uErr=%#x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uErr));
    return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
}


/** \#GP(0) - 0d.  */
VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPUCC pVCpu) RT_NOEXCEPT
{
    Log(("iemRaiseGeneralProtectionFault0: cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
    return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
}

#ifdef IEM_WITH_SETJMP
/** \#GP(0) - 0d.  */
DECL_NO_RETURN(void) iemRaiseGeneralProtectionFault0Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
{
    Log(("iemRaiseGeneralProtectionFault0Jmp: cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
    iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
}
#endif


/** \#GP(sel) - 0d.  */
VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
{
    Log(("iemRaiseGeneralProtectionFaultBySelector: cs:rip=%04x:%RX64 Sel=%#x\n",
         pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, Sel));
    return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
                             Sel & ~X86_SEL_RPL, 0);
}


/** \#GP(0) - 0d.  */
VBOXSTRICTRC iemRaiseNotCanonical(PVMCPUCC pVCpu) RT_NOEXCEPT
{
    Log(("iemRaiseNotCanonical: cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
    return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
}


/** \#GP(sel) - 0d.  */
VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
{
    Log(("iemRaiseSelectorBounds: cs:rip=%04x:%RX64 iSegReg=%d fAccess=%#x\n",
         pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, iSegReg, fAccess));
    NOREF(iSegReg); NOREF(fAccess);
    return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
                             IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
}

#ifdef IEM_WITH_SETJMP
/** \#GP(sel) - 0d, longjmp.  */
DECL_NO_RETURN(void) iemRaiseSelectorBoundsJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP
{
    Log(("iemRaiseSelectorBoundsJmp: cs:rip=%04x:%RX64 iSegReg=%d fAccess=%#x\n",
         pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, iSegReg, fAccess));
    NOREF(iSegReg); NOREF(fAccess);
    iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
                         IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
}
#endif

/** \#GP(sel) - 0d.  */
VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
{
    Log(("iemRaiseSelectorBoundsBySelector: cs:rip=%04x:%RX64 Sel=%#x\n",
         pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, Sel));
    NOREF(Sel);
    return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
}

#ifdef IEM_WITH_SETJMP
/** \#GP(sel) - 0d, longjmp.  */
DECL_NO_RETURN(void) iemRaiseSelectorBoundsBySelectorJmp(PVMCPUCC pVCpu, RTSEL Sel) IEM_NOEXCEPT_MAY_LONGJMP
{
    Log(("iemRaiseSelectorBoundsBySelectorJmp: cs:rip=%04x:%RX64 Sel=%#x\n",
         pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, Sel));
    NOREF(Sel);
    iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
}
#endif


/** \#GP(sel) - 0d.  */
VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
{
    Log(("iemRaiseSelectorInvalidAccess: cs:rip=%04x:%RX64 iSegReg=%d fAccess=%#x\n",
         pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, iSegReg, fAccess));
    NOREF(iSegReg); NOREF(fAccess);
    return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
}

#ifdef IEM_WITH_SETJMP
/** \#GP(sel) - 0d, longjmp.  */
DECL_NO_RETURN(void) iemRaiseSelectorInvalidAccessJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP
{
    NOREF(iSegReg); NOREF(fAccess);
    iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
}
#endif


/** \#PF(n) - 0e.  */
VBOXSTRICTRC iemRaisePageFault(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess, uint32_t fAccess, int rc) RT_NOEXCEPT
{
    uint16_t uErr;
    switch (rc)
    {
        case VERR_PAGE_NOT_PRESENT:
        case VERR_PAGE_TABLE_NOT_PRESENT:
        case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
        case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
            uErr = 0;
            break;

        case VERR_RESERVED_PAGE_TABLE_BITS:
            uErr = X86_TRAP_PF_P | X86_TRAP_PF_RSVD;
            break;

        default:
            AssertMsgFailed(("%Rrc\n", rc));
            RT_FALL_THRU();
        case VERR_ACCESS_DENIED:
            uErr = X86_TRAP_PF_P;
            break;
    }

    if (IEM_GET_CPL(pVCpu) == 3)
        uErr |= X86_TRAP_PF_US;

    if (   (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
        && (   (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
            && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) ) )
        uErr |= X86_TRAP_PF_ID;

#if 0 /* This is so much non-sense, really.  Why was it done like that? */
    /* Note! RW access callers reporting a WRITE protection fault will clear
             the READ flag before calling.  So, read-modify-write accesses (RW)
             can safely be reported as READ faults. */
    if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
        uErr |= X86_TRAP_PF_RW;
#else
    if (fAccess & IEM_ACCESS_TYPE_WRITE)
    {
        /// @todo r=bird: bs3-cpu-basic-2 wants X86_TRAP_PF_RW for xchg and cmpxchg
        ///       (regardless of outcome of the comparison in the latter case).
        //if (!(fAccess & IEM_ACCESS_TYPE_READ))
        uErr |= X86_TRAP_PF_RW;
    }
#endif

    /* For FXSAVE and FRSTOR the #PF is typically reported at the max address
       of the memory operand rather than at the start of it. (Not sure what
       happens if it crosses a page boundary.)  The current heuristic for
       this is to report the #PF for the last byte if the access is more than
       64 bytes.  This is probably not correct, but we can work that out later;
       the main objective now is to get FXSAVE to work like real hardware and
       make bs3-cpu-basic2 work. */
    if (cbAccess <= 64)
    { /* likely */ }
    else
        GCPtrWhere += cbAccess - 1;

    return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
                             uErr, GCPtrWhere);
}

#ifdef IEM_WITH_SETJMP
/** \#PF(n) - 0e, longjmp.  */
DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess,
                                          uint32_t fAccess, int rc) IEM_NOEXCEPT_MAY_LONGJMP
{
    IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, cbAccess, fAccess, rc)));
}
#endif
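
/*
 * Worked example for the error code assembly above (values follow the x86 #PF
 * error code layout; not used by the code): a CPL-3 write to a present but
 * read-only data page arrives here as VERR_ACCESS_DENIED and yields:
 *
 *      uint16_t uErr = X86_TRAP_PF_P       // 0x0001 - protection, not not-present
 *                    | X86_TRAP_PF_RW      // 0x0002 - write access
 *                    | X86_TRAP_PF_US;     // 0x0004 - CPL == 3
 *      // uErr == 0x0007
 */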


/** \#MF(0) - 10.  */
VBOXSTRICTRC iemRaiseMathFault(PVMCPUCC pVCpu) RT_NOEXCEPT
{
    if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_NE)
        return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);

    /* Convert a #MF into a FERR -> IRQ 13. See @bugref{6117}. */
    PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13 /* u8Irq */, 1 /* u8Level */, 0 /* uTagSrc */);
    return iemRegUpdateRipAndFinishClearingRF(pVCpu);
}

#ifdef IEM_WITH_SETJMP
/** \#MF(0) - 10, longjmp.  */
DECL_NO_RETURN(void) iemRaiseMathFaultJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
{
    IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaiseMathFault(pVCpu)));
}
#endif
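
/*
 * Background, guest-side illustration only: the FERR path above is the PC/AT
 * legacy route where x87 errors surface as IRQ 13 via the interrupt
 * controller.  A guest opts in to native #MF (vector 16) delivery by setting
 * CR0.NE (bit 5):
 *
 *      mov     eax, cr0
 *      or      eax, 0x20       ; X86_CR0_NE
 *      mov     cr0, eax
 */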


/** \#AC(0) - 11.  */
VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPUCC pVCpu) RT_NOEXCEPT
{
    return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
}

#ifdef IEM_WITH_SETJMP
/** \#AC(0) - 11, longjmp.  */
DECL_NO_RETURN(void) iemRaiseAlignmentCheckExceptionJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
{
    IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaiseAlignmentCheckException(pVCpu)));
}
#endif


/** \#XF(0)/\#XM(0) - 19.  */
VBOXSTRICTRC iemRaiseSimdFpException(PVMCPUCC pVCpu) RT_NOEXCEPT
{
    return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_XF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
}


#ifdef IEM_WITH_SETJMP
/** \#XF(0)/\#XM(0) - 19, longjmp.  */
DECL_NO_RETURN(void) iemRaiseSimdFpExceptionJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
{
    IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaiseSimdFpException(pVCpu)));
}
#endif


/** Accessed via IEMOP_RAISE_DIVIDE_ERROR.  */
IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
{
    NOREF(cbInstr);
    return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
}


/** Accessed via IEMOP_RAISE_INVALID_LOCK_PREFIX.  */
IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
{
    NOREF(cbInstr);
    return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
}


/** Accessed via IEMOP_RAISE_INVALID_OPCODE.  */
IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
{
    NOREF(cbInstr);
    return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
}


/** @} */

/** @name Common opcode decoders.
 * @{
 */
//#include <iprt/mem.h>

/**
 * Used to add extra details about a stub case.
 * @param   pVCpu   The cross context virtual CPU structure of the calling thread.
 */
void iemOpStubMsg2(PVMCPUCC pVCpu) RT_NOEXCEPT
{
#if defined(LOG_ENABLED) && defined(IN_RING3)
    PVM  pVM = pVCpu->CTX_SUFF(pVM);
    char szRegs[4096];
    DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
                    "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
                    "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
                    "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
                    "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
                    "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
                    "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
                    "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
                    "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
                    "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
                    "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
                    "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
                    "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
                    "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
                    "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
                    "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
                    "tr  ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
                    "    sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
                    "        efer=%016VR{efer}\n"
                    "         pat=%016VR{pat}\n"
                    "     sf_mask=%016VR{sf_mask}\n"
                    "krnl_gs_base=%016VR{krnl_gs_base}\n"
                    "       lstar=%016VR{lstar}\n"
                    "        star=%016VR{star} cstar=%016VR{cstar}\n"
                    "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
                    );

    char szInstr[256];
    DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
                       DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
                       szInstr, sizeof(szInstr), NULL);

    RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
#else
    RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip);
#endif
}

/** @} */



/** @name Register Access.
 * @{
 */

/**
 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
 *
 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
 * segment limit.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu           The cross context virtual CPU structure of the calling thread.
 * @param   cbInstr         Instruction size.
 * @param   offNextInstr    The offset of the next instruction.
 * @param   enmEffOpSize    Effective operand size.
 */
VBOXSTRICTRC iemRegRipRelativeJumpS8AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int8_t offNextInstr,
                                                        IEMMODE enmEffOpSize) RT_NOEXCEPT
{
    switch (enmEffOpSize)
    {
        case IEMMODE_16BIT:
        {
            uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + (int16_t)offNextInstr;
            if (RT_LIKELY(   uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
                          || IEM_IS_64BIT_CODE(pVCpu) /* no CS limit checks in 64-bit mode */))
                pVCpu->cpum.GstCtx.rip = uNewIp;
            else
                return iemRaiseGeneralProtectionFault0(pVCpu);
            break;
        }

        case IEMMODE_32BIT:
        {
            Assert(!IEM_IS_64BIT_CODE(pVCpu));
            Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);

            uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + (int32_t)offNextInstr;
            if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
                pVCpu->cpum.GstCtx.rip = uNewEip;
            else
                return iemRaiseGeneralProtectionFault0(pVCpu);
            break;
        }

        case IEMMODE_64BIT:
        {
            Assert(IEM_IS_64BIT_CODE(pVCpu));

            uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
            if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
                pVCpu->cpum.GstCtx.rip = uNewRip;
            else
                return iemRaiseGeneralProtectionFault0(pVCpu);
            break;
        }

        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }

#ifndef IEM_WITH_CODE_TLB
    /* Flush the prefetch buffer. */
    pVCpu->iem.s.cbOpcode = cbInstr;
#endif

    /*
     * Clear RF and finish the instruction (maybe raise #DB).
     */
    return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
}
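
/*
 * Worked example for the IEMMODE_16BIT case above -- plain C arithmetic,
 * nothing IEM specific: the uint16_t sum wraps modulo 64K before the limit
 * check, matching real IP arithmetic.
 *
 *      uint16_t const uIp     = UINT16_C(0xfffe);  // current instruction
 *      uint8_t  const cbInstr = 2;                 // e.g. a short JMP
 *      int8_t   const offNext = -4;
 *      uint16_t const uNewIp  = uIp + cbInstr + (int16_t)offNext;  // 0xfffc
 */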


/**
 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
 *
 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
 * segment limit.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu           The cross context virtual CPU structure of the calling thread.
 * @param   cbInstr         Instruction size.
 * @param   offNextInstr    The offset of the next instruction.
 */
VBOXSTRICTRC iemRegRipRelativeJumpS16AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int16_t offNextInstr) RT_NOEXCEPT
{
    Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);

    uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + offNextInstr;
    if (RT_LIKELY(   uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
                  || IEM_IS_64BIT_CODE(pVCpu) /* no limit checking in 64-bit mode */))
        pVCpu->cpum.GstCtx.rip = uNewIp;
    else
        return iemRaiseGeneralProtectionFault0(pVCpu);

#ifndef IEM_WITH_CODE_TLB
    /* Flush the prefetch buffer. */
    pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
#endif

    /*
     * Clear RF and finish the instruction (maybe raise #DB).
     */
    return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
}


/**
 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
 *
 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
 * segment limit.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu           The cross context virtual CPU structure of the calling thread.
 * @param   cbInstr         Instruction size.
 * @param   offNextInstr    The offset of the next instruction.
 * @param   enmEffOpSize    Effective operand size.
 */
VBOXSTRICTRC iemRegRipRelativeJumpS32AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int32_t offNextInstr,
                                                         IEMMODE enmEffOpSize) RT_NOEXCEPT
{
    if (enmEffOpSize == IEMMODE_32BIT)
    {
        Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX); Assert(!IEM_IS_64BIT_CODE(pVCpu));

        uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + offNextInstr;
        if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
            pVCpu->cpum.GstCtx.rip = uNewEip;
        else
            return iemRaiseGeneralProtectionFault0(pVCpu);
    }
    else
    {
        Assert(enmEffOpSize == IEMMODE_64BIT);

        uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
        if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
            pVCpu->cpum.GstCtx.rip = uNewRip;
        else
            return iemRaiseGeneralProtectionFault0(pVCpu);
    }

#ifndef IEM_WITH_CODE_TLB
    /* Flush the prefetch buffer. */
    pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
#endif

    /*
     * Clear RF and finish the instruction (maybe raise #DB).
     */
    return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
}

/** @} */


/** @name FPU access and helpers.
 *
 * @{
 */

/**
 * Updates the x87.DS and FPUDP registers.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   pFpuCtx     The FPU context.
 * @param   iEffSeg     The effective segment register.
 * @param   GCPtrEff    The effective address relative to @a iEffSeg.
 */
DECLINLINE(void) iemFpuUpdateDP(PVMCPUCC pVCpu, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
{
    RTSEL sel;
    switch (iEffSeg)
    {
        case X86_SREG_DS: sel = pVCpu->cpum.GstCtx.ds.Sel; break;
        case X86_SREG_SS: sel = pVCpu->cpum.GstCtx.ss.Sel; break;
        case X86_SREG_CS: sel = pVCpu->cpum.GstCtx.cs.Sel; break;
        case X86_SREG_ES: sel = pVCpu->cpum.GstCtx.es.Sel; break;
        case X86_SREG_FS: sel = pVCpu->cpum.GstCtx.fs.Sel; break;
        case X86_SREG_GS: sel = pVCpu->cpum.GstCtx.gs.Sel; break;
        default:
            AssertMsgFailed(("%d\n", iEffSeg));
            sel = pVCpu->cpum.GstCtx.ds.Sel;
    }
    /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
    if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
    {
        pFpuCtx->DS    = 0;
        pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
    }
    else if (!IEM_IS_LONG_MODE(pVCpu)) /** @todo this is weird. explain. */
    {
        pFpuCtx->DS    = sel;
        pFpuCtx->FPUDP = GCPtrEff;
    }
    else
        *(uint64_t *)&pFpuCtx->FPUDP = GCPtrEff;
}
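
/*
 * Worked example for the real/V86-mode branch above (illustrative values
 * only): FPUDP then holds the linear address, i.e. the segment base
 * (selector << 4) plus the effective offset:
 *
 *      uint16_t const uSel   = UINT16_C(0x1234);
 *      uint32_t const offEff = UINT32_C(0x0010);
 *      uint32_t const uFpuDp = offEff + ((uint32_t)uSel << 4);   // 0x00012350
 */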


/**
 * Rotates the stack registers in the push direction.
 *
 * @param   pFpuCtx     The FPU context.
 * @remarks This is a complete waste of time, but fxsave stores the registers in
 *          stack order.
 */
DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
{
    RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
    pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
    pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
    pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
    pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
    pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
    pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
    pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
    pFpuCtx->aRegs[0].r80 = r80Tmp;
}
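
/*
 * The effect of the rotation above, pictorially: aRegs[] is kept in ST()
 * order, so a push shifts every register one stack slot down and recycles the
 * old ST(7) slot as the new ST(0):
 *
 *      before:  aRegs[0..7] = ST0 ST1 ST2 ST3 ST4 ST5 ST6 ST7
 *      after:   aRegs[0..7] = ST7 ST0 ST1 ST2 ST3 ST4 ST5 ST6
 *                             ^--- the caller overwrites this with the pushed value
 */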


/**
 * Rotates the stack registers in the pop direction.
 *
 * @param   pFpuCtx     The FPU context.
 * @remarks This is a complete waste of time, but fxsave stores the registers in
 *          stack order.
 */
DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
{
    RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
    pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
    pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
    pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
    pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
    pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
    pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
    pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
    pFpuCtx->aRegs[7].r80 = r80Tmp;
}


/**
 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
 * exception prevents it.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   pResult     The FPU operation result to push.
 * @param   pFpuCtx     The FPU context.
 */
static void iemFpuMaybePushResult(PVMCPU pVCpu, PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx) RT_NOEXCEPT
{
    /* Update FSW and bail if there are pending exceptions afterwards. */
    uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
    fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
    if (  (fFsw             & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
        & ~(pFpuCtx->FCW    & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
    {
        if ((fFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
            Log11(("iemFpuMaybePushResult: %04x:%08RX64: FSW %#x -> %#x\n",
                   pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fFsw));
        pFpuCtx->FSW = fFsw;
        return;
    }

    uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
    if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
    {
        /* All is fine, push the actual value. */
        pFpuCtx->FTW |= RT_BIT(iNewTop);
        pFpuCtx->aRegs[7].r80 = pResult->r80Result;
    }
    else if (pFpuCtx->FCW & X86_FCW_IM)
    {
        /* Masked stack overflow, push QNaN. */
        fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
        iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
    }
    else
    {
        /* Raise stack overflow, don't push anything. */
        pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
        pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
        Log11(("iemFpuMaybePushResult: %04x:%08RX64: stack overflow (FSW=%#x)\n",
               pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
        return;
    }

    fFsw &= ~X86_FSW_TOP_MASK;
    fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
    pFpuCtx->FSW = fFsw;

    iemFpuRotateStackPush(pFpuCtx);
    RT_NOREF(pVCpu);
}
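
/*
 * Worked example for the TOP arithmetic above (values only): adding 7 modulo
 * 8 is a decrement, which is what an x87 push does to TOP.  The FTW bit for
 * the new top slot then decides between a normal push and a stack overflow:
 *
 *      uint16_t const iTop    = 0;                               // current TOP
 *      uint16_t const iNewTop = (iTop + 7) & X86_FSW_TOP_SMASK;  // 7
 *      // FTW bit 7 clear -> slot free     -> push the value
 *      // FTW bit 7 set   -> slot occupied -> stack overflow handling
 */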


/**
 * Stores a result in a FPU register and updates the FSW and FTW.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   pFpuCtx     The FPU context.
 * @param   pResult     The result to store.
 * @param   iStReg      Which FPU register to store it in.
 */
static void iemFpuStoreResultOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg) RT_NOEXCEPT
{
    Assert(iStReg < 8);
    uint16_t       fNewFsw = pFpuCtx->FSW;
    uint16_t const iReg    = (X86_FSW_TOP_GET(fNewFsw) + iStReg) & X86_FSW_TOP_SMASK;
    fNewFsw &= ~X86_FSW_C_MASK;
    fNewFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
    if ((fNewFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
        Log11(("iemFpuStoreResultOnly: %04x:%08RX64: FSW %#x -> %#x\n",
               pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fNewFsw));
    pFpuCtx->FSW = fNewFsw;
    pFpuCtx->FTW |= RT_BIT(iReg);
    pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
    RT_NOREF(pVCpu);
}


/**
 * Only updates the FPU status word (FSW) with the result of the current
 * instruction.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   pFpuCtx     The FPU context.
 * @param   u16FSW      The FSW output of the current instruction.
 */
static void iemFpuUpdateFSWOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint16_t u16FSW) RT_NOEXCEPT
{
    uint16_t fNewFsw = pFpuCtx->FSW;
    fNewFsw &= ~X86_FSW_C_MASK;
    fNewFsw |= u16FSW & ~X86_FSW_TOP_MASK;
    if ((fNewFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
        Log11(("iemFpuUpdateFSWOnly: %04x:%08RX64: FSW %#x -> %#x\n",
               pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fNewFsw));
    pFpuCtx->FSW = fNewFsw;
    RT_NOREF(pVCpu);
}


/**
 * Pops one item off the FPU stack if no pending exception prevents it.
 *
 * @param   pFpuCtx     The FPU context.
 */
static void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx) RT_NOEXCEPT
{
    /* Check pending exceptions. */
    uint16_t uFSW = pFpuCtx->FSW;
    if (  (pFpuCtx->FSW     & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
        & ~(pFpuCtx->FCW    & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
        return;

    /* TOP++ (a pop increments the top-of-stack pointer). */
    uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
    uFSW &= ~X86_FSW_TOP_MASK;
    uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
    pFpuCtx->FSW = uFSW;

    /* Mark the previous ST0 as empty. */
    iOldTop >>= X86_FSW_TOP_SHIFT;
    pFpuCtx->FTW &= ~RT_BIT(iOldTop);

    /* Rotate the registers. */
    iemFpuRotateStackPop(pFpuCtx);
}
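
/*
 * Worked example for the TOP update above (values only): the +9 is a +1
 * modulo 8 applied inside the shifted TOP field, i.e. the pop increment:
 *
 *      uint16_t const iOldTop = 7 << X86_FSW_TOP_SHIFT;    // TOP = 7
 *      uint16_t const iNewFld = (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT))
 *                             & X86_FSW_TOP_MASK;          // TOP = 0 (wrapped)
 */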


/**
 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   pResult     The FPU operation result to push.
 * @param   uFpuOpcode  The FPU opcode value.
 */
void iemFpuPushResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint16_t uFpuOpcode) RT_NOEXCEPT
{
    PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
    iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
    iemFpuMaybePushResult(pVCpu, pResult, pFpuCtx);
}


/**
 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
 * and sets FPUDP and FPUDS.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   pResult     The FPU operation result to push.
 * @param   iEffSeg     The effective segment register.
 * @param   GCPtrEff    The effective address relative to @a iEffSeg.
 * @param   uFpuOpcode  The FPU opcode value.
 */
void iemFpuPushResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff,
                               uint16_t uFpuOpcode) RT_NOEXCEPT
{
    PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
    iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
    iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
    iemFpuMaybePushResult(pVCpu, pResult, pFpuCtx);
}


/**
 * Replaces ST0 with the first value and pushes the second onto the FPU stack,
 * unless a pending exception prevents it.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   pResult     The FPU operation result to store and push.
 * @param   uFpuOpcode  The FPU opcode value.
 */
void iemFpuPushResultTwo(PVMCPUCC pVCpu, PIEMFPURESULTTWO pResult, uint16_t uFpuOpcode) RT_NOEXCEPT
{
    PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
    iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);

    /* Update FSW and bail if there are pending exceptions afterwards. */
    uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
    fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
    if (  (fFsw             & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
        & ~(pFpuCtx->FCW    & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
    {
        if ((fFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
            Log11(("iemFpuPushResultTwo: %04x:%08RX64: FSW %#x -> %#x\n",
                   pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fFsw));
        pFpuCtx->FSW = fFsw;
        return;
    }

    uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
    if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
    {
        /* All is fine, push the actual value. */
        pFpuCtx->FTW |= RT_BIT(iNewTop);
        pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
        pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
    }
    else if (pFpuCtx->FCW & X86_FCW_IM)
    {
        /* Masked stack overflow, push QNaN. */
        fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
        iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
        iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
    }
    else
    {
        /* Raise stack overflow, don't push anything. */
        pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
        pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
        Log11(("iemFpuPushResultTwo: %04x:%08RX64: stack overflow (FSW=%#x)\n",
               pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
        return;
    }

    fFsw &= ~X86_FSW_TOP_MASK;
    fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
    pFpuCtx->FSW = fFsw;

    iemFpuRotateStackPush(pFpuCtx);
}


/**
 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
 * FOP.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   pResult     The result to store.
 * @param   iStReg      Which FPU register to store it in.
 * @param   uFpuOpcode  The FPU opcode value.
 */
void iemFpuStoreResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
{
    PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
    iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
    iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
}


/**
 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
 * FOP, and then pops the stack.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   pResult     The result to store.
 * @param   iStReg      Which FPU register to store it in.
 * @param   uFpuOpcode  The FPU opcode value.
 */
void iemFpuStoreResultThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
{
    PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
    iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
    iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
    iemFpuMaybePopOne(pFpuCtx);
}


/**
 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
 * FPUDP, and FPUDS.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   pResult     The result to store.
 * @param   iStReg      Which FPU register to store it in.
 * @param   iEffSeg     The effective memory operand selector register.
 * @param   GCPtrEff    The effective memory operand offset.
 * @param   uFpuOpcode  The FPU opcode value.
 */
void iemFpuStoreResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
                                uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
{
    PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
    iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
    iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
    iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
}


/**
 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
 * FPUDP, and FPUDS, and then pops the stack.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   pResult     The result to store.
 * @param   iStReg      Which FPU register to store it in.
 * @param   iEffSeg     The effective memory operand selector register.
 * @param   GCPtrEff    The effective memory operand offset.
 * @param   uFpuOpcode  The FPU opcode value.
 */
void iemFpuStoreResultWithMemOpThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult,
                                       uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
{
    PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
    iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
    iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
    iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
    iemFpuMaybePopOne(pFpuCtx);
}


/**
 * Updates the FOP, FPUIP, and FPUCS.  For FNOP.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   uFpuOpcode  The FPU opcode value.
 */
void iemFpuUpdateOpcodeAndIp(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
{
    PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
    iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
}


/**
 * Updates the FSW, FOP, FPUIP, and FPUCS.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   u16FSW      The FSW from the current instruction.
 * @param   uFpuOpcode  The FPU opcode value.
 */
void iemFpuUpdateFSW(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT
{
    PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
    iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
    iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
}


/**
 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   u16FSW      The FSW from the current instruction.
 * @param   uFpuOpcode  The FPU opcode value.
 */
void iemFpuUpdateFSWThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT
{
    PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
    iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
    iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
    iemFpuMaybePopOne(pFpuCtx);
}


/**
 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   u16FSW      The FSW from the current instruction.
 * @param   iEffSeg     The effective memory operand selector register.
 * @param   GCPtrEff    The effective memory operand offset.
 * @param   uFpuOpcode  The FPU opcode value.
 */
void iemFpuUpdateFSWWithMemOp(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
{
    PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
    iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
    iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
    iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
}


/**
 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   u16FSW      The FSW from the current instruction.
 * @param   uFpuOpcode  The FPU opcode value.
 */
void iemFpuUpdateFSWThenPopPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT
{
    PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
    iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
    iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
    iemFpuMaybePopOne(pFpuCtx);
    iemFpuMaybePopOne(pFpuCtx);
}


/**
 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   u16FSW      The FSW from the current instruction.
 * @param   iEffSeg     The effective memory operand selector register.
 * @param   GCPtrEff    The effective memory operand offset.
 * @param   uFpuOpcode  The FPU opcode value.
 */
void iemFpuUpdateFSWWithMemOpThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff,
                                     uint16_t uFpuOpcode) RT_NOEXCEPT
{
    PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
    iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
    iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
    iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
    iemFpuMaybePopOne(pFpuCtx);
}


/**
 * Worker routine for raising an FPU stack underflow exception.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   pFpuCtx     The FPU context.
 * @param   iStReg      The stack register being accessed.
 */
static void iemFpuStackUnderflowOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint8_t iStReg)
{
    Assert(iStReg < 8 || iStReg == UINT8_MAX);
    if (pFpuCtx->FCW & X86_FCW_IM)
    {
        /* Masked underflow. */
        pFpuCtx->FSW &= ~X86_FSW_C_MASK;
        pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
        uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
        if (iStReg != UINT8_MAX)
        {
            pFpuCtx->FTW |= RT_BIT(iReg);
            iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
        }
    }
    else
    {
        pFpuCtx->FSW &= ~X86_FSW_C_MASK;
        pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
        Log11(("iemFpuStackUnderflowOnly: %04x:%08RX64: underflow (FSW=%#x)\n",
               pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
    }
    RT_NOREF(pVCpu);
}
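
/*
 * Summary of the two branches above (illustration only, values per the code):
 *
 *      // FCW.IM = 1: FSW gains IE|SF (C1 = 0); the destination, if any, is
 *      //             loaded with QNaN and execution continues.
 *      // FCW.IM = 0: FSW gains IE|SF|ES|B; registers are left untouched and
 *      //             a #MF pends for the next waiting FPU instruction.
 *      uint16_t const fMaskedBits = X86_FSW_IE | X86_FSW_SF;   // C1 stays 0 = underflow
 */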


/**
 * Raises a FPU stack underflow exception.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   iStReg      The destination register that should be loaded
 *                      with QNaN if \#IS is masked.  Specify
 *                      UINT8_MAX if none (like for fcom).
 * @param   uFpuOpcode  The FPU opcode value.
 */
void iemFpuStackUnderflow(PVMCPUCC pVCpu, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
{
    PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
    iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
    iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
}


void iemFpuStackUnderflowWithMemOp(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
{
    PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
    iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
    iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
    iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
}


void iemFpuStackUnderflowThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
{
    PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
    iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
    iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
    iemFpuMaybePopOne(pFpuCtx);
}


void iemFpuStackUnderflowWithMemOpThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff,
                                          uint16_t uFpuOpcode) RT_NOEXCEPT
{
    PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
    iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
    iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
    iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
    iemFpuMaybePopOne(pFpuCtx);
}


void iemFpuStackUnderflowThenPopPop(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
{
    PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
    iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
    iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, UINT8_MAX);
    iemFpuMaybePopOne(pFpuCtx);
    iemFpuMaybePopOne(pFpuCtx);
}
|
---|
5995 |
|
---|
5996 |
|
---|
void iemFpuStackPushUnderflow(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
{
    PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
    iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);

    if (pFpuCtx->FCW & X86_FCW_IM)
    {
        /* Masked underflow - Push QNaN. */
        uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
        pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
        pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
        pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
        pFpuCtx->FTW |= RT_BIT(iNewTop);
        iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
        iemFpuRotateStackPush(pFpuCtx);
    }
    else
    {
        /* Exception pending - don't change TOP or the register stack. */
        pFpuCtx->FSW &= ~X86_FSW_C_MASK;
        pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
        Log11(("iemFpuStackPushUnderflow: %04x:%08RX64: underflow (FSW=%#x)\n",
               pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
    }
}


void iemFpuStackPushUnderflowTwo(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
{
    PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
    iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);

    if (pFpuCtx->FCW & X86_FCW_IM)
    {
        /* Masked underflow - Push a pair of QNaNs. */
        uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
        pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
        pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
        pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
        pFpuCtx->FTW |= RT_BIT(iNewTop);
        iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
        iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
        iemFpuRotateStackPush(pFpuCtx);
    }
    else
    {
        /* Exception pending - don't change TOP or the register stack. */
        pFpuCtx->FSW &= ~X86_FSW_C_MASK;
        pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
        Log11(("iemFpuStackPushUnderflowTwo: %04x:%08RX64: underflow (FSW=%#x)\n",
               pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
    }
}


/**
 * Worker routine for raising an FPU stack overflow exception on a push.
 *
 * @param   pVCpu   The cross context virtual CPU structure of the calling thread.
 * @param   pFpuCtx The FPU context.
 */
static void iemFpuStackPushOverflowOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx) RT_NOEXCEPT
{
    if (pFpuCtx->FCW & X86_FCW_IM)
    {
        /* Masked overflow. */
        uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
        pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
        pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
        pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
        pFpuCtx->FTW |= RT_BIT(iNewTop);
        iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
        iemFpuRotateStackPush(pFpuCtx);
    }
    else
    {
        /* Exception pending - don't change TOP or the register stack. */
        pFpuCtx->FSW &= ~X86_FSW_C_MASK;
        pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
        Log11(("iemFpuStackPushOverflowOnly: %04x:%08RX64: overflow (FSW=%#x)\n",
               pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
    }
    RT_NOREF(pVCpu);
}


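/*
 * Note on the TOP arithmetic used by the push workers above: a push decrements
 * TOP modulo 8, and (TOP + 7) & 7 equals (TOP - 1) & 7 without relying on
 * signed wrap-around.  A minimal sketch (helper name hypothetical):
 */
#if 0 /* illustration only */
static uint16_t iemFpuCalcTopAfterPushSketch(uint16_t fFsw)
{
    /* E.g. TOP=0 yields 7, TOP=3 yields 2. */
    return (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
}
#endif

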
/**
 * Raises an FPU stack overflow exception on a push.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   uFpuOpcode  The FPU opcode value.
 */
void iemFpuStackPushOverflow(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
{
    PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
    iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
    iemFpuStackPushOverflowOnly(pVCpu, pFpuCtx);
}


/**
 * Raises an FPU stack overflow exception on a push with a memory operand.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   iEffSeg     The effective memory operand selector register.
 * @param   GCPtrEff    The effective memory operand offset.
 * @param   uFpuOpcode  The FPU opcode value.
 */
void iemFpuStackPushOverflowWithMemOp(PVMCPUCC pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
{
    PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
    iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
    iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
    iemFpuStackPushOverflowOnly(pVCpu, pFpuCtx);
}

/** @} */


/** @name Memory access.
 *
 * @{
 */

#undef LOG_GROUP
#define LOG_GROUP LOG_GROUP_IEM_MEM

/**
 * Updates the IEMCPU::cbWritten counter if applicable.
 *
 * @param   pVCpu   The cross context virtual CPU structure of the calling thread.
 * @param   fAccess The access being accounted for.
 * @param   cbMem   The access size.
 */
DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPUCC pVCpu, uint32_t fAccess, size_t cbMem)
{
    if (   (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
        || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA  | IEM_ACCESS_TYPE_WRITE) )
        pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
}


/**
 * Applies the segment limit, base and attributes.
 *
 * This may raise a \#GP or \#SS.
 *
 * @returns VBox strict status code.
 *
 * @param   pVCpu     The cross context virtual CPU structure of the calling thread.
 * @param   fAccess   The kind of access which is being performed.
 * @param   iSegReg   The index of the segment register to apply.
 *                    This is UINT8_MAX if none (for IDT, GDT, LDT,
 *                    TSS, ++).
 * @param   cbMem     The access size.
 * @param   pGCPtrMem Pointer to the guest memory address to apply
 *                    segmentation to.  Input and output parameter.
 */
VBOXSTRICTRC iemMemApplySegment(PVMCPUCC pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem) RT_NOEXCEPT
{
    if (iSegReg == UINT8_MAX)
        return VINF_SUCCESS;

    IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
    PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
    switch (IEM_GET_CPU_MODE(pVCpu))
    {
        case IEMMODE_16BIT:
        case IEMMODE_32BIT:
        {
            RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
            RTGCPTR32 GCPtrLast32  = GCPtrFirst32 + (uint32_t)cbMem - 1;

            if (   pSel->Attr.n.u1Present
                && !pSel->Attr.n.u1Unusable)
            {
                Assert(pSel->Attr.n.u1DescType);
                if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
                {
                    if (   (fAccess & IEM_ACCESS_TYPE_WRITE)
                        && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
                        return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);

                    if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
                    {
                        /** @todo CPL check. */
                    }

                    /*
                     * There are two kinds of data selectors, normal and expand down.
                     */
                    if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
                    {
                        if (   GCPtrFirst32 > pSel->u32Limit
                            || GCPtrLast32  > pSel->u32Limit) /* yes, in real mode too (since 80286). */
                            return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
                    }
                    else
                    {
                        /*
                         * The upper boundary is defined by the B bit, not the G bit!
                         */
                        if (   GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
                            || GCPtrLast32  > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
                            return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
                    }
                    *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
                }
                else
                {
                    /*
                     * Code selectors can usually be used to read through; writing
                     * is only permitted in real and V8086 mode.
                     */
                    if (   (   (fAccess & IEM_ACCESS_TYPE_WRITE)
                            || (   (fAccess & IEM_ACCESS_TYPE_READ)
                                && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
                        && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
                        return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);

                    if (   GCPtrFirst32 > pSel->u32Limit
                        || GCPtrLast32  > pSel->u32Limit) /* yes, in real mode too (since 80286). */
                        return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);

                    if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
                    {
                        /** @todo CPL check. */
                    }

                    *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
                }
            }
            else
                return iemRaiseGeneralProtectionFault0(pVCpu);
            return VINF_SUCCESS;
        }

        case IEMMODE_64BIT:
        {
            RTGCPTR GCPtrMem = *pGCPtrMem;
            if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
                *pGCPtrMem = GCPtrMem + pSel->u64Base;

            Assert(cbMem >= 1);
            if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
                return VINF_SUCCESS;
            /** @todo We should probably raise \#SS(0) here if segment is SS; see AMD spec.
             *        4.12.2 "Data Limit Checks in 64-bit Mode". */
            return iemRaiseGeneralProtectionFault0(pVCpu);
        }

        default:
            AssertFailedReturn(VERR_IEM_IPE_7);
    }
}


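/*
 * Sketch of the typical caller pattern (cf. iemMemMap further below):
 * segmentation is applied first, turning a logical address into a linear one,
 * and only then is the linear address translated and checked.  Hypothetical
 * stand-alone helper, error handling trimmed:
 */
#if 0 /* illustration only */
static VBOXSTRICTRC iemMemLogicalToPhysSketch(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem,
                                              size_t cbMem, uint32_t fAccess, PRTGCPHYS pGCPhys)
{
    VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    return iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, pGCPhys);
}
#endif

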
/**
 * Translates a virtual address to a physical address and checks if we can
 * access the page as specified.
 *
 * @returns VBox strict status code.
 *
 * @param   pVCpu      The cross context virtual CPU structure of the calling thread.
 * @param   GCPtrMem   The virtual address.
 * @param   cbAccess   The access size, for raising \#PF correctly for
 *                     FXSAVE and such.
 * @param   fAccess    The intended access.
 * @param   pGCPhysMem Where to return the physical address.
 */
VBOXSTRICTRC iemMemPageTranslateAndCheckAccess(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint32_t cbAccess,
                                               uint32_t fAccess, PRTGCPHYS pGCPhysMem) RT_NOEXCEPT
{
    /** @todo Need a different PGM interface here.  We're currently using
     *        generic / REM interfaces.  This won't cut it for R0. */
    /** @todo If/when PGM handles paged real-mode, we can remove the hack in
     *        iemSvmWorldSwitch/iemVmxWorldSwitch to work around raising a page-fault
     *        here. */
    Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
    PGMPTWALKFAST WalkFast;
    AssertCompile(IEM_ACCESS_TYPE_READ  == PGMQPAGE_F_READ);
    AssertCompile(IEM_ACCESS_TYPE_WRITE == PGMQPAGE_F_WRITE);
    AssertCompile(IEM_ACCESS_TYPE_EXEC  == PGMQPAGE_F_EXECUTE);
    AssertCompile(X86_CR0_WP == PGMQPAGE_F_CR0_WP0);
    uint32_t fQPage = (fAccess & (PGMQPAGE_F_READ | IEM_ACCESS_TYPE_WRITE | PGMQPAGE_F_EXECUTE))
                    | (((uint32_t)pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP) ^ X86_CR0_WP);
    if (IEM_GET_CPL(pVCpu) == 3 && !(fAccess & IEM_ACCESS_WHAT_SYS))
        fQPage |= PGMQPAGE_F_USER_MODE;
    int rc = PGMGstQueryPageFast(pVCpu, GCPtrMem, fQPage, &WalkFast);
    if (RT_SUCCESS(rc))
    {
        Assert((WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED) && WalkFast.fFailed == PGM_WALKFAIL_SUCCESS);

        /* If the page is writable and does not have the no-exec bit set, all
           access is allowed.  Otherwise we'll have to check more carefully... */
        Assert(   (WalkFast.fEffective & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) == (X86_PTE_RW | X86_PTE_US)
               || (   (   !(fAccess & IEM_ACCESS_TYPE_WRITE)
                       || (WalkFast.fEffective & X86_PTE_RW)
                       || (   (   IEM_GET_CPL(pVCpu) != 3
                               || (fAccess & IEM_ACCESS_WHAT_SYS))
                           && !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)) )
                   && (   (WalkFast.fEffective & X86_PTE_US)
                       || IEM_GET_CPL(pVCpu) != 3
                       || (fAccess & IEM_ACCESS_WHAT_SYS) )
                   && (   !(fAccess & IEM_ACCESS_TYPE_EXEC)
                       || !(WalkFast.fEffective & X86_PTE_PAE_NX)
                       || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) )
                  )
              );

        /* PGMGstQueryPageFast sets the A & D bits. */
        /** @todo testcase: check when A and D bits are actually set by the CPU. */
        Assert(!(~WalkFast.fEffective & (fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A)));

        *pGCPhysMem = WalkFast.GCPhys;
        return VINF_SUCCESS;
    }

    LogEx(LOG_GROUP_IEM, ("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
    /** @todo Check unassigned memory in unpaged mode. */
#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
    if (WalkFast.fFailed & PGM_WALKFAIL_EPT)
        IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
#endif
    *pGCPhysMem = NIL_RTGCPHYS;
    return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess, rc);
}

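/*
 * Worked example of the fQPage derivation above (hypothetical values): for a
 * CPL-3 data write with CR0.WP=1, the IEM_ACCESS_TYPE_WRITE bit passes through
 * as PGMQPAGE_F_WRITE (the AssertCompiles above guarantee the bit overlap),
 * the WP XOR term contributes nothing since WP=1, and PGMQPAGE_F_USER_MODE is
 * ORed in because CPL==3 and the access is not IEM_ACCESS_WHAT_SYS:
 */
#if 0 /* illustration only */
    uint32_t const fQPageCpl3Write = PGMQPAGE_F_WRITE | PGMQPAGE_F_USER_MODE;
#endif
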
#if 0 /*unused*/
/**
 * Looks up a memory mapping entry.
 *
 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
 * @param   pVCpu   The cross context virtual CPU structure of the calling thread.
 * @param   pvMem   The memory address.
 * @param   fAccess The access to look up.
 */
DECLINLINE(int) iemMapLookup(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess)
{
    Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
    fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
    if (   pVCpu->iem.s.aMemMappings[0].pv == pvMem
        && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
        return 0;
    if (   pVCpu->iem.s.aMemMappings[1].pv == pvMem
        && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
        return 1;
    if (   pVCpu->iem.s.aMemMappings[2].pv == pvMem
        && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
        return 2;
    return VERR_NOT_FOUND;
}
#endif

/**
 * Finds a free memmap entry when the one suggested by iNextMapping is in use.
 *
 * @returns Memory mapping index, 1024 on failure.
 * @param   pVCpu   The cross context virtual CPU structure of the calling thread.
 */
static unsigned iemMemMapFindFree(PVMCPUCC pVCpu)
{
    /*
     * The easy case.
     */
    if (pVCpu->iem.s.cActiveMappings == 0)
    {
        pVCpu->iem.s.iNextMapping = 1;
        return 0;
    }

    /* There should be enough mappings for all instructions. */
    AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);

    for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
        if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
            return i;

    AssertFailedReturn(1024);
}


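/*
 * Sketch of how the lookup above is used (mirroring iemMemMap further below):
 * the cheap iNextMapping hint is tried first and iemMemMapFindFree is only the
 * fallback when that slot is occupied.
 */
#if 0 /* illustration only */
    unsigned iMemMap = pVCpu->iem.s.iNextMapping;
    if (   iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
        || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
        iMemMap = iemMemMapFindFree(pVCpu);
#endif

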
/**
 * Commits a bounce buffer that needs writing back and unmaps it.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu         The cross context virtual CPU structure of the calling thread.
 * @param   iMemMap       The index of the buffer to commit.
 * @param   fPostponeFail Whether we can postpone writer failures to ring-3.
 *                        Always false in ring-3, obviously.
 */
static VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPUCC pVCpu, unsigned iMemMap, bool fPostponeFail)
{
    Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
    Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
#ifdef IN_RING3
    Assert(!fPostponeFail);
    RT_NOREF_PV(fPostponeFail);
#endif

    /*
     * Do the writing.
     */
    PVMCC pVM = pVCpu->CTX_SUFF(pVM);
    if (!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned)
    {
        uint16_t const  cbFirst  = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
        uint16_t const  cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
        uint8_t const  *pbBuf    = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
        if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
        {
            /*
             * Carefully and efficiently dealing with access handler return
             * codes makes this a little bloated.
             */
            VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
                                                 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
                                                 pbBuf,
                                                 cbFirst,
                                                 PGMACCESSORIGIN_IEM);
            if (rcStrict == VINF_SUCCESS)
            {
                if (cbSecond)
                {
                    rcStrict = PGMPhysWrite(pVM,
                                            pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
                                            pbBuf + cbFirst,
                                            cbSecond,
                                            PGMACCESSORIGIN_IEM);
                    if (rcStrict == VINF_SUCCESS)
                    { /* nothing */ }
                    else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
                    {
                        LogEx(LOG_GROUP_IEM,
                              ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
                               pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
                               pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
                        rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
                    }
#ifndef IN_RING3
                    else if (fPostponeFail)
                    {
                        LogEx(LOG_GROUP_IEM,
                              ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
                               pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
                               pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
                        pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
                        VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
                        return iemSetPassUpStatus(pVCpu, rcStrict);
                    }
#endif
                    else
                    {
                        LogEx(LOG_GROUP_IEM,
                              ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
                               pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
                               pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
                        return rcStrict;
                    }
                }
            }
            else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
            {
                if (!cbSecond)
                {
                    LogEx(LOG_GROUP_IEM,
                          ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
                           pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
                    rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
                }
                else
                {
                    VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
                                                          pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
                                                          pbBuf + cbFirst,
                                                          cbSecond,
                                                          PGMACCESSORIGIN_IEM);
                    if (rcStrict2 == VINF_SUCCESS)
                    {
                        LogEx(LOG_GROUP_IEM,
                              ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
                               pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
                               pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
                        rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
                    }
                    else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
                    {
                        LogEx(LOG_GROUP_IEM,
                              ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
                               pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
                               pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
                        PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
                        rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
                    }
#ifndef IN_RING3
                    else if (fPostponeFail)
                    {
                        LogEx(LOG_GROUP_IEM,
                              ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
                               pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
                               pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
                        pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
                        VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
                        return iemSetPassUpStatus(pVCpu, rcStrict);
                    }
#endif
                    else
                    {
                        LogEx(LOG_GROUP_IEM,
                              ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
                               pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
                               pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
                        return rcStrict2;
                    }
                }
            }
#ifndef IN_RING3
            else if (fPostponeFail)
            {
                LogEx(LOG_GROUP_IEM,
                      ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
                       pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
                       pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
                if (!cbSecond)
                    pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
                else
                    pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
                VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
                return iemSetPassUpStatus(pVCpu, rcStrict);
            }
#endif
            else
            {
                LogEx(LOG_GROUP_IEM,
                      ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
                       pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
                       pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
                return rcStrict;
            }
        }
        else
        {
            /*
             * No access handlers, much simpler.
             */
            int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
            if (RT_SUCCESS(rc))
            {
                if (cbSecond)
                {
                    rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
                    if (RT_SUCCESS(rc))
                    { /* likely */ }
                    else
                    {
                        LogEx(LOG_GROUP_IEM,
                              ("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
                               pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
                               pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
                        return rc;
                    }
                }
            }
            else
            {
                LogEx(LOG_GROUP_IEM,
                      ("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
                       pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
                       pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
                return rc;
            }
        }
    }

#if defined(IEM_LOG_MEMORY_WRITES)
    Log5(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
          RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
    if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
        Log5(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
              RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
              &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));

    size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
    g_cbIemWrote = cbWrote;
    memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
#endif

    /*
     * Free the mapping entry.
     */
    pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
    Assert(pVCpu->iem.s.cActiveMappings != 0);
    pVCpu->iem.s.cActiveMappings--;
    return VINF_SUCCESS;
}


/**
 * Helper for iemMemMap, iemMemMapJmp and iemMemBounceBufferMapCrossPage.
 */
DECL_FORCE_INLINE(uint32_t)
iemMemCheckDataBreakpoint(PVMCC pVM, PVMCPUCC pVCpu, RTGCPTR GCPtrMem, size_t cbMem, uint32_t fAccess)
{
    bool const fSysAccess = (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_SYS;
    if (fAccess & IEM_ACCESS_TYPE_WRITE)
        return DBGFBpCheckDataWrite(pVM, pVCpu, GCPtrMem, (uint32_t)cbMem, fSysAccess);
    return DBGFBpCheckDataRead(pVM, pVCpu, GCPtrMem, (uint32_t)cbMem, fSysAccess);
}


/**
 * iemMemMap worker that deals with a request crossing pages.
 */
static VBOXSTRICTRC
iemMemBounceBufferMapCrossPage(PVMCPUCC pVCpu, int iMemMap, void **ppvMem, uint8_t *pbUnmapInfo,
                               size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
{
    STAM_COUNTER_INC(&pVCpu->iem.s.StatMemBounceBufferCrossPage);
    Assert(cbMem <= GUEST_PAGE_SIZE);

    /*
     * Do the address translations.
     */
    uint32_t const cbFirstPage  = GUEST_PAGE_SIZE - (uint32_t)(GCPtrFirst & GUEST_PAGE_OFFSET_MASK);
    RTGCPHYS GCPhysFirst;
    VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, cbFirstPage, fAccess, &GCPhysFirst);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    Assert((GCPhysFirst & GUEST_PAGE_OFFSET_MASK) == (GCPtrFirst & GUEST_PAGE_OFFSET_MASK));

    uint32_t const cbSecondPage = (uint32_t)cbMem - cbFirstPage;
    RTGCPHYS GCPhysSecond;
    rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK,
                                                 cbSecondPage, fAccess, &GCPhysSecond);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    Assert((GCPhysSecond & GUEST_PAGE_OFFSET_MASK) == 0);
    GCPhysSecond &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK; /** @todo why? */

    PVMCC pVM = pVCpu->CTX_SUFF(pVM);

    /*
     * Check for data breakpoints.
     */
    if (RT_LIKELY(!(pVCpu->iem.s.fExec & IEM_F_PENDING_BRK_DATA)))
    { /* likely */ }
    else
    {
        uint32_t fDataBps = iemMemCheckDataBreakpoint(pVM, pVCpu, GCPtrFirst, cbFirstPage, fAccess);
        fDataBps         |= iemMemCheckDataBreakpoint(pVM, pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK,
                                                      cbSecondPage, fAccess);
        pVCpu->cpum.GstCtx.eflags.uBoth |= fDataBps & (CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_DBG_DBGF_MASK);
        if (fDataBps > 1)
            LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapCrossPage: Data breakpoint: fDataBps=%#x for %RGv LB %zx; fAccess=%#x cs:rip=%04x:%08RX64\n",
                                  fDataBps, GCPtrFirst, cbMem, fAccess, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
    }

    /*
     * Read in the current memory content if it's a read, execute or partial
     * write access.
     */
    uint8_t * const pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];

    if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
    {
        if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
        {
            /*
             * Must carefully deal with access handler status codes here,
             * which makes the code a bit bloated.
             */
            rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
            if (rcStrict == VINF_SUCCESS)
            {
                rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
                if (rcStrict == VINF_SUCCESS)
                { /* likely */ }
                else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
                    rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
                else
                {
                    LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict=%Rrc (!!)\n",
                                          GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
                    return rcStrict;
                }
            }
            else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
            {
                VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
                if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
                {
                    PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
                    rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
                }
                else
                {
                    LogEx(LOG_GROUP_IEM,
                          ("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
                           GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
                    return rcStrict2;
                }
            }
            else
            {
                LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
                                      GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
                return rcStrict;
            }
        }
        else
        {
            /*
             * No informational status codes here, much more straightforward.
             */
            int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
            if (RT_SUCCESS(rc))
            {
                Assert(rc == VINF_SUCCESS);
                rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
                if (RT_SUCCESS(rc))
                    Assert(rc == VINF_SUCCESS);
                else
                {
                    LogEx(LOG_GROUP_IEM,
                          ("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
                    return rc;
                }
            }
            else
            {
                LogEx(LOG_GROUP_IEM,
                      ("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
                return rc;
            }
        }
    }
#ifdef VBOX_STRICT
    else
        memset(pbBuf, 0xcc, cbMem);
    if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
        memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
#endif
    AssertCompileMemberAlignment(VMCPU, iem.s.aBounceBuffers, 64);

    /*
     * Commit the bounce buffer entry.
     */
    pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst    = GCPhysFirst;
    pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond   = GCPhysSecond;
    pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst        = (uint16_t)cbFirstPage;
    pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond       = (uint16_t)cbSecondPage;
    pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned    = false;
    pVCpu->iem.s.aMemMappings[iMemMap].pv               = pbBuf;
    pVCpu->iem.s.aMemMappings[iMemMap].fAccess          = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
    pVCpu->iem.s.iNextMapping = iMemMap + 1;
    pVCpu->iem.s.cActiveMappings++;

    iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
    *ppvMem = pbBuf;
    *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
    return VINF_SUCCESS;
}


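/*
 * The split arithmetic above in one worked example (hypothetical values): an
 * 8-byte access at a linear address with page offset 0xffd and 4 KiB guest
 * pages gives cbFirstPage = 0x1000 - 0xffd = 3 and cbSecondPage = 8 - 3 = 5,
 * i.e. the bounce buffer front-loads the tail of the first page and appends
 * the head of the second.
 */
#if 0 /* illustration only */
    uint32_t const cbFirstPage  = GUEST_PAGE_SIZE - (uint32_t)(GCPtrFirst & GUEST_PAGE_OFFSET_MASK); /* 3 */
    uint32_t const cbSecondPage = (uint32_t)cbMem - cbFirstPage;                                     /* 5 */
#endif

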
/**
 * iemMemMap worker that deals with iemMemPageMap failures.
 */
static VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPUCC pVCpu, unsigned iMemMap, void **ppvMem, uint8_t *pbUnmapInfo, size_t cbMem,
                                              RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
{
    STAM_COUNTER_INC(&pVCpu->iem.s.StatMemBounceBufferMapPhys);

    /*
     * Filter out conditions we can handle and the ones which shouldn't happen.
     */
    if (   rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
        && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
        && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
    {
        AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
        return rcMap;
    }
    pVCpu->iem.s.cPotentialExits++;

    /*
     * Read in the current memory content if it's a read, execute or partial
     * write access.
     */
    uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
    if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
    {
        if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
            memset(pbBuf, 0xff, cbMem);
        else
        {
            int rc;
            if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
            {
                VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
                if (rcStrict == VINF_SUCCESS)
                { /* nothing */ }
                else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
                    rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
                else
                {
                    LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
                                          GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
                    return rcStrict;
                }
            }
            else
            {
                rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
                if (RT_SUCCESS(rc))
                { /* likely */ }
                else
                {
                    LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n",
                                          GCPhysFirst, rc));
                    return rc;
                }
            }
        }
    }
#ifdef VBOX_STRICT
    else
        memset(pbBuf, 0xcc, cbMem);
    if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
        memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
#endif

    /*
     * Commit the bounce buffer entry.
     */
    pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst    = GCPhysFirst;
    pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond   = NIL_RTGCPHYS;
    pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst        = (uint16_t)cbMem;
    pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond       = 0;
    pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned    = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
    pVCpu->iem.s.aMemMappings[iMemMap].pv               = pbBuf;
    pVCpu->iem.s.aMemMappings[iMemMap].fAccess          = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
    pVCpu->iem.s.iNextMapping = iMemMap + 1;
    pVCpu->iem.s.cActiveMappings++;

    iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
    *ppvMem = pbBuf;
    *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
    return VINF_SUCCESS;
}


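/*
 * The bUnmapInfo byte produced by the bounce buffer workers above packs the
 * mapping slot index into the low bits, sets bit 3 as a bounce-buffered
 * indicator, and shifts the IEM_ACCESS_TYPE_MASK bits up by four.  A
 * hypothetical decoder, names and exact field widths are assumptions:
 */
#if 0 /* illustration only */
static void iemMemUnmapInfoDecodeSketch(uint8_t bUnmapInfo, unsigned *piMemMap, uint32_t *pfAccessType)
{
    *piMemMap     = bUnmapInfo & 0x7;             /* mapping slot index (3 entries today) */
    Assert(bUnmapInfo & 0x08);                    /* bit 3: set by the bounce buffer paths above */
    *pfAccessType = (uint32_t)bUnmapInfo >> 4;    /* the IEM_ACCESS_TYPE_XXX bits */
}
#endif

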
/**
 * Maps the specified guest memory for the given kind of access.
 *
 * This may be using bounce buffering of the memory if it's crossing a page
 * boundary or if there is an access handler installed for any of it.  Because
 * of lock prefix guarantees, we're in for some extra clutter when this
 * happens.
 *
 * This may raise a \#GP, \#SS, \#PF or \#AC.
 *
 * @returns VBox strict status code.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   ppvMem      Where to return the pointer to the mapped memory.
 * @param   pbUnmapInfo Where to return unmap info to be passed to
 *                      iemMemCommitAndUnmap or iemMemRollbackAndUnmap when
 *                      done.
 * @param   cbMem       The number of bytes to map.  This is usually 1, 2, 4, 6,
 *                      8, 12, 16, 32 or 512.  When used by string operations
 *                      it can be up to a page.
 * @param   iSegReg     The index of the segment register to use for this
 *                      access.  The base and limits are checked.  Use UINT8_MAX
 *                      to indicate that no segmentation is required (for IDT,
 *                      GDT and LDT accesses).
 * @param   GCPtrMem    The address of the guest memory.
 * @param   fAccess     How the memory is being accessed.  The
 *                      IEM_ACCESS_TYPE_XXX part is used to figure out how to
 *                      map the memory, while the IEM_ACCESS_WHAT_XXX part is
 *                      used when raising exceptions.  The IEM_ACCESS_ATOMIC and
 *                      IEM_ACCESS_PARTIAL_WRITE bits are also allowed to be
 *                      set.
 * @param   uAlignCtl   Alignment control:
 *                          - Bits 15:0 is the alignment mask.
 *                          - Bits 31:16 for flags like IEM_MEMMAP_F_ALIGN_GP,
 *                            IEM_MEMMAP_F_ALIGN_SSE, and
 *                            IEM_MEMMAP_F_ALIGN_GP_OR_AC.
 *                      Pass zero to skip alignment.
 */
VBOXSTRICTRC iemMemMap(PVMCPUCC pVCpu, void **ppvMem, uint8_t *pbUnmapInfo, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem,
                       uint32_t fAccess, uint32_t uAlignCtl) RT_NOEXCEPT
{
    STAM_COUNTER_INC(&pVCpu->iem.s.StatMemMapNoJmp);

    /*
     * Check the input and figure out which mapping entry to use.
     */
    Assert(cbMem <= sizeof(pVCpu->iem.s.aBounceBuffers[0]));
    Assert(   cbMem <= 64 || cbMem == 512 || cbMem == 256 || cbMem == 108 || cbMem == 104 || cbMem == 102 || cbMem == 94
           || (iSegReg == UINT8_MAX && uAlignCtl == 0 && fAccess == IEM_ACCESS_DATA_R /* for the CPUID logging interface */) );
    Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK | IEM_ACCESS_ATOMIC | IEM_ACCESS_PARTIAL_WRITE)));
    Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));

    unsigned iMemMap = pVCpu->iem.s.iNextMapping;
    if (   iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
        || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
    {
        iMemMap = iemMemMapFindFree(pVCpu);
        AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
                              ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
                               pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
                               pVCpu->iem.s.aMemMappings[2].fAccess),
                              VERR_IEM_IPE_9);
    }

    /*
     * Map the memory, checking that we can actually access it.  If something
     * slightly complicated happens, fall back on bounce buffering.
     */
    VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
    if (rcStrict == VINF_SUCCESS)
    { /* likely */ }
    else
        return rcStrict;

    if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE) /* Crossing a page boundary? */
    { /* likely */ }
    else
        return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem, GCPtrMem, fAccess);

    /*
     * Alignment check.
     */
    if ((GCPtrMem & (uAlignCtl & UINT16_MAX)) == 0)
    { /* likelyish */ }
    else
    {
        /* Misaligned access. */
        if ((fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
        {
            if (   !(uAlignCtl & IEM_MEMMAP_F_ALIGN_GP)
                || (   (uAlignCtl & IEM_MEMMAP_F_ALIGN_SSE)
                    && (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) )
            {
                AssertCompile(X86_CR0_AM == X86_EFL_AC);

                if (!iemMemAreAlignmentChecksEnabled(pVCpu))
                { /* likely */ }
                else
                    return iemRaiseAlignmentCheckException(pVCpu);
            }
            else if (   (uAlignCtl & IEM_MEMMAP_F_ALIGN_GP_OR_AC)
                     && (GCPtrMem & 3) /* The value 4 matches 10980xe's FXSAVE and helps make bs3-cpu-basic2 work. */
                     /** @todo may only apply to 2, 4 or 8 byte misalignments depending on the CPU
                      * implementation. See FXSAVE/FRSTOR/XSAVE/XRSTOR/++.  Using 4 for now as
                      * that's what FXSAVE does on a 10980xe. */
                     && iemMemAreAlignmentChecksEnabled(pVCpu))
                return iemRaiseAlignmentCheckException(pVCpu);
            else
                return iemRaiseGeneralProtectionFault0(pVCpu);
        }

#if (defined(RT_ARCH_AMD64) && defined(RT_OS_LINUX)) || defined(RT_ARCH_ARM64)
        /* If the access is atomic there are host platform alignment restrictions
           we need to conform with. */
        if (   !(fAccess & IEM_ACCESS_ATOMIC)
# if defined(RT_ARCH_AMD64)
            || (64U - (GCPtrMem & 63U) >= cbMem) /* split-lock detection. ASSUMES 64 byte cache line. */
# elif defined(RT_ARCH_ARM64)
            || (16U - (GCPtrMem & 15U) >= cbMem) /* LSE2 allows atomics anywhere within a 16 byte sized & aligned block. */
# else
#  error port me
# endif
           )
        { /* okay */ }
        else
        {
            LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv LB %u - misaligned atomic fallback.\n", GCPtrMem, cbMem));
            pVCpu->iem.s.cMisalignedAtomics += 1;
            return VINF_EM_EMULATE_SPLIT_LOCK;
        }
#endif
    }

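    /*
     * Illustration (not built): how a caller encodes uAlignCtl for the checks
     * above.  A 16 byte SSE access that must fault when misaligned could pass
     * the low-bits mask plus flag bits along these lines; the values here are
     * examples, not taken from a specific caller.
     */
#if 0 /* illustration only */
    void   *pvMemEx;
    uint8_t bUnmapInfoEx;
    rcStrict = iemMemMap(pVCpu, &pvMemEx, &bUnmapInfoEx, 16, X86_SREG_DS, GCPtrMem, IEM_ACCESS_DATA_RW,
                         15 /* bits 15:0: alignment mask */ | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
#endif
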
#ifdef IEM_WITH_DATA_TLB
    Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));

    /*
     * Get the TLB entry for this page and check PT flags.
     *
     * We reload the TLB entry if we need to set the dirty bit (accessed
     * should in theory always be set).
     */
    uint8_t           *pbMem     = NULL;
    uint64_t const     uTagNoRev = IEMTLB_CALC_TAG_NO_REV(GCPtrMem);
    PIEMTLBENTRY       pTlbe     = IEMTLB_TAG_TO_EVEN_ENTRY(&pVCpu->iem.s.DataTlb, uTagNoRev);
    uint64_t const     fTlbeAD   = IEMTLBE_F_PT_NO_ACCESSED | (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PT_NO_DIRTY : 0);
    if (   (   pTlbe->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision)
            && !(pTlbe->fFlagsAndPhysRev & fTlbeAD) )
        || (   (pTlbe = pTlbe + 1)->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal)
            && !(pTlbe->fFlagsAndPhysRev & fTlbeAD) ) )
    {
# ifdef IEM_WITH_TLB_STATISTICS
        pVCpu->iem.s.DataTlb.cTlbCoreHits++;
# endif

        /* If the page is either supervisor only or non-writable, we need to do
           more careful access checks. */
        if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_WRITE))
        {
            /* Write to read only memory? */
            if (   (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_WRITE)
                && (fAccess & IEM_ACCESS_TYPE_WRITE)
                && (   (   IEM_GET_CPL(pVCpu) == 3
                        && !(fAccess & IEM_ACCESS_WHAT_SYS))
                    || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
            {
                LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
                return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
            }

            /* Kernel memory accessed by userland? */
            if (   (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER)
                && IEM_GET_CPL(pVCpu) == 3
                && !(fAccess & IEM_ACCESS_WHAT_SYS))
            {
                LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
                return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, VERR_ACCESS_DENIED);
            }
        }

        /* Look up the physical page info if necessary. */
        if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)
# ifdef IN_RING3
            pbMem = pTlbe->pbMappingR3;
# else
            pbMem = NULL;
# endif
        else
        {
            if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
            { /* likely */ }
            else
                IEMTlbInvalidateAllPhysicalSlow(pVCpu);
            pTlbe->pbMappingR3       = NULL;
            pTlbe->fFlagsAndPhysRev &= ~IEMTLBE_GCPHYS2PTR_MASK;
            int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
                                                &pbMem, &pTlbe->fFlagsAndPhysRev);
            AssertRCReturn(rc, rc);
# ifdef IN_RING3
            pTlbe->pbMappingR3 = pbMem;
# endif
        }
    }
    else
    {
        pVCpu->iem.s.DataTlb.cTlbCoreMisses++;

        /* This page table walking will set A bits as required by the access while performing the walk.
           ASSUMES these are set when the address is translated rather than on commit... */
        /** @todo testcase: check when A bits are actually set by the CPU for code. */
        PGMPTWALKFAST WalkFast;
        AssertCompile(IEM_ACCESS_TYPE_READ  == PGMQPAGE_F_READ);
        AssertCompile(IEM_ACCESS_TYPE_WRITE == PGMQPAGE_F_WRITE);
        AssertCompile(IEM_ACCESS_TYPE_EXEC  == PGMQPAGE_F_EXECUTE);
        AssertCompile(X86_CR0_WP == PGMQPAGE_F_CR0_WP0);
        uint32_t fQPage = (fAccess & (PGMQPAGE_F_READ | IEM_ACCESS_TYPE_WRITE | PGMQPAGE_F_EXECUTE))
                        | (((uint32_t)pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP) ^ X86_CR0_WP);
        if (IEM_GET_CPL(pVCpu) == 3 && !(fAccess & IEM_ACCESS_WHAT_SYS))
            fQPage |= PGMQPAGE_F_USER_MODE;
        int rc = PGMGstQueryPageFast(pVCpu, GCPtrMem, fQPage, &WalkFast);
        if (RT_SUCCESS(rc))
            Assert((WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED) && WalkFast.fFailed == PGM_WALKFAIL_SUCCESS);
        else
        {
            LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
            if (WalkFast.fFailed & PGM_WALKFAIL_EPT)
                IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
# endif
            return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, rc);
        }

        uint32_t fDataBps;
        if (   RT_LIKELY(!(pVCpu->iem.s.fExec & IEM_F_PENDING_BRK_DATA))
            || RT_LIKELY(!(fDataBps = iemMemCheckDataBreakpoint(pVCpu->CTX_SUFF(pVM), pVCpu, GCPtrMem, cbMem, fAccess))))
        {
            if (   !(WalkFast.fEffective & PGM_PTATTRS_G_MASK)
                || IEM_GET_CPL(pVCpu) != 0) /* optimization: Only use the PTE.G=1 entries in ring-0. */
            {
                pTlbe--;
                pTlbe->uTag = uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision;
                if (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE)
                    iemTlbLoadedLargePage<false>(pVCpu, &pVCpu->iem.s.DataTlb, uTagNoRev, RT_BOOL(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE));
# ifdef IEMTLB_WITH_LARGE_PAGE_BITMAP
                else
                    ASMBitClear(pVCpu->iem.s.DataTlb.bmLargePage, IEMTLB_TAG_TO_EVEN_INDEX(uTagNoRev));
# endif
            }
            else
            {
                pVCpu->iem.s.DataTlb.cTlbCoreGlobalLoads++;
                pTlbe->uTag = uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal;
                if (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE)
                    iemTlbLoadedLargePage<true>(pVCpu, &pVCpu->iem.s.DataTlb, uTagNoRev, RT_BOOL(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE));
# ifdef IEMTLB_WITH_LARGE_PAGE_BITMAP
                else
                    ASMBitClear(pVCpu->iem.s.DataTlb.bmLargePage, IEMTLB_TAG_TO_EVEN_INDEX(uTagNoRev) + 1);
# endif
            }
        }
        else
        {
            /* If we hit a data breakpoint, we use a dummy TLBE to force all accesses
               to the page with the data access breakpoint armed on it to pass thru here. */
            if (fDataBps > 1)
                LogEx(LOG_GROUP_IEM, ("iemMemMap: Data breakpoint: fDataBps=%#x for %RGv LB %zx; fAccess=%#x cs:rip=%04x:%08RX64\n",
                                      fDataBps, GCPtrMem, cbMem, fAccess, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
            pVCpu->cpum.GstCtx.eflags.uBoth |= fDataBps & (CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_DBG_DBGF_MASK);
            pTlbe = &pVCpu->iem.s.DataBreakpointTlbe;
            pTlbe->uTag = uTagNoRev;
        }
        pTlbe->fFlagsAndPhysRev = (~WalkFast.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A) /* skipping NX */)
                                | (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE);
        RTGCPHYS const GCPhysPg = WalkFast.GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
        pTlbe->GCPhys      = GCPhysPg;
        pTlbe->pbMappingR3 = NULL;
        Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_ACCESSED));
        Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_DIRTY) || !(fAccess & IEM_ACCESS_TYPE_WRITE));
        Assert(   !(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_WRITE)
               || !(fAccess & IEM_ACCESS_TYPE_WRITE)
               || (fQPage & (PGMQPAGE_F_CR0_WP0 | PGMQPAGE_F_USER_MODE)) == PGMQPAGE_F_CR0_WP0);
        Assert(   !(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER)
               || IEM_GET_CPL(pVCpu) != 3
               || (fAccess & IEM_ACCESS_WHAT_SYS));

        if (pTlbe != &pVCpu->iem.s.DataBreakpointTlbe)
        {
            if (!((uintptr_t)pTlbe & (sizeof(*pTlbe) * 2 - 1)))
                IEMTLBTRACE_LOAD(       pVCpu, GCPtrMem, pTlbe->GCPhys, (uint32_t)pTlbe->fFlagsAndPhysRev, true);
            else
                IEMTLBTRACE_LOAD_GLOBAL(pVCpu, GCPtrMem, pTlbe->GCPhys, (uint32_t)pTlbe->fFlagsAndPhysRev, true);
        }

        /* Resolve the physical address. */
        Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_GCPHYS2PTR_MASK));
        rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, GCPhysPg, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
                                        &pbMem, &pTlbe->fFlagsAndPhysRev);
        AssertRCReturn(rc, rc);
# ifdef IN_RING3
        pTlbe->pbMappingR3 = pbMem;
# endif
    }

    /*
     * Check the physical page level access and mapping.
     */
    if (   !(pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ))
        || !(pTlbe->fFlagsAndPhysRev & (  (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PG_NO_WRITE : 0)
                                        | (fAccess & IEM_ACCESS_TYPE_READ  ? IEMTLBE_F_PG_NO_READ  : 0))) )
    { /* probably likely */ }
    else
        return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem,
                                         pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,
                                           pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED
                                         : pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ    ? VERR_PGM_PHYS_TLB_CATCH_ALL
                                                                                             : VERR_PGM_PHYS_TLB_CATCH_WRITE);
    Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3)); /* ASSUMPTIONS about PGMPhysIemGCPhys2PtrNoLock behaviour. */

    if (pbMem)
    {
        Assert(!((uintptr_t)pbMem & GUEST_PAGE_OFFSET_MASK));
        pbMem    = pbMem + (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
        fAccess |= IEM_ACCESS_NOT_LOCKED;
    }
    else
    {
        Assert(!(fAccess & IEM_ACCESS_NOT_LOCKED));
        RTGCPHYS const GCPhysFirst = pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
        rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
        if (rcStrict != VINF_SUCCESS)
            return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem, GCPhysFirst, fAccess, rcStrict);
    }

    void * const pvMem = pbMem;

    if (fAccess & IEM_ACCESS_TYPE_WRITE)
        Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
    if (fAccess & IEM_ACCESS_TYPE_READ)
        Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));

#else  /* !IEM_WITH_DATA_TLB */

    RTGCPHYS GCPhysFirst;
    rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, &GCPhysFirst);
|
---|
7193 | if (rcStrict != VINF_SUCCESS)
|
---|
7194 | return rcStrict;
|
---|
7195 |
|
---|
7196 | if (fAccess & IEM_ACCESS_TYPE_WRITE)
|
---|
7197 | Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
|
---|
7198 | if (fAccess & IEM_ACCESS_TYPE_READ)
|
---|
7199 | Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
|
---|
7200 |
|
---|
7201 | void *pvMem;
|
---|
7202 | rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
|
---|
7203 | if (rcStrict != VINF_SUCCESS)
|
---|
7204 | return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem, GCPhysFirst, fAccess, rcStrict);
|
---|
7205 |
|
---|
7206 | #endif /* !IEM_WITH_DATA_TLB */
|
---|
7207 |
|
---|
7208 | /*
|
---|
7209 | * Fill in the mapping table entry.
|
---|
7210 | */
|
---|
7211 | pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
|
---|
7212 | pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
|
---|
7213 | pVCpu->iem.s.iNextMapping = iMemMap + 1;
|
---|
7214 | pVCpu->iem.s.cActiveMappings += 1;
|
---|
7215 |
|
---|
7216 | iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
|
---|
7217 | *ppvMem = pvMem;
|
---|
7218 | *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
|
---|
7219 | AssertCompile(IEM_ACCESS_TYPE_MASK <= 0xf);
|
---|
7220 | AssertCompile(RT_ELEMENTS(pVCpu->iem.s.aMemMappings) < 8);
|
---|
7221 |
|
---|
7222 | return VINF_SUCCESS;
|
---|
7223 | }
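
/* A quick sketch of the bUnmapInfo byte produced above, derived from how
   iemMemCommitAndUnmap and iemMemRollbackAndUnmap below decode it (the exact
   IEM_ACCESS_TYPE_XXX bit values are deliberately left symbolic here):

        uint8_t const bUnmapInfo = iMemMap                                    // bits 2:0 - aMemMappings index
                                 | 0x08                                       // bit    3 - validity marker
                                 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);   // bits 7:4 - access type

        // Decoding, as done by the unmap functions:
        uintptr_t const iMemMap2 = bUnmapInfo & 0x7;
        unsigned  const fType    = (unsigned)bUnmapInfo >> 4;                 // == fAccess & IEM_ACCESS_TYPE_MASK

   The two AssertCompile statements above guarantee that this all fits in a
   single byte. */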


/**
 * Commits the guest memory if bounce buffered and unmaps it.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   bUnmapInfo  Unmap info set by iemMemMap.
 */
VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
{
    uintptr_t const iMemMap = bUnmapInfo & 0x7;
    AssertMsgReturn(   (bUnmapInfo & 0x08)
                    && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
                    && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf)) == ((unsigned)bUnmapInfo >> 4),
                    ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess),
                    VERR_NOT_FOUND);

    /* If it's bounce buffered, we may need to write back the buffer. */
    if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
    {
        if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
            return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
    }
    /* Otherwise unlock it. */
    else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
        PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);

    /* Free the entry. */
    pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
    Assert(pVCpu->iem.s.cActiveMappings != 0);
    pVCpu->iem.s.cActiveMappings--;
    return VINF_SUCCESS;
}
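
/* Typical usage pattern for the map/commit pair above, mirroring what the
   fetch/store helpers later in this file do (see iemMemFetchDataU32_ZX_U64
   and friends); a sketch only, not a new API:

        uint8_t         bUnmapInfo;
        uint32_t const *pu32Src;
        VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, &bUnmapInfo, sizeof(*pu32Src), iSegReg, GCPtrMem,
                                    IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
        if (rc == VINF_SUCCESS)
        {
            uint32_t const uValue = *pu32Src;             // use the mapping
            rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo); // commit (writes back if bounce buffered)
        }

   An abandoned access is released with iemMemRollbackAndUnmap(pVCpu, bUnmapInfo)
   instead, which unlocks without committing. */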


/**
 * Rolls back the guest memory (conceptually only) and unmaps it.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   bUnmapInfo  Unmap info set by iemMemMap.
 */
void iemMemRollbackAndUnmap(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
{
    uintptr_t const iMemMap = bUnmapInfo & 0x7;
    AssertMsgReturnVoid(   (bUnmapInfo & 0x08)
                        && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
                        &&    (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf))
                           == ((unsigned)bUnmapInfo >> 4),
                        ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess));

    /* Unlock it if necessary. */
    if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
        PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);

    /* Free the entry. */
    pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
    Assert(pVCpu->iem.s.cActiveMappings != 0);
    pVCpu->iem.s.cActiveMappings--;
}

#ifdef IEM_WITH_SETJMP

/**
 * Maps the specified guest memory for the given kind of access, longjmp on
 * error.
 *
 * This may be using bounce buffering of the memory if it's crossing a page
 * boundary or if there is an access handler installed for any of it.  Because
 * of lock prefix guarantees, we're in for some extra clutter when this
 * happens.
 *
 * This may raise a \#GP, \#SS, \#PF or \#AC.
 *
 * @returns Pointer to the mapped memory.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   pbUnmapInfo Where to return unmap info to be passed to
 *                      iemMemCommitAndUnmapJmp, iemMemCommitAndUnmapRwSafeJmp,
 *                      iemMemCommitAndUnmapWoSafeJmp,
 *                      iemMemCommitAndUnmapRoSafeJmp,
 *                      iemMemRollbackAndUnmapWoSafe or iemMemRollbackAndUnmap
 *                      when done.
 * @param   cbMem       The number of bytes to map.  This is usually 1, 2, 4,
 *                      6, 8, 12, 16, 32 or 512.  When used by string
 *                      operations it can be up to a page.
 * @param   iSegReg     The index of the segment register to use for this
 *                      access.  The base and limits are checked.  Use
 *                      UINT8_MAX to indicate that no segmentation is required
 *                      (for IDT, GDT and LDT accesses).
 * @param   GCPtrMem    The address of the guest memory.
 * @param   fAccess     How the memory is being accessed.  The
 *                      IEM_ACCESS_TYPE_XXX part is used to figure out how to
 *                      map the memory, while the IEM_ACCESS_WHAT_XXX part is
 *                      used when raising exceptions.  The IEM_ACCESS_ATOMIC and
 *                      IEM_ACCESS_PARTIAL_WRITE bits are also allowed to be
 *                      set.
 * @param   uAlignCtl   Alignment control:
 *                          - Bits 15:0 is the alignment mask.
 *                          - Bits 31:16 for flags like IEM_MEMMAP_F_ALIGN_GP,
 *                            IEM_MEMMAP_F_ALIGN_SSE, and
 *                            IEM_MEMMAP_F_ALIGN_GP_OR_AC.
 *                      Pass zero to skip alignment.
 * @tparam  a_fSafeCall Whether this is a call from a "safe" fallback function
 *                      in IEMAllMemRWTmpl.cpp.h (@c true) or a generic one
 *                      that needs counting as such in the statistics.
 */
template<bool a_fSafeCall = false>
static void *iemMemMapJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem,
                          uint32_t fAccess, uint32_t uAlignCtl) IEM_NOEXCEPT_MAY_LONGJMP
{
    STAM_COUNTER_INC(&pVCpu->iem.s.StatMemMapJmp);

    /*
     * Check the input, check segment access and adjust address
     * with segment base.
     */
    Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
    Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK | IEM_ACCESS_ATOMIC | IEM_ACCESS_PARTIAL_WRITE)));
    Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));

    VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
    if (rcStrict == VINF_SUCCESS) { /*likely*/ }
    else IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));

    /*
     * Alignment check.
     */
    if ( (GCPtrMem & (uAlignCtl & UINT16_MAX)) == 0 )
    { /* likelyish */ }
    else
    {
        /* Misaligned access. */
        if ((fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
        {
            if (   !(uAlignCtl & IEM_MEMMAP_F_ALIGN_GP)
                || (   (uAlignCtl & IEM_MEMMAP_F_ALIGN_SSE)
                    && (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) )
            {
                AssertCompile(X86_CR0_AM == X86_EFL_AC);

                if (iemMemAreAlignmentChecksEnabled(pVCpu))
                    iemRaiseAlignmentCheckExceptionJmp(pVCpu);
            }
            else if (   (uAlignCtl & IEM_MEMMAP_F_ALIGN_GP_OR_AC)
                     && (GCPtrMem & 3) /* The value 4 matches 10980xe's FXSAVE and helps make bs3-cpu-basic2 work. */
                     /** @todo may only apply to 2, 4 or 8 byte misalignments depending on the CPU
                      * implementation. See FXSAVE/FRSTOR/XSAVE/XRSTOR/++.  Using 4 for now as
                      * that's what FXSAVE does on a 10980xe. */
                     && iemMemAreAlignmentChecksEnabled(pVCpu))
                iemRaiseAlignmentCheckExceptionJmp(pVCpu);
            else
                iemRaiseGeneralProtectionFault0Jmp(pVCpu);
        }

#if (defined(RT_ARCH_AMD64) && defined(RT_OS_LINUX)) || defined(RT_ARCH_ARM64)
        /* If the access is atomic there are host platform alignment restrictions
           we need to conform with. */
        if (   !(fAccess & IEM_ACCESS_ATOMIC)
# if defined(RT_ARCH_AMD64)
            || (64U - (GCPtrMem & 63U) >= cbMem) /* split-lock detection. ASSUMES 64 byte cache line. */
# elif defined(RT_ARCH_ARM64)
            || (16U - (GCPtrMem & 15U) >= cbMem) /* LSE2 allows atomics anywhere within a 16 byte sized & aligned block. */
# else
#  error port me
# endif
           )
        { /* okay */ }
        else
        {
            LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv LB %u - misaligned atomic fallback.\n", GCPtrMem, cbMem));
            pVCpu->iem.s.cMisalignedAtomics += 1;
            IEM_DO_LONGJMP(pVCpu, VINF_EM_EMULATE_SPLIT_LOCK);
        }
#endif
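
        /* A worked example of the AMD64 check above (illustrative values only):
           with GCPtrMem=0x103e and cbMem=4, GCPtrMem & 63 = 62 and 64 - 62 = 2,
           which is < 4, so the access would straddle a cache line and we take
           the VINF_EM_EMULATE_SPLIT_LOCK path; at GCPtrMem=0x103c the remainder
           is 4 >= 4 and the atomic access can be done natively. */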
    }

    /*
     * Figure out which mapping entry to use.
     */
    unsigned iMemMap = pVCpu->iem.s.iNextMapping;
    if (   iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
        || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
    {
        iMemMap = iemMemMapFindFree(pVCpu);
        AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
                            ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
                             pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
                             pVCpu->iem.s.aMemMappings[2].fAccess),
                            IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_9));
    }

    /*
     * Crossing a page boundary?
     */
    if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE)
    { /* No (likely). */ }
    else
    {
        void *pvMem;
        rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, pbUnmapInfo, cbMem, GCPtrMem, fAccess);
        if (rcStrict == VINF_SUCCESS)
            return pvMem;
        IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
    }

#ifdef IEM_WITH_DATA_TLB
    Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));

    /*
     * Get the TLB entry for this page checking that it has the A & D bits
     * set as per fAccess flags.
     */
    /** @todo make the caller pass these in with fAccess. */
    uint64_t const fNoUser          = (fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS && IEM_GET_CPL(pVCpu) == 3
                                    ? IEMTLBE_F_PT_NO_USER : 0;
    uint64_t const fNoWriteNoDirty  = fAccess & IEM_ACCESS_TYPE_WRITE
                                    ?   IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PT_NO_DIRTY
                                      | (   (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)
                                         || (IEM_GET_CPL(pVCpu) == 3 && (fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
                                         ? IEMTLBE_F_PT_NO_WRITE : 0)
                                    : 0;
    uint64_t const fNoRead          = fAccess & IEM_ACCESS_TYPE_READ ? IEMTLBE_F_PG_NO_READ : 0;
    uint64_t const uTagNoRev        = IEMTLB_CALC_TAG_NO_REV(GCPtrMem);
    PIEMTLBENTRY   pTlbe            = IEMTLB_TAG_TO_EVEN_ENTRY(&pVCpu->iem.s.DataTlb, uTagNoRev);
    uint64_t const fTlbeAD          = IEMTLBE_F_PT_NO_ACCESSED | (fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY);
    if (   (   pTlbe->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision)
            && !(pTlbe->fFlagsAndPhysRev & fTlbeAD) )
        || (   (pTlbe = pTlbe + 1)->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal)
            && !(pTlbe->fFlagsAndPhysRev & fTlbeAD) ) )
    {
# ifdef IEM_WITH_TLB_STATISTICS
        if (a_fSafeCall)
            pVCpu->iem.s.DataTlb.cTlbSafeHits++;
        else
            pVCpu->iem.s.DataTlb.cTlbCoreHits++;
# endif
    }
    else
    {
        if (a_fSafeCall)
            pVCpu->iem.s.DataTlb.cTlbSafeMisses++;
        else
            pVCpu->iem.s.DataTlb.cTlbCoreMisses++;

        /* This page table walking will set A and D bits as required by the
           access while performing the walk.
           ASSUMES these are set when the address is translated rather than on commit... */
        /** @todo testcase: check when A and D bits are actually set by the CPU. */
        PGMPTWALKFAST WalkFast;
        AssertCompile(IEM_ACCESS_TYPE_READ  == PGMQPAGE_F_READ);
        AssertCompile(IEM_ACCESS_TYPE_WRITE == PGMQPAGE_F_WRITE);
        AssertCompile(IEM_ACCESS_TYPE_EXEC  == PGMQPAGE_F_EXECUTE);
        AssertCompile(X86_CR0_WP            == PGMQPAGE_F_CR0_WP0);
        uint32_t fQPage = (fAccess & (PGMQPAGE_F_READ | IEM_ACCESS_TYPE_WRITE | PGMQPAGE_F_EXECUTE))
                        | (((uint32_t)pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP) ^ X86_CR0_WP);
        if (IEM_GET_CPL(pVCpu) == 3 && !(fAccess & IEM_ACCESS_WHAT_SYS))
            fQPage |= PGMQPAGE_F_USER_MODE;
        int rc = PGMGstQueryPageFast(pVCpu, GCPtrMem, fQPage, &WalkFast);
        if (RT_SUCCESS(rc))
            Assert((WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED) && WalkFast.fFailed == PGM_WALKFAIL_SUCCESS);
        else
        {
            LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
            if (WalkFast.fFailed & PGM_WALKFAIL_EPT)
                IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
# endif
            iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, rc);
        }

        uint32_t fDataBps;
        if (   RT_LIKELY(!(pVCpu->iem.s.fExec & IEM_F_PENDING_BRK_DATA))
            || RT_LIKELY(!(fDataBps = iemMemCheckDataBreakpoint(pVCpu->CTX_SUFF(pVM), pVCpu, GCPtrMem, cbMem, fAccess))))
        {
            if (   !(WalkFast.fEffective & PGM_PTATTRS_G_MASK)
                || IEM_GET_CPL(pVCpu) != 0) /* optimization: Only use the PTE.G=1 entries in ring-0. */
            {
                pTlbe--;
                pTlbe->uTag = uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision;
                if (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE)
                    iemTlbLoadedLargePage<false>(pVCpu, &pVCpu->iem.s.DataTlb, uTagNoRev, RT_BOOL(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE));
# ifdef IEMTLB_WITH_LARGE_PAGE_BITMAP
                else
                    ASMBitClear(pVCpu->iem.s.DataTlb.bmLargePage, IEMTLB_TAG_TO_EVEN_INDEX(uTagNoRev));
# endif
            }
            else
            {
                if (a_fSafeCall)
                    pVCpu->iem.s.DataTlb.cTlbSafeGlobalLoads++;
                else
                    pVCpu->iem.s.DataTlb.cTlbCoreGlobalLoads++;
                pTlbe->uTag = uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal;
                if (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE)
                    iemTlbLoadedLargePage<true>(pVCpu, &pVCpu->iem.s.DataTlb, uTagNoRev, RT_BOOL(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE));
# ifdef IEMTLB_WITH_LARGE_PAGE_BITMAP
                else
                    ASMBitClear(pVCpu->iem.s.DataTlb.bmLargePage, IEMTLB_TAG_TO_EVEN_INDEX(uTagNoRev) + 1);
# endif
            }
        }
        else
        {
            /* If we hit a data breakpoint, we use a dummy TLBE to force all accesses
               to the page with the data access breakpoint armed on it to pass thru here. */
            if (fDataBps > 1)
                LogEx(LOG_GROUP_IEM, ("iemMemMapJmp<%d>: Data breakpoint: fDataBps=%#x for %RGv LB %zx; fAccess=%#x cs:rip=%04x:%08RX64\n",
                                      a_fSafeCall, fDataBps, GCPtrMem, cbMem, fAccess, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
            pVCpu->cpum.GstCtx.eflags.uBoth |= fDataBps & (CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_DBG_DBGF_MASK);
            pTlbe = &pVCpu->iem.s.DataBreakpointTlbe;
            pTlbe->uTag = uTagNoRev;
        }
        pTlbe->fFlagsAndPhysRev = (~WalkFast.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A) /* skipping NX */)
                                | (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE);
        RTGCPHYS const GCPhysPg = WalkFast.GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
        pTlbe->GCPhys      = GCPhysPg;
        pTlbe->pbMappingR3 = NULL;
        Assert(!(pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY) | IEMTLBE_F_PT_NO_ACCESSED)));
        Assert(   !(pTlbe->fFlagsAndPhysRev & fNoWriteNoDirty & IEMTLBE_F_PT_NO_WRITE)
               || (fQPage & (PGMQPAGE_F_CR0_WP0 | PGMQPAGE_F_USER_MODE)) == PGMQPAGE_F_CR0_WP0);
        Assert(!(pTlbe->fFlagsAndPhysRev & fNoUser & IEMTLBE_F_PT_NO_USER));

        if (pTlbe != &pVCpu->iem.s.DataBreakpointTlbe)
        {
            if (!((uintptr_t)pTlbe & (sizeof(*pTlbe) * 2 - 1)))
                IEMTLBTRACE_LOAD(       pVCpu, GCPtrMem, pTlbe->GCPhys, (uint32_t)pTlbe->fFlagsAndPhysRev, true);
            else
                IEMTLBTRACE_LOAD_GLOBAL(pVCpu, GCPtrMem, pTlbe->GCPhys, (uint32_t)pTlbe->fFlagsAndPhysRev, true);
        }

        /* Resolve the physical address. */
        Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_GCPHYS2PTR_MASK));
        uint8_t *pbMemFullLoad = NULL;
        rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, GCPhysPg, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
                                        &pbMemFullLoad, &pTlbe->fFlagsAndPhysRev);
        AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
# ifdef IN_RING3
        pTlbe->pbMappingR3 = pbMemFullLoad;
# endif
    }

    /*
     * Check the flags and physical revision.
     * Note! This will revalidate the uTlbPhysRev after a full load.  This is
     *       just to keep the code structure simple (i.e. avoid gotos or similar).
     */
    uint8_t *pbMem;
    if (   (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PT_NO_ACCESSED | fNoRead | fNoWriteNoDirty | fNoUser))
        == pVCpu->iem.s.DataTlb.uTlbPhysRev)
# ifdef IN_RING3
        pbMem = pTlbe->pbMappingR3;
# else
        pbMem = NULL;
# endif
    else
    {
        Assert(!(pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY) | IEMTLBE_F_PT_NO_ACCESSED)));

        /*
         * Okay, something isn't quite right or needs refreshing.
         */
        /* Write to read only memory? */
        if (pTlbe->fFlagsAndPhysRev & fNoWriteNoDirty & IEMTLBE_F_PT_NO_WRITE)
        {
            LogEx(LOG_GROUP_IEM, ("iemMemMapJmp: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
            /** @todo TLB: EPT isn't integrated into the TLB stuff, so we don't know whether
             *        to trigger an \#PG or a VM nested paging exit here yet! */
            if (Walk.fFailed & PGM_WALKFAIL_EPT)
                IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
# endif
            iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
        }

        /* Kernel memory accessed by userland? */
        if (pTlbe->fFlagsAndPhysRev & fNoUser & IEMTLBE_F_PT_NO_USER)
        {
            LogEx(LOG_GROUP_IEM, ("iemMemMapJmp: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
            /** @todo TLB: See above. */
            if (Walk.fFailed & PGM_WALKFAIL_EPT)
                IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
# endif
            iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, VERR_ACCESS_DENIED);
        }

        /*
         * Check if the physical page info needs updating.
         */
        if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)
# ifdef IN_RING3
            pbMem = pTlbe->pbMappingR3;
# else
            pbMem = NULL;
# endif
        else
        {
            pTlbe->pbMappingR3       = NULL;
            pTlbe->fFlagsAndPhysRev &= ~IEMTLBE_GCPHYS2PTR_MASK;
            pbMem = NULL;
            int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
                                                &pbMem, &pTlbe->fFlagsAndPhysRev);
            AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
# ifdef IN_RING3
            pTlbe->pbMappingR3 = pbMem;
# endif
        }

        /*
         * Check the physical page level access and mapping.
         */
        if (!(pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty | fNoRead) & (IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ))))
        { /* probably likely */ }
        else
        {
            rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, (void **)&pbMem, pbUnmapInfo, cbMem,
                                                 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,
                                                   pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED
                                                 : pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ    ? VERR_PGM_PHYS_TLB_CATCH_ALL
                                                                                                     : VERR_PGM_PHYS_TLB_CATCH_WRITE);
            if (rcStrict == VINF_SUCCESS)
                return pbMem;
            IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
        }
    }
    Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3)); /* ASSUMPTIONS about PGMPhysIemGCPhys2PtrNoLock behaviour. */

    if (pbMem)
    {
        Assert(!((uintptr_t)pbMem & GUEST_PAGE_OFFSET_MASK));
        pbMem    = pbMem + (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
        fAccess |= IEM_ACCESS_NOT_LOCKED;
    }
    else
    {
        Assert(!(fAccess & IEM_ACCESS_NOT_LOCKED));
        RTGCPHYS const GCPhysFirst = pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
        rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
        if (rcStrict == VINF_SUCCESS)
        {
            *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
            return pbMem;
        }
        IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
    }

    void * const pvMem = pbMem;

    if (fAccess & IEM_ACCESS_TYPE_WRITE)
        Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
    if (fAccess & IEM_ACCESS_TYPE_READ)
        Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));

#else  /* !IEM_WITH_DATA_TLB */


    RTGCPHYS GCPhysFirst;
    rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, &GCPhysFirst);
    if (rcStrict == VINF_SUCCESS) { /*likely*/ }
    else IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));

    if (fAccess & IEM_ACCESS_TYPE_WRITE)
        Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
    if (fAccess & IEM_ACCESS_TYPE_READ)
        Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));

    void *pvMem;
    rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
    if (rcStrict == VINF_SUCCESS)
    { /* likely */ }
    else
    {
        rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, pbUnmapInfo, cbMem, GCPhysFirst, fAccess, rcStrict);
        if (rcStrict == VINF_SUCCESS)
            return pvMem;
        IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
    }

#endif /* !IEM_WITH_DATA_TLB */

    /*
     * Fill in the mapping table entry.
     */
    pVCpu->iem.s.aMemMappings[iMemMap].pv      = pvMem;
    pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
    pVCpu->iem.s.iNextMapping = iMemMap + 1;
    pVCpu->iem.s.cActiveMappings++;

    iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);

    *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
    return pvMem;
}


/** @see iemMemMapJmp */
static void *iemMemMapSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem,
                              uint32_t fAccess, uint32_t uAlignCtl) IEM_NOEXCEPT_MAY_LONGJMP
{
    return iemMemMapJmp<true /*a_fSafeCall*/>(pVCpu, pbUnmapInfo, cbMem, iSegReg, GCPtrMem, fAccess, uAlignCtl);
}
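
/* The longjmp flavour is used the same way as the status-code variant, minus
   the rc plumbing; compare iemMemStoreDataU128AlignedSseJmp further down in
   this file.  A sketch only:

        uint8_t     bUnmapInfo;
        PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu128Dst), iSegReg, GCPtrMem,
                                                         IEM_ACCESS_DATA_W, sizeof(*pu128Dst) - 1);
        pu128Dst->au64[0] = 0;                       // use the mapping
        iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);  // longjmps on commit trouble
*/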


/**
 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   bUnmapInfo  Unmap info set by iemMemMapJmp.
 */
void iemMemCommitAndUnmapJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
{
    uintptr_t const iMemMap = bUnmapInfo & 0x7;
    AssertMsgReturnVoid(   (bUnmapInfo & 0x08)
                        && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
                        &&    (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf))
                           == ((unsigned)bUnmapInfo >> 4),
                        ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess));

    /* If it's bounce buffered, we may need to write back the buffer. */
    if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
    {
        if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
        {
            VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
            if (rcStrict == VINF_SUCCESS)
                return;
            IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
        }
    }
    /* Otherwise unlock it. */
    else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
        PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);

    /* Free the entry. */
    pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
    Assert(pVCpu->iem.s.cActiveMappings != 0);
    pVCpu->iem.s.cActiveMappings--;
}


/** Fallback for iemMemCommitAndUnmapRwJmp.  */
void iemMemCommitAndUnmapRwSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
{
    Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE));
    iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
}


/** Fallback for iemMemCommitAndUnmapAtJmp.  */
void iemMemCommitAndUnmapAtSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
{
    Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE));
    iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
}


/** Fallback for iemMemCommitAndUnmapWoJmp.  */
void iemMemCommitAndUnmapWoSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
{
    Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_WRITE);
    iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
}


/** Fallback for iemMemCommitAndUnmapRoJmp.  */
void iemMemCommitAndUnmapRoSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
{
    Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_READ);
    iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
}


/** Fallback for iemMemRollbackAndUnmapWo.  */
void iemMemRollbackAndUnmapWoSafe(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
{
    Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_WRITE);
    iemMemRollbackAndUnmap(pVCpu, bUnmapInfo);
}

#endif /* IEM_WITH_SETJMP */

#ifndef IN_RING3
/**
 * Commits the guest memory if bounce buffered and unmaps it; if any bounce
 * buffer part shows trouble the write will be postponed to ring-3 (sets FF
 * and stuff).
 *
 * Allows the instruction to be completed and retired, while the IEM user will
 * return to ring-3 immediately afterwards and do the postponed writes there.
 *
 * @returns VBox status code (no strict statuses).  Caller must check
 *          VMCPU_FF_IEM before repeating string instructions and similar stuff.
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   bUnmapInfo  Unmap info set by iemMemMap.
 */
VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
{
    uintptr_t const iMemMap = bUnmapInfo & 0x7;
    AssertMsgReturn(   (bUnmapInfo & 0x08)
                    && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
                    &&    (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf))
                       == ((unsigned)bUnmapInfo >> 4),
                    ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess),
                    VERR_NOT_FOUND);

    /* If it's bounce buffered, we may need to write back the buffer. */
    if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
    {
        if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
            return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
    }
    /* Otherwise unlock it. */
    else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
        PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);

    /* Free the entry. */
    pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
    Assert(pVCpu->iem.s.cActiveMappings != 0);
    pVCpu->iem.s.cActiveMappings--;
    return VINF_SUCCESS;
}
#endif


/**
 * Rolls back mappings, releasing page locks and such.
 *
 * The caller shall only call this after checking cActiveMappings.
 *
 * @param   pVCpu   The cross context virtual CPU structure of the calling thread.
 */
void iemMemRollback(PVMCPUCC pVCpu) RT_NOEXCEPT
{
    Assert(pVCpu->iem.s.cActiveMappings > 0);

    uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
    while (iMemMap-- > 0)
    {
        uint32_t const fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
        if (fAccess != IEM_ACCESS_INVALID)
        {
            AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
            pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
            if (!(fAccess & (IEM_ACCESS_BOUNCE_BUFFERED | IEM_ACCESS_NOT_LOCKED)))
                PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
            AssertMsg(pVCpu->iem.s.cActiveMappings > 0,
                      ("iMemMap=%u fAccess=%#x pv=%p GCPhysFirst=%RGp GCPhysSecond=%RGp\n",
                       iMemMap, fAccess, pVCpu->iem.s.aMemMappings[iMemMap].pv,
                       pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond));
            pVCpu->iem.s.cActiveMappings--;
        }
    }
}


/*
 * Instantiate R/W templates.
 */
#define TMPL_MEM_WITH_STACK

#define TMPL_MEM_TYPE       uint8_t
#define TMPL_MEM_FN_SUFF    U8
#define TMPL_MEM_FMT_TYPE   "%#04x"
#define TMPL_MEM_FMT_DESC   "byte"
#include "IEMAllMemRWTmpl.cpp.h"
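
/* Each of these includes stamps out a family of memory accessors from
   IEMAllMemRWTmpl.cpp.h, named after TMPL_MEM_FN_SUFF.  E.g. the uint16_t
   instantiation below is what provides the iemMemFetchDataU16 and
   iemMemStoreDataU16 used by iemMemFetchDataXdtr/iemMemStoreDataXdtr further
   down in this file; TMPL_MEM_WITH_STACK presumably adds the corresponding
   stack push/pop helpers on top (an assumption here - see the template file
   for the exact set generated). */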

#define TMPL_MEM_TYPE       uint16_t
#define TMPL_MEM_FN_SUFF    U16
#define TMPL_MEM_FMT_TYPE   "%#06x"
#define TMPL_MEM_FMT_DESC   "word"
#include "IEMAllMemRWTmpl.cpp.h"

#define TMPL_WITH_PUSH_SREG
#define TMPL_MEM_TYPE       uint32_t
#define TMPL_MEM_FN_SUFF    U32
#define TMPL_MEM_FMT_TYPE   "%#010x"
#define TMPL_MEM_FMT_DESC   "dword"
#include "IEMAllMemRWTmpl.cpp.h"
#undef TMPL_WITH_PUSH_SREG

#define TMPL_MEM_TYPE       uint64_t
#define TMPL_MEM_FN_SUFF    U64
#define TMPL_MEM_FMT_TYPE   "%#018RX64"
#define TMPL_MEM_FMT_DESC   "qword"
#include "IEMAllMemRWTmpl.cpp.h"

#undef TMPL_MEM_WITH_STACK

#define TMPL_MEM_TYPE       uint32_t
#define TMPL_MEM_TYPE_ALIGN 0
#define TMPL_MEM_FN_SUFF    U32NoAc
#define TMPL_MEM_FMT_TYPE   "%#010x"
#define TMPL_MEM_FMT_DESC   "dword"
#include "IEMAllMemRWTmpl.cpp.h"
#undef TMPL_WITH_PUSH_SREG

#define TMPL_MEM_TYPE       uint64_t
#define TMPL_MEM_TYPE_ALIGN 0
#define TMPL_MEM_FN_SUFF    U64NoAc
#define TMPL_MEM_FMT_TYPE   "%#018RX64"
#define TMPL_MEM_FMT_DESC   "qword"
#include "IEMAllMemRWTmpl.cpp.h"

#define TMPL_MEM_TYPE       uint64_t
#define TMPL_MEM_TYPE_ALIGN (sizeof(uint64_t) * 2 - 1)
#define TMPL_MEM_FN_SUFF    U64AlignedU128
#define TMPL_MEM_FMT_TYPE   "%#018RX64"
#define TMPL_MEM_FMT_DESC   "qword"
#include "IEMAllMemRWTmpl.cpp.h"

/* See IEMAllMemRWTmplInline.cpp.h */
#define TMPL_MEM_BY_REF

#define TMPL_MEM_TYPE       RTFLOAT80U
#define TMPL_MEM_TYPE_ALIGN (sizeof(uint64_t) - 1)
#define TMPL_MEM_FN_SUFF    R80
#define TMPL_MEM_FMT_TYPE   "%.10Rhxs"
#define TMPL_MEM_FMT_DESC   "tword"
#include "IEMAllMemRWTmpl.cpp.h"

#define TMPL_MEM_TYPE       RTPBCD80U
#define TMPL_MEM_TYPE_ALIGN (sizeof(uint64_t) - 1) /** @todo testcase: 80-bit BCD alignment */
#define TMPL_MEM_FN_SUFF    D80
#define TMPL_MEM_FMT_TYPE   "%.10Rhxs"
#define TMPL_MEM_FMT_DESC   "tword"
#include "IEMAllMemRWTmpl.cpp.h"

#define TMPL_MEM_TYPE       RTUINT128U
#define TMPL_MEM_TYPE_ALIGN (sizeof(RTUINT128U) - 1)
#define TMPL_MEM_FN_SUFF    U128
#define TMPL_MEM_FMT_TYPE   "%.16Rhxs"
#define TMPL_MEM_FMT_DESC   "dqword"
#include "IEMAllMemRWTmpl.cpp.h"

#define TMPL_MEM_TYPE           RTUINT128U
#define TMPL_MEM_TYPE_ALIGN     (sizeof(RTUINT128U) - 1)
#define TMPL_MEM_MAP_FLAGS_ADD  (IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE)
#define TMPL_MEM_FN_SUFF        U128AlignedSse
#define TMPL_MEM_FMT_TYPE       "%.16Rhxs"
#define TMPL_MEM_FMT_DESC       "dqword"
#include "IEMAllMemRWTmpl.cpp.h"

#define TMPL_MEM_TYPE       RTUINT128U
#define TMPL_MEM_TYPE_ALIGN 0
#define TMPL_MEM_FN_SUFF    U128NoAc
#define TMPL_MEM_FMT_TYPE   "%.16Rhxs"
#define TMPL_MEM_FMT_DESC   "dqword"
#include "IEMAllMemRWTmpl.cpp.h"

#define TMPL_MEM_TYPE       RTUINT256U
#define TMPL_MEM_TYPE_ALIGN 0
#define TMPL_MEM_FN_SUFF    U256NoAc
#define TMPL_MEM_FMT_TYPE   "%.32Rhxs"
#define TMPL_MEM_FMT_DESC   "qqword"
#include "IEMAllMemRWTmpl.cpp.h"

#define TMPL_MEM_TYPE           RTUINT256U
#define TMPL_MEM_TYPE_ALIGN     (sizeof(RTUINT256U) - 1)
#define TMPL_MEM_MAP_FLAGS_ADD  IEM_MEMMAP_F_ALIGN_GP
#define TMPL_MEM_FN_SUFF        U256AlignedAvx
#define TMPL_MEM_FMT_TYPE       "%.32Rhxs"
#define TMPL_MEM_FMT_DESC       "qqword"
#include "IEMAllMemRWTmpl.cpp.h"

/**
 * Fetches a data dword and zero extends it to a qword.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   pu64Dst     Where to return the qword.
 * @param   iSegReg     The index of the segment register to use for
 *                      this access.  The base and limits are checked.
 * @param   GCPtrMem    The address of the guest memory.
 */
VBOXSTRICTRC iemMemFetchDataU32_ZX_U64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
{
    /* The lazy approach for now... */
    uint8_t         bUnmapInfo;
    uint32_t const *pu32Src;
    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, &bUnmapInfo, sizeof(*pu32Src), iSegReg, GCPtrMem,
                                IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
    if (rc == VINF_SUCCESS)
    {
        *pu64Dst = *pu32Src;
        rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
        Log(("IEM RD dword %d|%RGv: %#010RX64\n", iSegReg, GCPtrMem, *pu64Dst));
    }
    return rc;
}


#ifdef SOME_UNUSED_FUNCTION
/**
 * Fetches a data dword and sign extends it to a qword.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   pu64Dst     Where to return the sign extended value.
 * @param   iSegReg     The index of the segment register to use for
 *                      this access.  The base and limits are checked.
 * @param   GCPtrMem    The address of the guest memory.
 */
VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
{
    /* The lazy approach for now... */
    uint8_t        bUnmapInfo;
    int32_t const *pi32Src;
    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, &bUnmapInfo, sizeof(*pi32Src), iSegReg, GCPtrMem,
                                IEM_ACCESS_DATA_R, sizeof(*pi32Src) - 1);
    if (rc == VINF_SUCCESS)
    {
        *pu64Dst = *pi32Src;
        rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
        Log(("IEM RD dword %d|%RGv: %#010x\n", iSegReg, GCPtrMem, (uint32_t)*pu64Dst));
    }
#ifdef __GNUC__ /* warning: GCC may be a royal pain */
    else
        *pu64Dst = 0;
#endif
    return rc;
}
#endif


/**
 * Fetches a descriptor register (lgdt, lidt).
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   pcbLimit    Where to return the limit.
 * @param   pGCPtrBase  Where to return the base.
 * @param   iSegReg     The index of the segment register to use for
 *                      this access.  The base and limits are checked.
 * @param   GCPtrMem    The address of the guest memory.
 * @param   enmOpSize   The effective operand size.
 */
VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPUCC pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
                                 RTGCPTR GCPtrMem, IEMMODE enmOpSize) RT_NOEXCEPT
{
    /*
     * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
     * little special:
     *      - The two reads are done separately.
     *      - Operand size override works in 16-bit and 32-bit code, but not in
     *        64-bit code.
     *      - We suspect the 386 to actually commit the limit before the base in
     *        some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One).  We
     *        don't try to emulate this eccentric behavior, because it's not well
     *        enough understood and rather hard to trigger.
     *      - The 486 seems to do a dword limit read when the operand size is 32-bit.
     */
    VBOXSTRICTRC rcStrict;
    if (IEM_IS_64BIT_CODE(pVCpu))
    {
        rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
        if (rcStrict == VINF_SUCCESS)
            rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
    }
    else
    {
        uint32_t uTmp = 0; /* (Visual C++ may warn about this being used uninitialized.) */
        if (enmOpSize == IEMMODE_32BIT)
        {
            if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
            {
                rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
                if (rcStrict == VINF_SUCCESS)
                    rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
            }
            else
            {
                rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
                if (rcStrict == VINF_SUCCESS)
                {
                    *pcbLimit = (uint16_t)uTmp;
                    rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
                }
            }
            if (rcStrict == VINF_SUCCESS)
                *pGCPtrBase = uTmp;
        }
        else
        {
            rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
            if (rcStrict == VINF_SUCCESS)
            {
                rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
                if (rcStrict == VINF_SUCCESS)
                    *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
            }
        }
    }
    return rcStrict;
}
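
/* Worked example for the 16-bit operand size path above (illustrative values
   only): if the six bytes at GCPtrMem hold the limit 0x03ff followed by the
   dword 0x12345678, only 24 bits of the base are kept, so *pGCPtrBase ends up
   as 0x00345678 (0x12345678 & 0x00ffffff).  The top base byte is only used
   with a 32-bit or 64-bit operand size. */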


/**
 * Stores a data dqword, SSE aligned.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   iSegReg     The index of the segment register to use for
 *                      this access.  The base and limits are checked.
 * @param   GCPtrMem    The address of the guest memory.
 * @param   u128Value   The value to store.
 */
VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT
{
    /* The lazy approach for now... */
    uint8_t     bUnmapInfo;
    PRTUINT128U pu128Dst;
    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, &bUnmapInfo, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W,
                                (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
    if (rc == VINF_SUCCESS)
    {
        pu128Dst->au64[0] = u128Value.au64[0];
        pu128Dst->au64[1] = u128Value.au64[1];
        rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
        Log5(("IEM WR dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
    }
    return rc;
}


#ifdef IEM_WITH_SETJMP
/**
 * Stores a data dqword, SSE aligned, longjmp on error.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   iSegReg     The index of the segment register to use for
 *                      this access.  The base and limits are checked.
 * @param   GCPtrMem    The address of the guest memory.
 * @param   u128Value   The value to store.
 */
void iemMemStoreDataU128AlignedSseJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem,
                                      RTUINT128U u128Value) IEM_NOEXCEPT_MAY_LONGJMP
{
    /* The lazy approach for now... */
    uint8_t     bUnmapInfo;
    PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W,
                                                     (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
    pu128Dst->au64[0] = u128Value.au64[0];
    pu128Dst->au64[1] = u128Value.au64[1];
    iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
    Log5(("IEM WR dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
}
#endif


/**
 * Stores a data qqword.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   iSegReg     The index of the segment register to use for
 *                      this access.  The base and limits are checked.
 * @param   GCPtrMem    The address of the guest memory.
 * @param   pu256Value  Pointer to the value to store.
 */
VBOXSTRICTRC iemMemStoreDataU256(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
{
    /* The lazy approach for now... */
    uint8_t     bUnmapInfo;
    PRTUINT256U pu256Dst;
    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, &bUnmapInfo, sizeof(*pu256Dst), iSegReg, GCPtrMem,
                                IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
    if (rc == VINF_SUCCESS)
    {
        pu256Dst->au64[0] = pu256Value->au64[0];
        pu256Dst->au64[1] = pu256Value->au64[1];
        pu256Dst->au64[2] = pu256Value->au64[2];
        pu256Dst->au64[3] = pu256Value->au64[3];
        rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
        Log5(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
    }
    return rc;
}


#ifdef IEM_WITH_SETJMP
/**
 * Stores a data qqword, longjmp on error.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   iSegReg     The index of the segment register to use for
 *                      this access.  The base and limits are checked.
 * @param   GCPtrMem    The address of the guest memory.
 * @param   pu256Value  Pointer to the value to store.
 */
void iemMemStoreDataU256Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP
{
    /* The lazy approach for now... */
    uint8_t     bUnmapInfo;
    PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu256Dst), iSegReg, GCPtrMem,
                                                     IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
    pu256Dst->au64[0] = pu256Value->au64[0];
    pu256Dst->au64[1] = pu256Value->au64[1];
    pu256Dst->au64[2] = pu256Value->au64[2];
    pu256Dst->au64[3] = pu256Value->au64[3];
    iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
    Log5(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
}
#endif


/**
 * Stores a descriptor register (sgdt, sidt).
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   cbLimit     The limit.
 * @param   GCPtrBase   The base address.
 * @param   iSegReg     The index of the segment register to use for
 *                      this access.  The base and limits are checked.
 * @param   GCPtrMem    The address of the guest memory.
 */
VBOXSTRICTRC iemMemStoreDataXdtr(PVMCPUCC pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
{
    /*
     * The SIDT and SGDT instructions actually store the data using two
     * independent writes (see bs3CpuBasic2_sidt_sgdt_One).  The instructions
     * do not respond to opsize prefixes.
     */
    VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
    if (rcStrict == VINF_SUCCESS)
    {
        if (IEM_IS_16BIT_CODE(pVCpu))
            rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
                                          IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
                                          ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
        else if (IEM_IS_32BIT_CODE(pVCpu))
            rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
        else
            rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
    }
    return rcStrict;
}
|
---|
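
/*
 * Memory image sketch (added for illustration; the struct name is hypothetical
 * and not used anywhere): in 64-bit code the two writes above produce a packed
 * 10 byte image, in 16/32-bit code a 6 byte one with a 32-bit base field:
 *
 *      #pragma pack(1)
 *      typedef struct XDTRIMAGE64
 *      {
 *          uint16_t cbLimit;   // bytes 0..1: the table limit, always 16 bits.
 *          uint64_t uBase;     // bytes 2..9: the linear base address; 4 bytes
 *                              // in 16/32-bit code, with the high byte forced
 *                              // to 0xff when targeting a 286.
 *      } XDTRIMAGE64;
 *      #pragma pack()
 */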


/**
 * Begin a special stack push (used by interrupt, exceptions and such).
 *
 * This will raise \#SS or \#PF if appropriate.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
 * @param   cbMem               The number of bytes to push onto the stack.
 * @param   cbAlign             The alignment mask (7, 3, 1).
 * @param   ppvMem              Where to return the pointer to the stack memory.
 *                              As with the other memory functions this could be
 *                              direct access or bounce buffered access, so
 *                              don't commit any register values until the
 *                              commit call succeeds.
 * @param   pbUnmapInfo         Where to store unmap info for
 *                              iemMemStackPushCommitSpecial.
 * @param   puNewRsp            Where to return the new RSP value.  This must be
 *                              passed unchanged to
 *                              iemMemStackPushCommitSpecial().
 */
VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
                                         void **ppvMem, uint8_t *pbUnmapInfo, uint64_t *puNewRsp) RT_NOEXCEPT
{
    Assert(cbMem < UINT8_MAX);
    RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, (uint8_t)cbMem, puNewRsp);
    return iemMemMap(pVCpu, ppvMem, pbUnmapInfo, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W, cbAlign);
}


/**
 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
 *
 * This will update the rSP.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
 * @param   bUnmapInfo          Unmap info set by iemMemStackPushBeginSpecial.
 * @param   uNewRsp             The new RSP value returned by
 *                              iemMemStackPushBeginSpecial().
 */
VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPUCC pVCpu, uint8_t bUnmapInfo, uint64_t uNewRsp) RT_NOEXCEPT
{
    VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
    if (rcStrict == VINF_SUCCESS)
        pVCpu->cpum.GstCtx.rsp = uNewRsp;
    return rcStrict;
}
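
/*
 * Caller-side sketch (added for illustration; iemExamplePushU64 and the pushed
 * value are made up): the begin/commit pair ensures nothing becomes guest
 * visible, and RSP stays untouched, unless both steps succeed.
 *
 *      static VBOXSTRICTRC iemExamplePushU64(PVMCPUCC pVCpu, uint64_t uValue)
 *      {
 *          void    *pvMem      = NULL;
 *          uint8_t  bUnmapInfo = 0;
 *          uint64_t uNewRsp    = 0;
 *          VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pVCpu, sizeof(uint64_t), 7,  // 7 = alignment mask
 *                                                              &pvMem, &bUnmapInfo, &uNewRsp);
 *          if (rcStrict == VINF_SUCCESS)
 *          {
 *              *(uint64_t *)pvMem = uValue;            // write into the mapped or bounce buffer
 *              rcStrict = iemMemStackPushCommitSpecial(pVCpu, bUnmapInfo, uNewRsp); // commits + updates RSP
 *          }
 *          return rcStrict;
 *      }
 */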


/**
 * Begin a special stack pop (used by iret, retf and such).
 *
 * This will raise \#SS or \#PF if appropriate.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
 * @param   cbMem               The number of bytes to pop from the stack.
 * @param   cbAlign             The alignment mask (7, 3, 1).
 * @param   ppvMem              Where to return the pointer to the stack memory.
 * @param   pbUnmapInfo         Where to store unmap info for
 *                              iemMemStackPopDoneSpecial.
 * @param   puNewRsp            Where to return the new RSP value.  This must be
 *                              assigned to CPUMCTX::rsp manually some time
 *                              after iemMemStackPopDoneSpecial() has been
 *                              called.
 */
VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
                                        void const **ppvMem, uint8_t *pbUnmapInfo, uint64_t *puNewRsp) RT_NOEXCEPT
{
    Assert(cbMem < UINT8_MAX);
    RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, (uint8_t)cbMem, puNewRsp);
    return iemMemMap(pVCpu, (void **)ppvMem, pbUnmapInfo, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R, cbAlign);
}


/**
 * Continue a special stack pop (used by iret and retf), for the purpose of
 * retrieving a new stack pointer.
 *
 * This will raise \#SS or \#PF if appropriate.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
 * @param   off                 Offset from the top of the stack.  This is zero
 *                              except in the retf case.
 * @param   cbMem               The number of bytes to pop from the stack.
 * @param   ppvMem              Where to return the pointer to the stack memory.
 * @param   pbUnmapInfo         Where to store unmap info for
 *                              iemMemStackPopDoneSpecial.
 * @param   uCurNewRsp          The current uncommitted RSP value.  (No need to
 *                              return this because all use of this function is
 *                              to retrieve a new value and anything we return
 *                              here would be discarded.)
 */
VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPUCC pVCpu, size_t off, size_t cbMem,
                                           void const **ppvMem, uint8_t *pbUnmapInfo, uint64_t uCurNewRsp) RT_NOEXCEPT
{
    Assert(cbMem < UINT8_MAX);

    /* The essence of iemRegGetRspForPopEx and friends: */ /** @todo put this into an inlined function? */
    RTGCPTR GCPtrTop;
    if (IEM_IS_64BIT_CODE(pVCpu))
        GCPtrTop = uCurNewRsp;
    else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
        GCPtrTop = (uint32_t)uCurNewRsp;
    else
        GCPtrTop = (uint16_t)uCurNewRsp;

    return iemMemMap(pVCpu, (void **)ppvMem, pbUnmapInfo, cbMem, X86_SREG_SS, GCPtrTop + off, IEM_ACCESS_STACK_R,
                     0 /* checked in iemMemStackPopBeginSpecial */);
}
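
/*
 * Sketch of the stack pointer truncation above as a standalone helper (added
 * for illustration; iemExampleRspToStackPtr is a made-up name): the linear
 * top-of-stack is the full RSP in 64-bit code, ESP when SS is a 32-bit
 * (D/B=1) segment, and SP otherwise.
 *
 *      DECLINLINE(RTGCPTR) iemExampleRspToStackPtr(PCVMCPUCC pVCpu, uint64_t uRsp)
 *      {
 *          if (IEM_IS_64BIT_CODE(pVCpu))
 *              return uRsp;                            // flat 64-bit stack
 *          if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
 *              return (uint32_t)uRsp;                  // 32-bit stack segment
 *          return (uint16_t)uRsp;                      // 16-bit stack segment
 *      }
 */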


/**
 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
 * iemMemStackPopContinueSpecial).
 *
 * The caller will manually commit the rSP.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
 * @param   bUnmapInfo          Unmap information returned by
 *                              iemMemStackPopBeginSpecial() or
 *                              iemMemStackPopContinueSpecial().
 */
VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
{
    return iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
}
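
/*
 * Caller-side sketch (added for illustration; iemExamplePopU64 is a made-up
 * name): unlike the push path, the pop path leaves the RSP commit to the
 * caller, which typically happens only once all parts of iret/retf succeed.
 *
 *      static VBOXSTRICTRC iemExamplePopU64(PVMCPUCC pVCpu, uint64_t *puValue)
 *      {
 *          void const *pvMem      = NULL;
 *          uint8_t     bUnmapInfo = 0;
 *          uint64_t    uNewRsp    = 0;
 *          VBOXSTRICTRC rcStrict = iemMemStackPopBeginSpecial(pVCpu, sizeof(uint64_t), 7,  // 7 = alignment mask
 *                                                             &pvMem, &bUnmapInfo, &uNewRsp);
 *          if (rcStrict == VINF_SUCCESS)
 *          {
 *              *puValue = *(uint64_t const *)pvMem;
 *              rcStrict = iemMemStackPopDoneSpecial(pVCpu, bUnmapInfo);
 *              if (rcStrict == VINF_SUCCESS)
 *                  pVCpu->cpum.GstCtx.rsp = uNewRsp;   // the manual RSP commit
 *          }
 *          return rcStrict;
 *      }
 */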


/**
 * Fetches a system table byte.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
 * @param   pbDst               Where to return the byte.
 * @param   iSegReg             The index of the segment register to use for
 *                              this access.  The base and limits are checked.
 * @param   GCPtrMem            The address of the guest memory.
 */
VBOXSTRICTRC iemMemFetchSysU8(PVMCPUCC pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
{
    /* The lazy approach for now... */
    uint8_t        bUnmapInfo;
    uint8_t const *pbSrc;
    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, &bUnmapInfo, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
    if (rc == VINF_SUCCESS)
    {
        *pbDst = *pbSrc;
        rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
    }
    return rc;
}


/**
 * Fetches a system table word.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
 * @param   pu16Dst             Where to return the word.
 * @param   iSegReg             The index of the segment register to use for
 *                              this access.  The base and limits are checked.
 * @param   GCPtrMem            The address of the guest memory.
 */
VBOXSTRICTRC iemMemFetchSysU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
{
    /* The lazy approach for now... */
    uint8_t         bUnmapInfo;
    uint16_t const *pu16Src;
    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, &bUnmapInfo, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
    if (rc == VINF_SUCCESS)
    {
        *pu16Dst = *pu16Src;
        rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
    }
    return rc;
}


/**
 * Fetches a system table dword.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
 * @param   pu32Dst             Where to return the dword.
 * @param   iSegReg             The index of the segment register to use for
 *                              this access.  The base and limits are checked.
 * @param   GCPtrMem            The address of the guest memory.
 */
VBOXSTRICTRC iemMemFetchSysU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
{
    /* The lazy approach for now... */
    uint8_t         bUnmapInfo;
    uint32_t const *pu32Src;
    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, &bUnmapInfo, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
    if (rc == VINF_SUCCESS)
    {
        *pu32Dst = *pu32Src;
        rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
    }
    return rc;
}


/**
 * Fetches a system table qword.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
 * @param   pu64Dst             Where to return the qword.
 * @param   iSegReg             The index of the segment register to use for
 *                              this access.  The base and limits are checked.
 * @param   GCPtrMem            The address of the guest memory.
 */
VBOXSTRICTRC iemMemFetchSysU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
{
    /* The lazy approach for now... */
    uint8_t         bUnmapInfo;
    uint64_t const *pu64Src;
    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, &bUnmapInfo, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
    if (rc == VINF_SUCCESS)
    {
        *pu64Dst = *pu64Src;
        rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
    }
    return rc;
}
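
/*
 * Pattern note (added commentary): the four iemMemFetchSysU* helpers above are
 * instances of one map/copy/commit idiom - map the guest bytes for
 * IEM_ACCESS_SYS_R, copy them out, then commit and unmap.  A generic sketch,
 * assuming a hypothetical byte-count based variant:
 *
 *      static VBOXSTRICTRC iemExampleFetchSys(PVMCPUCC pVCpu, void *pvDst, size_t cbToRead,
 *                                             uint8_t iSegReg, RTGCPTR GCPtrMem)
 *      {
 *          uint8_t     bUnmapInfo;
 *          void const *pvSrc;
 *          VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pvSrc, &bUnmapInfo, cbToRead,
 *                                      iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
 *          if (rc == VINF_SUCCESS)
 *          {
 *              memcpy(pvDst, pvSrc, cbToRead);         // copy while still mapped
 *              rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
 *          }
 *          return rc;
 *      }
 */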


/**
 * Fetches a descriptor table entry with a caller-specified error code.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
 * @param   pDesc               Where to return the descriptor table entry.
 * @param   uSel                The selector which table entry to fetch.
 * @param   uXcpt               The exception to raise on table lookup error.
 * @param   uErrorCode          The error code associated with the exception.
 */
static VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,
                                              uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT
{
    AssertPtr(pDesc);
    IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);

    /** @todo did the 286 require all 8 bytes to be accessible? */
    /*
     * Get the selector table base and check bounds.
     */
    RTGCPTR GCPtrBase;
    if (uSel & X86_SEL_LDT)
    {
        if (   !pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present
            || (uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.ldtr.u32Limit)
        {
            LogEx(LOG_GROUP_IEM, ("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
                                  uSel, pVCpu->cpum.GstCtx.ldtr.u32Limit, pVCpu->cpum.GstCtx.ldtr.Sel));
            return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
                                     uErrorCode, 0);
        }

        Assert(pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present);
        GCPtrBase = pVCpu->cpum.GstCtx.ldtr.u64Base;
    }
    else
    {
        if ((uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.gdtr.cbGdt)
        {
            LogEx(LOG_GROUP_IEM, ("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pVCpu->cpum.GstCtx.gdtr.cbGdt));
            return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
                                     uErrorCode, 0);
        }
        GCPtrBase = pVCpu->cpum.GstCtx.gdtr.pGdt;
    }

    /*
     * Read the legacy descriptor and maybe the long mode extensions if
     * required.
     */
    VBOXSTRICTRC rcStrict;
    if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)
        rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
    else
    {
        rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);
        if (rcStrict == VINF_SUCCESS)
            rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);
        if (rcStrict == VINF_SUCCESS)
            rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);
        if (rcStrict == VINF_SUCCESS)
            pDesc->Legacy.au16[3] = 0;
        else
            return rcStrict;
    }

    if (rcStrict == VINF_SUCCESS)
    {
        if (   !IEM_IS_LONG_MODE(pVCpu)
            || pDesc->Legacy.Gen.u1DescType)
            pDesc->Long.au64[1] = 0;
        else if (   (uint32_t)(uSel | X86_SEL_RPL_LDT) + 8
                 <= (uSel & X86_SEL_LDT ? pVCpu->cpum.GstCtx.ldtr.u32Limit : pVCpu->cpum.GstCtx.gdtr.cbGdt))
            rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
        else
        {
            LogEx(LOG_GROUP_IEM, ("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
            /** @todo is this the right exception? */
            return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
        }
    }
    return rcStrict;
}
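
/*
 * Selector anatomy reminder (added commentary): the bound checks above rely on
 * the standard x86 selector layout, sketched here with the x86.h constants:
 *
 *      // bits 15:3 - index into the GDT/LDT; byte offset = uSel & X86_SEL_MASK
 *      // bit     2 - table indicator, X86_SEL_LDT: 0 = GDT, 1 = LDT
 *      // bits  1:0 - requested privilege level (RPL)
 *      uint16_t const offDesc = uSel & X86_SEL_MASK;       // descriptor byte offset
 *      bool     const fLdt    = RT_BOOL(uSel & X86_SEL_LDT);
 *      uint8_t  const uRpl    = uSel & X86_SEL_RPL;
 *
 * ORing in X86_SEL_RPL_LDT (the low three bits) before comparing against the
 * limit makes the check cover the last byte of the 8 byte descriptor.
 */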


/**
 * Fetches a descriptor table entry.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
 * @param   pDesc               Where to return the descriptor table entry.
 * @param   uSel                The selector which table entry to fetch.
 * @param   uXcpt               The exception to raise on table lookup error.
 */
VBOXSTRICTRC iemMemFetchSelDesc(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt) RT_NOEXCEPT
{
    return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
}


/**
 * Marks the selector descriptor as accessed (only non-system descriptors).
 *
 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
 * will therefore skip the limit checks.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
 * @param   uSel                The selector.
 */
VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
{
    /*
     * Get the selector table base and calculate the entry address.
     */
    RTGCPTR GCPtr = uSel & X86_SEL_LDT
                  ? pVCpu->cpum.GstCtx.ldtr.u64Base
                  : pVCpu->cpum.GstCtx.gdtr.pGdt;
    GCPtr += uSel & X86_SEL_MASK;

    /*
     * ASMAtomicBitSet will assert if the address is misaligned, so do some
     * ugly stuff to avoid this.  This makes sure the access is atomic and
     * more or less removes any question about it being an 8-bit or a 32-bit
     * access.
     */
    VBOXSTRICTRC       rcStrict;
    uint8_t            bUnmapInfo;
    uint32_t volatile *pu32;
    if ((GCPtr & 3) == 0)
    {
        /* The normal case: map the 32 bits around the accessed bit (bit 40). */
        GCPtr += 2 + 2;
        rcStrict = iemMemMap(pVCpu, (void **)&pu32, &bUnmapInfo, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0);
        if (rcStrict != VINF_SUCCESS)
            return rcStrict;
        ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
    }
    else
    {
        /* The misaligned GDT/LDT case: map the whole thing. */
        rcStrict = iemMemMap(pVCpu, (void **)&pu32, &bUnmapInfo, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0);
        if (rcStrict != VINF_SUCCESS)
            return rcStrict;
        switch ((uintptr_t)pu32 & 3)
        {
            case 0: ASMAtomicBitSet(pu32,                         40 + 0 -  0); break;
            case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
            case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
            case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 -  8); break;
        }
    }

    return iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
}
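
/*
 * Bit math recap (added commentary): the accessed bit is bit 40 of the 8 byte
 * descriptor.  The aligned path maps only the dword at descriptor offset 4, so
 * bit 40 becomes bit 40 - 32 = 8 of the mapped dword.  The misaligned path
 * maps all 8 bytes and then derives a dword aligned base pointer: advancing
 * the byte pointer by (4 - ((uintptr_t)pu32 & 3)) bytes restores alignment,
 * and each byte skipped costs 8 bits of bit index.  E.g. for case 1 the base
 * moves +3 bytes (24 bits), leaving bit 40 - 24 = 16 for ASMAtomicBitSet.
 */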


#undef LOG_GROUP
#define LOG_GROUP LOG_GROUP_IEM

/** @} */

/** @name   Opcode Helpers.
 * @{
 */

/**
 * Calculates the effective address of a ModR/M memory operand.
 *
 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
 *
 * @return  Strict VBox status code.
 * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
 * @param   bRm                 The ModRM byte.
 * @param   cbImmAndRspOffset   - First byte: The size of any immediate
 *                                following the effective address opcode bytes
 *                                (only for RIP relative addressing).
 *                              - Second byte: RSP displacement (for POP [ESP]).
 * @param   pGCPtrEff           Where to return the effective address.
 */
VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset, PRTGCPTR pGCPtrEff) RT_NOEXCEPT
{
    Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
# define SET_SS_DEF() \
    do \
    { \
        if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
            pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
    } while (0)

    if (!IEM_IS_64BIT_CODE(pVCpu))
    {
/** @todo Check the effective address size crap! */
        if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
        {
            uint16_t u16EffAddr;

            /* Handle the disp16 form with no registers first. */
            if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
                IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
            else
            {
                /* Get the displacement. */
                switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
                {
                    case 0:  u16EffAddr = 0;                             break;
                    case 1:  IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
                    case 2:  IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);       break;
                    default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
                }

                /* Add the base and index registers to the disp. */
                switch (bRm & X86_MODRM_RM_MASK)
                {
                    case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
                    case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
                    case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
                    case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
                    case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
                    case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
                    case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
                    case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
                }
            }

            *pGCPtrEff = u16EffAddr;
        }
        else
        {
            Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
            uint32_t u32EffAddr;

            /* Handle the disp32 form with no registers first. */
            if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
                IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
            else
            {
                /* Get the register (or SIB) value. */
                switch ((bRm & X86_MODRM_RM_MASK))
                {
                    case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
                    case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
                    case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
                    case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
                    case 4: /* SIB */
                    {
                        uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);

                        /* Get the index and scale it. */
                        switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
                        {
                            case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
                            case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
                            case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
                            case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
                            case 4: u32EffAddr = 0; /*none */ break;
                            case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
                            case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
                            case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
                            IEM_NOT_REACHED_DEFAULT_CASE_RET();
                        }
                        u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;

                        /* add base */
                        switch (bSib & X86_SIB_BASE_MASK)
                        {
                            case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
                            case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
                            case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
                            case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
                            case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
                            case 5:
                                if ((bRm & X86_MODRM_MOD_MASK) != 0)
                                {
                                    u32EffAddr += pVCpu->cpum.GstCtx.ebp;
                                    SET_SS_DEF();
                                }
                                else
                                {
                                    uint32_t u32Disp;
                                    IEM_OPCODE_GET_NEXT_U32(&u32Disp);
                                    u32EffAddr += u32Disp;
                                }
                                break;
                            case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
                            case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
                            IEM_NOT_REACHED_DEFAULT_CASE_RET();
                        }
                        break;
                    }
                    case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
                    case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
                    case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }

                /* Get and add the displacement. */
                switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
                {
                    case 0:
                        break;
                    case 1:
                    {
                        int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
                        u32EffAddr += i8Disp;
                        break;
                    }
                    case 2:
                    {
                        uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
                        u32EffAddr += u32Disp;
                        break;
                    }
                    default:
                        AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
                }

            }
            Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
            *pGCPtrEff = u32EffAddr;
        }
    }
    else
    {
        uint64_t u64EffAddr;

        /* Handle the rip+disp32 form with no registers first. */
        if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
        {
            IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
            u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
        }
        else
        {
            /* Get the register (or SIB) value. */
            switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
            {
                case  0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
                case  1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
                case  2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
                case  3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
                case  5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
                case  6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
                case  7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
                case  8: u64EffAddr = pVCpu->cpum.GstCtx.r8;  break;
                case  9: u64EffAddr = pVCpu->cpum.GstCtx.r9;  break;
                case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
                case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
                case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
                case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
                case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
                /* SIB */
                case 4:
                case 12:
                {
                    uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);

                    /* Get the index and scale it. */
                    switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
                    {
                        case  0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
                        case  1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
                        case  2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
                        case  3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
                        case  4: u64EffAddr = 0; /*none */ break;
                        case  5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
                        case  6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
                        case  7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
                        case  8: u64EffAddr = pVCpu->cpum.GstCtx.r8;  break;
                        case  9: u64EffAddr = pVCpu->cpum.GstCtx.r9;  break;
                        case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
                        case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
                        case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
                        case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
                        case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
                        case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
                        IEM_NOT_REACHED_DEFAULT_CASE_RET();
                    }
                    u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;

                    /* add base */
                    switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
                    {
                        case  0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
                        case  1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
                        case  2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
                        case  3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
                        case  4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
                        case  6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
                        case  7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
                        case  8: u64EffAddr += pVCpu->cpum.GstCtx.r8;  break;
                        case  9: u64EffAddr += pVCpu->cpum.GstCtx.r9;  break;
                        case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
                        case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
                        case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
                        case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
                        case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
                        /* complicated encodings */
                        case 5:
                        case 13:
                            if ((bRm & X86_MODRM_MOD_MASK) != 0)
                            {
                                if (!pVCpu->iem.s.uRexB)
                                {
                                    u64EffAddr += pVCpu->cpum.GstCtx.rbp;
                                    SET_SS_DEF();
                                }
                                else
                                    u64EffAddr += pVCpu->cpum.GstCtx.r13;
                            }
                            else
                            {
                                uint32_t u32Disp;
                                IEM_OPCODE_GET_NEXT_U32(&u32Disp);
                                u64EffAddr += (int32_t)u32Disp;
                            }
                            break;
                        IEM_NOT_REACHED_DEFAULT_CASE_RET();
                    }
                    break;
                }
                IEM_NOT_REACHED_DEFAULT_CASE_RET();
            }

            /* Get and add the displacement. */
            switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
            {
                case 0:
                    break;
                case 1:
                {
                    int8_t i8Disp;
                    IEM_OPCODE_GET_NEXT_S8(&i8Disp);
                    u64EffAddr += i8Disp;
                    break;
                }
                case 2:
                {
                    uint32_t u32Disp;
                    IEM_OPCODE_GET_NEXT_U32(&u32Disp);
                    u64EffAddr += (int32_t)u32Disp;
                    break;
                }
                IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
            }

        }

        if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
            *pGCPtrEff = u64EffAddr;
        else
        {
            Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
            *pGCPtrEff = u64EffAddr & UINT32_MAX;
        }
    }

    Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
    return VINF_SUCCESS;
}
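
/*
 * Worked example (added commentary, values made up): for the 32-bit encoding
 * "mov eax, [ebx+esi*4+12h]" the decoder sees bRm=0x44, bSib=0xb3, disp8=0x12:
 *
 *      // bRm  = 0x44: mod=01 (disp8 follows), reg=000 (eax), rm=100 (SIB follows)
 *      // bSib = 0xb3: scale=10 (*4), index=110 (esi), base=011 (ebx)
 *      // u32EffAddr = ebx + (esi << 2) + (int8_t)0x12
 *
 * No SET_SS_DEF() fires because neither ebp nor esp is involved, so the
 * default segment stays DS.
 */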


#ifdef IEM_WITH_SETJMP
/**
 * Calculates the effective address of a ModR/M memory operand.
 *
 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
 *
 * May longjmp on internal error.
 *
 * @return  The effective address.
 * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
 * @param   bRm                 The ModRM byte.
 * @param   cbImmAndRspOffset   - First byte: The size of any immediate
 *                                following the effective address opcode bytes
 *                                (only for RIP relative addressing).
 *                              - Second byte: RSP displacement (for POP [ESP]).
 */
RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset) IEM_NOEXCEPT_MAY_LONGJMP
{
    Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
# define SET_SS_DEF() \
    do \
    { \
        if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
            pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
    } while (0)

    if (!IEM_IS_64BIT_CODE(pVCpu))
    {
/** @todo Check the effective address size crap! */
        if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
        {
            uint16_t u16EffAddr;

            /* Handle the disp16 form with no registers first. */
            if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
                IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
            else
            {
                /* Get the displacement. */
                switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
                {
                    case 0:  u16EffAddr = 0;                             break;
                    case 1:  IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
                    case 2:  IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);       break;
                    default: AssertFailedStmt(IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_1)); /* (caller checked for these) */
                }

                /* Add the base and index registers to the disp. */
                switch (bRm & X86_MODRM_RM_MASK)
                {
                    case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
                    case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
                    case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
                    case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
                    case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
                    case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
                    case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
                    case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
                }
            }

            Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
            return u16EffAddr;
        }

        Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
        uint32_t u32EffAddr;

        /* Handle the disp32 form with no registers first. */
        if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
            IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
        else
        {
            /* Get the register (or SIB) value. */
            switch ((bRm & X86_MODRM_RM_MASK))
            {
                case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
                case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
                case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
                case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
                case 4: /* SIB */
                {
                    uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);

                    /* Get the index and scale it. */
                    switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
                    {
                        case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
                        case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
                        case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
                        case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
                        case 4: u32EffAddr = 0; /*none */ break;
                        case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
                        case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
                        case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
                        IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
                    }
                    u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;

                    /* add base */
                    switch (bSib & X86_SIB_BASE_MASK)
                    {
                        case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
                        case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
                        case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
                        case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
                        case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
                        case 5:
                            if ((bRm & X86_MODRM_MOD_MASK) != 0)
                            {
                                u32EffAddr += pVCpu->cpum.GstCtx.ebp;
                                SET_SS_DEF();
                            }
                            else
                            {
                                uint32_t u32Disp;
                                IEM_OPCODE_GET_NEXT_U32(&u32Disp);
                                u32EffAddr += u32Disp;
                            }
                            break;
                        case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
                        case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
                        IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
                    }
                    break;
                }
                case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
                case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
                case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
                IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
            }

            /* Get and add the displacement. */
            switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
            {
                case 0:
                    break;
                case 1:
                {
                    int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
                    u32EffAddr += i8Disp;
                    break;
                }
                case 2:
                {
                    uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
                    u32EffAddr += u32Disp;
                    break;
                }
                default:
                    AssertFailedStmt(IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_2)); /* (caller checked for these) */
            }
        }

        Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
        Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
        return u32EffAddr;
    }

    uint64_t u64EffAddr;

    /* Handle the rip+disp32 form with no registers first. */
    if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
    {
        IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
        u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
    }
    else
    {
        /* Get the register (or SIB) value. */
        switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
        {
            case  0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
            case  1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
            case  2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
            case  3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
            case  5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
            case  6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
            case  7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
            case  8: u64EffAddr = pVCpu->cpum.GstCtx.r8;  break;
            case  9: u64EffAddr = pVCpu->cpum.GstCtx.r9;  break;
            case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
            case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
            case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
            case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
            case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
            /* SIB */
            case 4:
            case 12:
            {
                uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);

                /* Get the index and scale it. */
                switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
                {
                    case  0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
                    case  1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
                    case  2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
                    case  3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
                    case  4: u64EffAddr = 0; /*none */ break;
                    case  5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
                    case  6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
                    case  7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
                    case  8: u64EffAddr = pVCpu->cpum.GstCtx.r8;  break;
                    case  9: u64EffAddr = pVCpu->cpum.GstCtx.r9;  break;
                    case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
                    case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
                    case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
                    case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
                    case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
                    case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
                    IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
                }
                u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;

                /* add base */
                switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
                {
                    case  0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
                    case  1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
                    case  2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
                    case  3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
                    case  4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
                    case  6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
                    case  7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
                    case  8: u64EffAddr += pVCpu->cpum.GstCtx.r8;  break;
                    case  9: u64EffAddr += pVCpu->cpum.GstCtx.r9;  break;
                    case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
                    case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
                    case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
                    case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
                    case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
                    /* complicated encodings */
                    case 5:
                    case 13:
                        if ((bRm & X86_MODRM_MOD_MASK) != 0)
                        {
                            if (!pVCpu->iem.s.uRexB)
                            {
                                u64EffAddr += pVCpu->cpum.GstCtx.rbp;
                                SET_SS_DEF();
                            }
                            else
                                u64EffAddr += pVCpu->cpum.GstCtx.r13;
                        }
                        else
                        {
                            uint32_t u32Disp;
                            IEM_OPCODE_GET_NEXT_U32(&u32Disp);
                            u64EffAddr += (int32_t)u32Disp;
                        }
                        break;
                    IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
                }
                break;
            }
            IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
        }

        /* Get and add the displacement. */
        switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
        {
            case 0:
                break;
            case 1:
            {
                int8_t i8Disp;
                IEM_OPCODE_GET_NEXT_S8(&i8Disp);
                u64EffAddr += i8Disp;
                break;
            }
            case 2:
            {
                uint32_t u32Disp;
                IEM_OPCODE_GET_NEXT_U32(&u32Disp);
                u64EffAddr += (int32_t)u32Disp;
                break;
            }
            IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
        }

    }

    if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
    {
        Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
        return u64EffAddr;
    }
    Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
    Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
    return u64EffAddr & UINT32_MAX;
}
#endif /* IEM_WITH_SETJMP */
9250 |
|
---|
9251 |
|
---|
9252 | /**
|
---|
9253 | * Calculates the effective address of a ModR/M memory operand, extended version
|
---|
9254 | * for use in the recompilers.
|
---|
9255 | *
|
---|
9256 | * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
|
---|
9257 | *
|
---|
9258 | * @return Strict VBox status code.
|
---|
9259 | * @param pVCpu The cross context virtual CPU structure of the calling thread.
|
---|
9260 | * @param bRm The ModRM byte.
|
---|
9261 | * @param cbImmAndRspOffset - First byte: The size of any immediate
|
---|
9262 | * following the effective address opcode bytes
|
---|
9263 | * (only for RIP relative addressing).
|
---|
9264 | * - Second byte: RSP displacement (for POP [ESP]).
|
---|
9265 | * @param pGCPtrEff Where to return the effective address.
|
---|
9266 | * @param puInfo Extra info: 32-bit displacement (bits 31:0) and
|
---|
9267 | * SIB byte (bits 39:32).
|
---|
9268 | */
|
---|
9269 | VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset, PRTGCPTR pGCPtrEff, uint64_t *puInfo) RT_NOEXCEPT
|
---|
9270 | {
|
---|
9271 | Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
|
---|
9272 | # define SET_SS_DEF() \
|
---|
9273 | do \
|
---|
9274 | { \
|
---|
9275 | if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
|
---|
9276 | pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
|
---|
9277 | } while (0)
|
---|
9278 |
|
---|
9279 | uint64_t uInfo;
|
---|
9280 | if (!IEM_IS_64BIT_CODE(pVCpu))
|
---|
9281 | {
|
---|
9282 | /** @todo Check the effective address size crap! */
|
---|
9283 | if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
|
---|
9284 | {
|
---|
9285 | uint16_t u16EffAddr;
|
---|
9286 |
|
---|
9287 | /* Handle the disp16 form with no registers first. */
|
---|
9288 | if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
|
---|
9289 | {
|
---|
9290 | IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
|
---|
9291 | uInfo = u16EffAddr;
|
---|
9292 | }
|
---|
9293 | else
|
---|
9294 | {
|
---|
9295 | /* Get the displacment. */
|
---|
9296 | switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
|
---|
9297 | {
|
---|
9298 | case 0: u16EffAddr = 0; break;
|
---|
9299 | case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
|
---|
9300 | case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
|
---|
9301 | default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
|
---|
9302 | }
|
---|
9303 | uInfo = u16EffAddr;
|
---|
9304 |
|
---|
9305 | /* Add the base and index registers to the disp. */
|
---|
9306 | switch (bRm & X86_MODRM_RM_MASK)
|
---|
9307 | {
|
---|
9308 | case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
|
---|
9309 | case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
|
---|
9310 | case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
|
---|
9311 | case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
|
---|
9312 | case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
|
---|
9313 | case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
|
---|
9314 | case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
|
---|
9315 | case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
|
---|
9316 | }
|
---|
9317 | }
|
---|
9318 |
|
---|
9319 | *pGCPtrEff = u16EffAddr;
|
---|
9320 | }
|
---|
9321 | else
|
---|
9322 | {
|
---|
9323 | Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
|
---|
9324 | uint32_t u32EffAddr;
|
---|
9325 |
|
---|
9326 | /* Handle the disp32 form with no registers first. */
|
---|
9327 | if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
|
---|
9328 | {
|
---|
9329 | IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
|
---|
9330 | uInfo = u32EffAddr;
|
---|
9331 | }
|
---|
9332 | else
|
---|
9333 | {
|
---|
9334 | /* Get the register (or SIB) value. */
|
---|
9335 | uInfo = 0;
|
---|
9336 | switch ((bRm & X86_MODRM_RM_MASK))
|
---|
9337 | {
|
---|
9338 | case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
|
---|
9339 | case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
|
---|
9340 | case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
|
---|
9341 | case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
|
---|
9342 | case 4: /* SIB */
|
---|
9343 | {
|
---|
9344 | uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
|
---|
9345 | uInfo = (uint64_t)bSib << 32;
|
---|
9346 |
|
---|
9347 | /* Get the index and scale it. */
|
---|
9348 | switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
|
---|
9349 | {
|
---|
9350 | case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
|
---|
9351 | case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
|
---|
9352 | case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
|
---|
9353 | case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
|
---|
9354 | case 4: u32EffAddr = 0; /*none */ break;
|
---|
9355 | case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
|
---|
9356 | case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
|
---|
9357 | case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
|
---|
9358 | IEM_NOT_REACHED_DEFAULT_CASE_RET();
|
---|
9359 | }
|
---|
9360 | u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
|
---|
9361 |
|
---|
9362 | /* add base */
|
---|
9363 | switch (bSib & X86_SIB_BASE_MASK)
|
---|
9364 | {
|
---|
9365 | case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
|
---|
9366 | case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
|
---|
9367 | case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
|
---|
9368 | case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
|
---|
9369 | case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
|
---|
9370 | case 5:
|
---|
9371 | if ((bRm & X86_MODRM_MOD_MASK) != 0)
|
---|
9372 | {
|
---|
9373 | u32EffAddr += pVCpu->cpum.GstCtx.ebp;
|
---|
9374 | SET_SS_DEF();
|
---|
9375 | }
|
---|
9376 | else
|
---|
9377 | {
|
---|
9378 | uint32_t u32Disp;
|
---|
9379 | IEM_OPCODE_GET_NEXT_U32(&u32Disp);
|
---|
9380 | u32EffAddr += u32Disp;
|
---|
9381 | uInfo |= u32Disp;
|
---|
9382 | }
|
---|
9383 | break;
|
---|
9384 | case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
|
---|
9385 | case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
|
---|
9386 | IEM_NOT_REACHED_DEFAULT_CASE_RET();
|
---|
9387 | }
|
---|
9388 | break;
|
---|
9389 | }
|
---|
9390 | case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
|
---|
9391 | case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
|
---|
9392 | case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
|
---|
9393 | IEM_NOT_REACHED_DEFAULT_CASE_RET();
|
---|
9394 | }
|
---|
9395 |
|
---|
9396 | /* Get and add the displacement. */
|
---|
9397 | switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
|
---|
9398 | {
|
---|
9399 | case 0:
|
---|
9400 | break;
|
---|
9401 | case 1:
|
---|
9402 | {
|
---|
9403 | int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
|
---|
9404 | u32EffAddr += i8Disp;
|
---|
9405 | uInfo |= (uint32_t)(int32_t)i8Disp;
|
---|
9406 | break;
|
---|
9407 | }
|
---|
9408 | case 2:
|
---|
9409 | {
|
---|
9410 | uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
|
---|
9411 | u32EffAddr += u32Disp;
|
---|
9412 | uInfo |= (uint32_t)u32Disp;
|
---|
9413 | break;
|
---|
9414 | }
|
---|
9415 | default:
|
---|
9416 | AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
|
---|
9417 | }
|
---|
9418 |
|
---|
9419 | }
|
---|
9420 | Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
|
---|
9421 | *pGCPtrEff = u32EffAddr;
|
---|
9422 | }
|
---|
9423 | }
|
---|
9424 | else
|
---|
9425 | {
|
---|
9426 | uint64_t u64EffAddr;
|
---|
9427 |
|
---|
9428 | /* Handle the rip+disp32 form with no registers first. */
|
---|
9429 | if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
|
---|
9430 | {
|
---|
9431 | IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
|
---|
9432 | uInfo = (uint32_t)u64EffAddr;
|
---|
            u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
        }
        else
        {
            /* Get the register (or SIB) value. */
            uInfo = 0;
            switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
            {
                case  0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
                case  1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
                case  2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
                case  3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
                case  5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
                case  6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
                case  7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
                case  8: u64EffAddr = pVCpu->cpum.GstCtx.r8;  break;
                case  9: u64EffAddr = pVCpu->cpum.GstCtx.r9;  break;
                case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
                case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
                case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
                case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
                case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
                /* SIB */
                case 4:
                case 12:
                {
                    uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
                    uInfo = (uint64_t)bSib << 32;

                    /* Get the index and scale it. */
                    switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
                    {
                        case  0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
                        case  1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
                        case  2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
                        case  3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
                        case  4: u64EffAddr = 0; /* none */ break;
                        case  5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
                        case  6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
                        case  7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
                        case  8: u64EffAddr = pVCpu->cpum.GstCtx.r8;  break;
                        case  9: u64EffAddr = pVCpu->cpum.GstCtx.r9;  break;
                        case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
                        case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
                        case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
                        case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
                        case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
                        case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
                        IEM_NOT_REACHED_DEFAULT_CASE_RET();
                    }
                    u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;

                    /* add base */
                    switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
                    {
                        case  0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
                        case  1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
                        case  2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
                        case  3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
                        case  4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
                        case  6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
                        case  7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
                        case  8: u64EffAddr += pVCpu->cpum.GstCtx.r8;  break;
                        case  9: u64EffAddr += pVCpu->cpum.GstCtx.r9;  break;
                        case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
                        case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
                        case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
                        case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
                        case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
                        /* complicated encodings */
                        case 5:
                        case 13:
                            if ((bRm & X86_MODRM_MOD_MASK) != 0)
                            {
                                if (!pVCpu->iem.s.uRexB)
                                {
                                    u64EffAddr += pVCpu->cpum.GstCtx.rbp;
                                    SET_SS_DEF();
                                }
                                else
                                    u64EffAddr += pVCpu->cpum.GstCtx.r13;
                            }
                            else
                            {
                                uint32_t u32Disp;
                                IEM_OPCODE_GET_NEXT_U32(&u32Disp);
                                u64EffAddr += (int32_t)u32Disp;
                                uInfo      |= u32Disp;
                            }
                            break;
                        IEM_NOT_REACHED_DEFAULT_CASE_RET();
                    }
                    break;
                }
                IEM_NOT_REACHED_DEFAULT_CASE_RET();
            }

            /* Get and add the displacement. */
            switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
            {
                case 0:
                    break;
                case 1:
                {
                    int8_t i8Disp;
                    IEM_OPCODE_GET_NEXT_S8(&i8Disp);
                    u64EffAddr += i8Disp;
                    uInfo      |= (uint32_t)(int32_t)i8Disp;
                    break;
                }
                case 2:
                {
                    uint32_t u32Disp;
                    IEM_OPCODE_GET_NEXT_U32(&u32Disp);
                    u64EffAddr += (int32_t)u32Disp;
                    uInfo      |= u32Disp;
                    break;
                }
                IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
            }

        }

        if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
            *pGCPtrEff = u64EffAddr;
        else
        {
            Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
            *pGCPtrEff = u64EffAddr & UINT32_MAX;
        }
    }
    *puInfo = uInfo;

    Log5(("iemOpHlpCalcRmEffAddrEx: EffAddr=%#010RGv uInfo=%RX64\n", *pGCPtrEff, uInfo));
    return VINF_SUCCESS;
}
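/*
 * For reference, the 64-bit SIB math in the switch cascades above reduces to
 * base + (index << scale) + displacement with 64-bit wrap-around.  The sketch
 * below is illustrative only and not built: the helper name and parameters
 * are made up, and register/displacement fetching is left to the caller.
 */
#if 0
/** Illustrative-only distillation of the SIB address calculation above. */
static uint64_t iemSketchCalcSibAddr64(uint64_t uBase, uint64_t uIndex, uint8_t bSib, int32_t i32Disp)
{
    uint64_t uEffAddr = uIndex << ((bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK);
    uEffAddr += uBase;              /* may wrap around; that is architecturally correct */
    uEffAddr += (int64_t)i32Disp;   /* displacements are sign-extended */
    return uEffAddr;                /* the caller truncates to 32 bits for 32-bit addressing */
}
#endif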

/** @} */


#ifdef LOG_ENABLED
/**
 * Logs the current instruction.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   fSameCtx    Set if we have the same context information as the VMM,
 *                      clear if we may have already executed an instruction in
 *                      our debug context.  When clear, we assume IEMCPU holds
 *                      valid CPU mode info.
 *
 *                      The @a fSameCtx parameter is now misleading and obsolete.
 * @param   pszFunction The IEM function doing the execution.
 */
static void iemLogCurInstr(PVMCPUCC pVCpu, bool fSameCtx, const char *pszFunction) RT_NOEXCEPT
{
# ifdef IN_RING3
    if (LogIs2Enabled())
    {
        char     szInstr[256];
        uint32_t cbInstr = 0;
        if (fSameCtx)
            DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
                               DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
                               szInstr, sizeof(szInstr), &cbInstr);
        else
        {
            uint32_t fFlags = 0;
            switch (IEM_GET_CPU_MODE(pVCpu))
            {
                case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
                case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
                case IEMMODE_16BIT:
                    if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) || pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
                        fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
                    else
                        fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
                    break;
            }
            DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fFlags,
                               szInstr, sizeof(szInstr), &cbInstr);
        }

        PCX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
        Log2(("**** %s fExec=%x\n"
              " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
              " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
              " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
              " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
              " %s\n"
              , pszFunction, pVCpu->iem.s.fExec,
              pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ebx, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.esi, pVCpu->cpum.GstCtx.edi,
              pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL, pVCpu->cpum.GstCtx.tr.Sel,
              pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ds.Sel, pVCpu->cpum.GstCtx.es.Sel,
              pVCpu->cpum.GstCtx.fs.Sel, pVCpu->cpum.GstCtx.gs.Sel, pVCpu->cpum.GstCtx.eflags.u,
              pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
              szInstr));

        /* This stuff sucks atm. as it fills the log with MSRs. */
        //if (LogIs3Enabled())
        //    DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
    }
    else
# endif
        LogFlow(("%s: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n", pszFunction, pVCpu->cpum.GstCtx.cs.Sel,
                 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u));
    RT_NOREF_PV(pVCpu); RT_NOREF_PV(fSameCtx);
}
#endif /* LOG_ENABLED */


#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
/**
 * Deals with VMCPU_FF_VMX_APIC_WRITE, VMCPU_FF_VMX_MTF, VMCPU_FF_VMX_NMI_WINDOW,
 * VMCPU_FF_VMX_PREEMPT_TIMER and VMCPU_FF_VMX_INT_WINDOW.
 *
 * @returns Modified rcStrict.
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   rcStrict    The instruction execution status.
 */
static VBOXSTRICTRC iemHandleNestedInstructionBoundaryFFs(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict) RT_NOEXCEPT
{
    Assert(CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)));
    if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF))
    {
        /* VMX preemption timer takes priority over NMI-window exits. */
        if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
        {
            rcStrict = iemVmxVmexitPreemptTimer(pVCpu);
            Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER));
        }
        /*
         * Check remaining intercepts.
         *
         * NMI-window and Interrupt-window VM-exits.
         * Interrupt shadow (block-by-STI and Mov SS) inhibits interrupts and may also block NMIs.
         * Event injection during VM-entry takes priority over NMI-window and interrupt-window VM-exits.
         *
         * See Intel spec. 26.7.6 "NMI-Window Exiting".
         * See Intel spec. 26.7.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
         */
        else if (   VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW | VMCPU_FF_VMX_INT_WINDOW)
                 && !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
                 && !TRPMHasTrap(pVCpu))
        {
            Assert(CPUMIsGuestVmxInterceptEvents(&pVCpu->cpum.GstCtx));
            if (   VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW)
                && CPUMIsGuestVmxVirtNmiBlocking(&pVCpu->cpum.GstCtx))
            {
                rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_NMI_WINDOW, 0 /* u64ExitQual */);
                Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW));
            }
            else if (   VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW)
                     && CPUMIsGuestVmxVirtIntrEnabled(&pVCpu->cpum.GstCtx))
            {
                rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_INT_WINDOW, 0 /* u64ExitQual */);
                Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW));
            }
        }
    }
    /* TPR-below threshold/APIC write has the highest priority. */
    else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
    {
        rcStrict = iemVmxApicWriteEmulation(pVCpu);
        Assert(!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx));
        Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE));
    }
    /* MTF takes priority over VMX-preemption timer. */
    else
    {
        rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* u64ExitQual */);
        Assert(!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx));
        Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF));
    }
    return rcStrict;
}
#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */

/**
 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
 * IEMExecOneWithPrefetchedByPC.
 *
 * Similar code is found in IEMExecLots.
 *
 * @return  Strict VBox status code.
 * @param   pVCpu           The cross context virtual CPU structure of the calling EMT.
 * @param   fExecuteInhibit If set, execute the instruction following CLI,
 *                          POP SS and MOV SS,GR.
 * @param   pszFunction     The calling function name.
 */
DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPUCC pVCpu, bool fExecuteInhibit, const char *pszFunction)
{
    AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
    AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
    AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
    RT_NOREF_PV(pszFunction);

#ifdef IEM_WITH_SETJMP
    VBOXSTRICTRC rcStrict;
    IEM_TRY_SETJMP(pVCpu, rcStrict)
    {
        uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
        rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
    }
    IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
    {
        pVCpu->iem.s.cLongJumps++;
    }
    IEM_CATCH_LONGJMP_END(pVCpu);
#else
    uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
    VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
#endif
    if (rcStrict == VINF_SUCCESS)
        pVCpu->iem.s.cInstructions++;
    if (pVCpu->iem.s.cActiveMappings > 0)
    {
        Assert(rcStrict != VINF_SUCCESS);
        iemMemRollback(pVCpu);
    }
    AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
    AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
    AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));

//#ifdef DEBUG
//    AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
//#endif

#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    /*
     * Perform any VMX nested-guest instruction boundary actions.
     *
     * If any of these causes a VM-exit, we must skip executing the next
     * instruction (would run into stale page tables).  A VM-exit makes sure
     * there is no interrupt-inhibition, so that should ensure we don't try
     * to execute the next instruction.  Clearing fExecuteInhibit is
     * problematic because of the setjmp/longjmp clobbering above.
     */
    if (   !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
                                     | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)
        || rcStrict != VINF_SUCCESS)
    { /* likely */ }
    else
        rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
#endif

    /* Execute the next instruction as well if a cli, pop ss or
       mov ss, Gr has just completed successfully. */
    if (   fExecuteInhibit
        && rcStrict == VINF_SUCCESS
        && CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx))
    {
        rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fExec & (IEM_F_BYPASS_HANDLERS | IEM_F_X86_DISREGARD_LOCK));
        if (rcStrict == VINF_SUCCESS)
        {
#ifdef LOG_ENABLED
            iemLogCurInstr(pVCpu, false, pszFunction);
#endif
#ifdef IEM_WITH_SETJMP
            IEM_TRY_SETJMP_AGAIN(pVCpu, rcStrict)
            {
                uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
                rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
            }
            IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
            {
                pVCpu->iem.s.cLongJumps++;
            }
            IEM_CATCH_LONGJMP_END(pVCpu);
#else
            IEM_OPCODE_GET_FIRST_U8(&b);
            rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
#endif
            if (rcStrict == VINF_SUCCESS)
            {
                pVCpu->iem.s.cInstructions++;
#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
                if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
                                               | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW))
                { /* likely */ }
                else
                    rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
#endif
            }
            if (pVCpu->iem.s.cActiveMappings > 0)
            {
                Assert(rcStrict != VINF_SUCCESS);
                iemMemRollback(pVCpu);
            }
            AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
            AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
            AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
        }
        else if (pVCpu->iem.s.cActiveMappings > 0)
            iemMemRollback(pVCpu);
        /** @todo drop this after we bake this change into RIP advancing. */
        CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx); /* hope this is correct for all exceptional cases... */
    }

    /*
     * Return value fiddling, statistics and sanity assertions.
     */
    rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);

    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
    return rcStrict;
}

/**
 * Execute one instruction.
 *
 * @return  Strict VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 */
VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPUCC pVCpu)
{
    AssertCompile(sizeof(pVCpu->iem.s) <= sizeof(pVCpu->iem.padding)); /* (tstVMStruct can't do its job w/o instruction stats) */
#ifdef LOG_ENABLED
    iemLogCurInstr(pVCpu, true, "IEMExecOne");
#endif

    /*
     * Do the decoding and emulation.
     */
    VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
    if (rcStrict == VINF_SUCCESS)
        rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOne");
    else if (pVCpu->iem.s.cActiveMappings > 0)
        iemMemRollback(pVCpu);

    if (rcStrict != VINF_SUCCESS)
        LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
                 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
    return rcStrict;
}
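/*
 * A minimal sketch of how an EM-style caller might drive IEMExecOne (not
 * built; the loop shape and status handling here are illustrative only):
 */
#if 0
    for (;;)
    {
        VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
        if (rcStrict != VINF_SUCCESS)
            return VBOXSTRICTRC_TODO(rcStrict); /* hand informational statuses back to the caller */
    }
#endif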


VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPUCC pVCpu, uint32_t *pcbWritten)
{
    uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
    VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
    if (rcStrict == VINF_SUCCESS)
    {
        rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneEx");
        if (pcbWritten)
            *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
    }
    else if (pVCpu->iem.s.cActiveMappings > 0)
        iemMemRollback(pVCpu);

    return rcStrict;
}


VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPUCC pVCpu, uint64_t OpcodeBytesPC,
                                                   const void *pvOpcodeBytes, size_t cbOpcodeBytes)
{
    VBOXSTRICTRC rcStrict;
    if (   cbOpcodeBytes
        && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
    {
        iemInitDecoder(pVCpu, 0 /*fExecOpts*/);
#ifdef IEM_WITH_CODE_TLB
        pVCpu->iem.s.uInstrBufPc      = OpcodeBytesPC;
        pVCpu->iem.s.pbInstrBuf       = (uint8_t const *)pvOpcodeBytes;
        pVCpu->iem.s.cbInstrBufTotal  = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
        pVCpu->iem.s.offCurInstrStart = 0;
        pVCpu->iem.s.offInstrNextByte = 0;
        pVCpu->iem.s.GCPhysInstrBuf   = NIL_RTGCPHYS;
#else
        pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
        memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
#endif
        rcStrict = VINF_SUCCESS;
    }
    else
        rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
    if (rcStrict == VINF_SUCCESS)
        rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneWithPrefetchedByPC");
    else if (pVCpu->iem.s.cActiveMappings > 0)
        iemMemRollback(pVCpu);

    return rcStrict;
}
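/*
 * Sketch of the prefetched-bytes variant (not built): a caller that already
 * has the opcode bytes, e.g. from VM-exit information, can skip the second
 * guest-memory read.  The abInstr and cbInstr names below are hypothetical:
 */
#if 0
    VBOXSTRICTRC rcStrict = IEMExecOneWithPrefetchedByPC(pVCpu, pVCpu->cpum.GstCtx.rip,
                                                         abInstr /*pvOpcodeBytes*/, cbInstr /*cbOpcodeBytes*/);
#endif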


VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPUCC pVCpu, uint32_t *pcbWritten)
{
    uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
    VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_BYPASS_HANDLERS);
    if (rcStrict == VINF_SUCCESS)
    {
        rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassEx");
        if (pcbWritten)
            *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
    }
    else if (pVCpu->iem.s.cActiveMappings > 0)
        iemMemRollback(pVCpu);

    return rcStrict;
}


VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPUCC pVCpu, uint64_t OpcodeBytesPC,
                                                         const void *pvOpcodeBytes, size_t cbOpcodeBytes)
{
    VBOXSTRICTRC rcStrict;
    if (   cbOpcodeBytes
        && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
    {
        iemInitDecoder(pVCpu, IEM_F_BYPASS_HANDLERS);
#ifdef IEM_WITH_CODE_TLB
        pVCpu->iem.s.uInstrBufPc      = OpcodeBytesPC;
        pVCpu->iem.s.pbInstrBuf       = (uint8_t const *)pvOpcodeBytes;
        pVCpu->iem.s.cbInstrBufTotal  = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
        pVCpu->iem.s.offCurInstrStart = 0;
        pVCpu->iem.s.offInstrNextByte = 0;
        pVCpu->iem.s.GCPhysInstrBuf   = NIL_RTGCPHYS;
#else
        pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
        memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
#endif
        rcStrict = VINF_SUCCESS;
    }
    else
        rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_BYPASS_HANDLERS);
    if (rcStrict == VINF_SUCCESS)
        rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPC");
    else if (pVCpu->iem.s.cActiveMappings > 0)
        iemMemRollback(pVCpu);

    return rcStrict;
}


/**
 * For handling split cacheline lock operations when the host has split-lock
 * detection enabled.
 *
 * This will cause the interpreter to disregard the lock prefix and implicit
 * locking (xchg).
 *
 * @returns Strict VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 */
VMMDECL(VBOXSTRICTRC) IEMExecOneIgnoreLock(PVMCPUCC pVCpu)
{
    /*
     * Do the decoding and emulation.
     */
    VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_X86_DISREGARD_LOCK);
    if (rcStrict == VINF_SUCCESS)
        rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneIgnoreLock");
    else if (pVCpu->iem.s.cActiveMappings > 0)
        iemMemRollback(pVCpu);

    if (rcStrict != VINF_SUCCESS)
        LogFlow(("IEMExecOneIgnoreLock: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
                 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
    return rcStrict;
}


/**
 * Code common to IEMExecLots and IEMExecRecompilerThreaded that attempts to
 * inject a pending TRPM trap.
 */
VBOXSTRICTRC iemExecInjectPendingTrap(PVMCPUCC pVCpu)
{
    Assert(TRPMHasTrap(pVCpu));

    if (   !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
        && !CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx))
    {
        /** @todo Can we centralize this under CPUMCanInjectInterrupt()? */
#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
        bool fIntrEnabled = CPUMGetGuestGif(&pVCpu->cpum.GstCtx);
        if (fIntrEnabled)
        {
            if (!CPUMIsGuestInNestedHwvirtMode(IEM_GET_CTX(pVCpu)))
                fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
            else if (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
                fIntrEnabled = CPUMIsGuestVmxPhysIntrEnabled(IEM_GET_CTX(pVCpu));
            else
            {
                Assert(CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)));
                fIntrEnabled = CPUMIsGuestSvmPhysIntrEnabled(pVCpu, IEM_GET_CTX(pVCpu));
            }
        }
#else
        bool fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
#endif
        if (fIntrEnabled)
        {
            uint8_t     u8TrapNo;
            TRPMEVENT   enmType;
            uint32_t    uErrCode;
            RTGCPTR     uCr2;
            int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /*pu8InstLen*/, NULL /*fIcebp*/);
            AssertRC(rc2);
            Assert(enmType == TRPM_HARDWARE_INT);
            VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /*cbInstr*/);

            TRPMResetTrap(pVCpu);

#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
            /* Injecting an event may cause a VM-exit. */
            if (   rcStrict != VINF_SUCCESS
                && rcStrict != VINF_IEM_RAISED_XCPT)
                return iemExecStatusCodeFiddling(pVCpu, rcStrict);
#else
            NOREF(rcStrict);
#endif
        }
    }

    return VINF_SUCCESS;
}


VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPUCC pVCpu, uint32_t cMaxInstructions, uint32_t cPollRate, uint32_t *pcInstructions)
{
    uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
    AssertMsg(RT_IS_POWER_OF_TWO(cPollRate + 1), ("%#x\n", cPollRate));
    Assert(cMaxInstructions > 0);

    /*
     * See if there is an interrupt pending in TRPM, inject it if we can.
     */
    /** @todo What if we are injecting an exception and not an interrupt?  Is that
     *        possible here?  For now we assert it is indeed only an interrupt. */
    if (!TRPMHasTrap(pVCpu))
    { /* likely */ }
    else
    {
        VBOXSTRICTRC rcStrict = iemExecInjectPendingTrap(pVCpu);
        if (RT_LIKELY(rcStrict == VINF_SUCCESS))
        { /* likely */ }
        else
            return rcStrict;
    }

    /*
     * Initial decoder init w/ prefetch, then setup setjmp.
     */
    VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
    if (rcStrict == VINF_SUCCESS)
    {
#ifdef IEM_WITH_SETJMP
        pVCpu->iem.s.cActiveMappings = 0; /** @todo wtf? */
        IEM_TRY_SETJMP(pVCpu, rcStrict)
#endif
        {
            /*
             * The run loop.  We limit ourselves to the caller's instruction budget (cMaxInstructions).
             */
            uint32_t cMaxInstructionsGccStupidity = cMaxInstructions;
            PVMCC pVM = pVCpu->CTX_SUFF(pVM);
            for (;;)
            {
                /*
                 * Log the state.
                 */
#ifdef LOG_ENABLED
                iemLogCurInstr(pVCpu, true, "IEMExecLots");
#endif

                /*
                 * Do the decoding and emulation.
                 */
                uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
                rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
#ifdef VBOX_STRICT
                CPUMAssertGuestRFlagsCookie(pVM, pVCpu);
#endif
                if (RT_LIKELY(rcStrict == VINF_SUCCESS))
                {
                    Assert(pVCpu->iem.s.cActiveMappings == 0);
                    pVCpu->iem.s.cInstructions++;

#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
                    /* Perform any VMX nested-guest instruction boundary actions. */
                    uint64_t fCpu = pVCpu->fLocalForcedActions;
                    if (!(fCpu & (  VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
                                  | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)))
                    { /* likely */ }
                    else
                    {
                        rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
                        if (RT_LIKELY(rcStrict == VINF_SUCCESS))
                            fCpu = pVCpu->fLocalForcedActions;
                        else
                        {
                            rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
                            break;
                        }
                    }
#endif
                    if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
                    {
#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
                        uint64_t fCpu = pVCpu->fLocalForcedActions;
#endif
                        fCpu &= VMCPU_FF_ALL_MASK & ~(  VMCPU_FF_PGM_SYNC_CR3
                                                      | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
                                                      | VMCPU_FF_TLB_FLUSH
                                                      | VMCPU_FF_UNHALT );

                        if (RT_LIKELY(   (   !fCpu
                                          || (   !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
                                              && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF) )
                                      && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) ))
                        {
                            if (--cMaxInstructionsGccStupidity > 0)
                            {
                                /* Poll timers every now and then according to the caller's specs. */
                                if (   (cMaxInstructionsGccStupidity & cPollRate) != 0
                                    || !TMTimerPollBool(pVM, pVCpu))
                                {
                                    Assert(pVCpu->iem.s.cActiveMappings == 0);
                                    iemReInitDecoder(pVCpu);
                                    continue;
                                }
                            }
                        }
                    }
                    Assert(pVCpu->iem.s.cActiveMappings == 0);
                }
                else if (pVCpu->iem.s.cActiveMappings > 0)
                    iemMemRollback(pVCpu);
                rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
                break;
            }
        }
#ifdef IEM_WITH_SETJMP
        IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
        {
            if (pVCpu->iem.s.cActiveMappings > 0)
                iemMemRollback(pVCpu);
# if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
            rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
# endif
            pVCpu->iem.s.cLongJumps++;
        }
        IEM_CATCH_LONGJMP_END(pVCpu);
#endif

        /*
         * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
         */
        Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
        Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
    }
    else
    {
        if (pVCpu->iem.s.cActiveMappings > 0)
            iemMemRollback(pVCpu);

#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
        /*
         * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
         * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
         */
        rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
#endif
    }

    /*
     * Maybe re-enter raw-mode and log.
     */
    if (rcStrict != VINF_SUCCESS)
        LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
                 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
    if (pcInstructions)
        *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
    return rcStrict;
}
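/*
 * Illustrative call (not built).  Note the cPollRate contract asserted above:
 * cPollRate + 1 must be a power of two, so values like 511 work; the counts
 * below are made up for the example:
 */
#if 0
    uint32_t cInstructions = 0;
    VBOXSTRICTRC rcStrict = IEMExecLots(pVCpu, 4096 /*cMaxInstructions*/, 511 /*cPollRate*/, &cInstructions);
#endif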


/**
 * Interface used by EMExecuteExec, does exit statistics and limits.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu               The cross context virtual CPU structure.
 * @param   fWillExit           To be defined.
 * @param   cMinInstructions    Minimum number of instructions to execute before checking for FFs.
 * @param   cMaxInstructions    Maximum number of instructions to execute.
 * @param   cMaxInstructionsWithoutExits
 *                              The max number of instructions without exits.
 * @param   pStats              Where to return statistics.
 */
VMMDECL(VBOXSTRICTRC) IEMExecForExits(PVMCPUCC pVCpu, uint32_t fWillExit, uint32_t cMinInstructions, uint32_t cMaxInstructions,
                                      uint32_t cMaxInstructionsWithoutExits, PIEMEXECFOREXITSTATS pStats)
{
    NOREF(fWillExit); /** @todo define flexible exit crits */

    /*
     * Initialize return stats.
     */
    pStats->cInstructions    = 0;
    pStats->cExits           = 0;
    pStats->cMaxExitDistance = 0;
    pStats->cReserved        = 0;

    /*
     * Initial decoder init w/ prefetch, then setup setjmp.
     */
    VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
    if (rcStrict == VINF_SUCCESS)
    {
#ifdef IEM_WITH_SETJMP
        pVCpu->iem.s.cActiveMappings = 0; /** @todo wtf?!? */
        IEM_TRY_SETJMP(pVCpu, rcStrict)
#endif
        {
#ifdef IN_RING0
            bool const fCheckPreemptionPending = !RTThreadPreemptIsPossible() || !RTThreadPreemptIsEnabled(NIL_RTTHREAD);
#endif
            uint32_t cInstructionSinceLastExit = 0;

            /*
             * The run loop.  Bounded by the caller's instruction and exit limits.
             */
            PVM pVM = pVCpu->CTX_SUFF(pVM);
            for (;;)
            {
                /*
                 * Log the state.
                 */
#ifdef LOG_ENABLED
                iemLogCurInstr(pVCpu, true, "IEMExecForExits");
#endif

                /*
                 * Do the decoding and emulation.
                 */
                uint32_t const cPotentialExits = pVCpu->iem.s.cPotentialExits;

                uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
                rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);

                if (   cPotentialExits != pVCpu->iem.s.cPotentialExits
                    && cInstructionSinceLastExit > 0 /* don't count the first */ )
                {
                    pStats->cExits += 1;
                    if (cInstructionSinceLastExit > pStats->cMaxExitDistance)
                        pStats->cMaxExitDistance = cInstructionSinceLastExit;
                    cInstructionSinceLastExit = 0;
                }

                if (RT_LIKELY(rcStrict == VINF_SUCCESS))
                {
                    Assert(pVCpu->iem.s.cActiveMappings == 0);
                    pVCpu->iem.s.cInstructions++;
                    pStats->cInstructions++;
                    cInstructionSinceLastExit++;

#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
                    /* Perform any VMX nested-guest instruction boundary actions. */
                    uint64_t fCpu = pVCpu->fLocalForcedActions;
                    if (!(fCpu & (  VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
                                  | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)))
                    { /* likely */ }
                    else
                    {
                        rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
                        if (RT_LIKELY(rcStrict == VINF_SUCCESS))
                            fCpu = pVCpu->fLocalForcedActions;
                        else
                        {
                            rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
                            break;
                        }
                    }
#endif
                    if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
                    {
#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
                        uint64_t fCpu = pVCpu->fLocalForcedActions;
#endif
                        fCpu &= VMCPU_FF_ALL_MASK & ~(  VMCPU_FF_PGM_SYNC_CR3
                                                      | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
                                                      | VMCPU_FF_TLB_FLUSH
                                                      | VMCPU_FF_UNHALT );
                        if (RT_LIKELY(   (   (   !fCpu
                                              || (   !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
                                                  && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF))
                                          && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) )
                                      || pStats->cInstructions < cMinInstructions))
                        {
                            if (pStats->cInstructions < cMaxInstructions)
                            {
                                if (cInstructionSinceLastExit <= cMaxInstructionsWithoutExits)
                                {
#ifdef IN_RING0
                                    if (   !fCheckPreemptionPending
                                        || !RTThreadPreemptIsPending(NIL_RTTHREAD))
#endif
                                    {
                                        Assert(pVCpu->iem.s.cActiveMappings == 0);
                                        iemReInitDecoder(pVCpu);
                                        continue;
                                    }
#ifdef IN_RING0
                                    rcStrict = VINF_EM_RAW_INTERRUPT;
                                    break;
#endif
                                }
                            }
                        }
                        Assert(!(fCpu & VMCPU_FF_IEM));
                    }
                    Assert(pVCpu->iem.s.cActiveMappings == 0);
                }
                else if (pVCpu->iem.s.cActiveMappings > 0)
                    iemMemRollback(pVCpu);
                rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
                break;
            }
        }
#ifdef IEM_WITH_SETJMP
        IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
        {
            if (pVCpu->iem.s.cActiveMappings > 0)
                iemMemRollback(pVCpu);
            pVCpu->iem.s.cLongJumps++;
        }
        IEM_CATCH_LONGJMP_END(pVCpu);
#endif

        /*
         * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
         */
        Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
        Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
    }
    else
    {
        if (pVCpu->iem.s.cActiveMappings > 0)
            iemMemRollback(pVCpu);

#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
        /*
         * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
         * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
         */
        rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
#endif
    }

    /*
     * Maybe re-enter raw-mode and log.
     */
    if (rcStrict != VINF_SUCCESS)
        LogFlow(("IEMExecForExits: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc; ins=%u exits=%u maxdist=%u\n",
                 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp,
                 pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict), pStats->cInstructions, pStats->cExits, pStats->cMaxExitDistance));
    return rcStrict;
}


/**
 * Injects a trap, fault, abort, software interrupt or external interrupt.
 *
 * The parameter list matches TRPMQueryTrapAll pretty closely.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   u8TrapNo    The trap number.
 * @param   enmType     What type is it (trap/fault/abort), software
 *                      interrupt or hardware interrupt.
 * @param   uErrCode    The error code if applicable.
 * @param   uCr2        The CR2 value if applicable.
 * @param   cbInstr     The instruction length (only relevant for
 *                      software interrupts).
 */
VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPUCC pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
                                         uint8_t cbInstr)
{
    iemInitDecoder(pVCpu, 0 /*fExecOpts*/); /** @todo wrong init function! */
#ifdef DBGFTRACE_ENABLED
    RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
                      u8TrapNo, enmType, uErrCode, uCr2);
#endif

    uint32_t fFlags;
    switch (enmType)
    {
        case TRPM_HARDWARE_INT:
            Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
            fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
            uErrCode = uCr2 = 0;
            break;

        case TRPM_SOFTWARE_INT:
            Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
            fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
            uErrCode = uCr2 = 0;
            break;

        case TRPM_TRAP:
        case TRPM_NMI: /** @todo Distinguish NMI from exception 2. */
            Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
            fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
            if (u8TrapNo == X86_XCPT_PF)
                fFlags |= IEM_XCPT_FLAGS_CR2;
            switch (u8TrapNo)
            {
                case X86_XCPT_DF:
                case X86_XCPT_TS:
                case X86_XCPT_NP:
                case X86_XCPT_SS:
                case X86_XCPT_PF:
                case X86_XCPT_AC:
                case X86_XCPT_GP:
                    fFlags |= IEM_XCPT_FLAGS_ERR;
                    break;
            }
            break;

        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }

    VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);

    if (pVCpu->iem.s.cActiveMappings > 0)
        iemMemRollback(pVCpu);

    return rcStrict;
}
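/*
 * Illustrative injection of a write page fault (not built; GCPtrFault is a
 * hypothetical placeholder for the faulting linear address the caller has):
 */
#if 0
    VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, X86_XCPT_PF, TRPM_TRAP,
                                          X86_TRAP_PF_RW /*uErrCode*/, GCPtrFault /*uCr2*/, 0 /*cbInstr*/);
#endif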


/**
 * Injects the active TRPM event.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPUCC pVCpu)
{
#ifndef IEM_IMPLEMENTS_TASKSWITCH
    IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
#else
    uint8_t     u8TrapNo;
    TRPMEVENT   enmType;
    uint32_t    uErrCode;
    RTGCUINTPTR uCr2;
    uint8_t     cbInstr;
    int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr, NULL /*fIcebp*/);
    if (RT_FAILURE(rc))
        return rc;

    /** @todo r=ramshankar: Pass ICEBP info. to IEMInjectTrap() below and handle
     *        ICEBP \#DB injection as a special case. */
    VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
    if (rcStrict == VINF_SVM_VMEXIT)
        rcStrict = VINF_SUCCESS;
#endif
#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    if (rcStrict == VINF_VMX_VMEXIT)
        rcStrict = VINF_SUCCESS;
#endif
    /** @todo Are there any other codes that imply the event was successfully
     *        delivered to the guest? See @bugref{6607}. */
    if (   rcStrict == VINF_SUCCESS
        || rcStrict == VINF_IEM_RAISED_XCPT)
        TRPMResetTrap(pVCpu);

    return rcStrict;
#endif
}


VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
{
    RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
    return VERR_NOT_IMPLEMENTED;
}


VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
{
    RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
    return VERR_NOT_IMPLEMENTED;
}


/**
 * Interface for HM and EM for executing string I/O OUT (write) instructions.
 *
 * This API ASSUMES that the caller has already verified that the guest code is
 * allowed to access the I/O port.  (The I/O port is in the DX register in the
 * guest state.)
 *
 * @returns Strict VBox status code.
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   cbValue         The size of the I/O port access (1, 2, or 4).
 * @param   enmAddrMode     The addressing mode.
 * @param   fRepPrefix      Indicates whether a repeat prefix is used
 *                          (doesn't matter which for this instruction).
 * @param   cbInstr         The instruction length in bytes.
 * @param   iEffSeg         The effective segment register index (X86_SREG_XXX).
 * @param   fIoChecked      Whether the access to the I/O port has been
 *                          checked or not.  It's typically checked in the
 *                          HM scenario.
 */
VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
                                                bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
{
    AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
    IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);

    /*
     * State init.
     */
    iemInitExec(pVCpu, 0 /*fExecOpts*/);

    /*
     * Switch orgy for getting to the right handler.
     */
    VBOXSTRICTRC rcStrict;
    if (fRepPrefix)
    {
        switch (enmAddrMode)
        {
            case IEMMODE_16BIT:
                switch (cbValue)
                {
                    case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
                    case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
                    case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
                    default:
                        AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
                }
                break;

            case IEMMODE_32BIT:
                switch (cbValue)
                {
                    case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
                    case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
                    case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
                    default:
                        AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
                }
                break;

            case IEMMODE_64BIT:
                switch (cbValue)
                {
                    case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
                    case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
                    case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
                    default:
                        AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
                }
                break;

            default:
                AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
        }
    }
    else
    {
        switch (enmAddrMode)
        {
            case IEMMODE_16BIT:
                switch (cbValue)
                {
                    case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
                    case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
                    case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
                    default:
                        AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
                }
                break;

            case IEMMODE_32BIT:
                switch (cbValue)
                {
                    case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
                    case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
                    case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
                    default:
                        AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
                }
                break;

            case IEMMODE_64BIT:
                switch (cbValue)
                {
                    case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
                    case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
                    case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
                    default:
                        AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
                }
                break;

            default:
                AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
        }
    }

    if (pVCpu->iem.s.cActiveMappings)
        iemMemRollback(pVCpu);

    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
}
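/*
 * Illustrative use from an HM exit handler that decoded a REP OUTSB using
 * DS:RSI in 64-bit code (not built; cbInstr would come from the exit
 * information and all the values here are made up for the example):
 */
#if 0
    VBOXSTRICTRC rcStrict = IEMExecStringIoWrite(pVCpu, 1 /*cbValue*/, IEMMODE_64BIT,
                                                 true /*fRepPrefix*/, cbInstr, X86_SREG_DS, true /*fIoChecked*/);
#endif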


/**
 * Interface for HM and EM for executing string I/O IN (read) instructions.
 *
 * This API ASSUMES that the caller has already verified that the guest code is
 * allowed to access the I/O port.  (The I/O port is in the DX register in the
 * guest state.)
 *
 * @returns Strict VBox status code.
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   cbValue         The size of the I/O port access (1, 2, or 4).
 * @param   enmAddrMode     The addressing mode.
 * @param   fRepPrefix      Indicates whether a repeat prefix is used
 *                          (doesn't matter which for this instruction).
 * @param   cbInstr         The instruction length in bytes.
 * @param   fIoChecked      Whether the access to the I/O port has been
 *                          checked or not.  It's typically checked in the
 *                          HM scenario.
 */
VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
                                               bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
{
    IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);

    /*
     * State init.
     */
    iemInitExec(pVCpu, 0 /*fExecOpts*/);

    /*
     * Switch orgy for getting to the right handler.
     */
    VBOXSTRICTRC rcStrict;
    if (fRepPrefix)
    {
        switch (enmAddrMode)
        {
            case IEMMODE_16BIT:
                switch (cbValue)
                {
                    case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
                    case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
                    case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
                    default:
                        AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
                }
                break;

            case IEMMODE_32BIT:
                switch (cbValue)
                {
                    case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
                    case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
                    case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
                    default:
                        AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
                }
                break;

            case IEMMODE_64BIT:
                switch (cbValue)
                {
                    case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
                    case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
                    case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
                    default:
                        AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
                }
                break;

            default:
                AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
        }
    }
    else
    {
        switch (enmAddrMode)
        {
            case IEMMODE_16BIT:
                switch (cbValue)
                {
                    case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
                    case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
                    case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
                    default:
                        AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
                }
                break;

            case IEMMODE_32BIT:
                switch (cbValue)
                {
                    case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
                    case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
                    case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
                    default:
                        AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
                }
                break;

            case IEMMODE_64BIT:
                switch (cbValue)
                {
                    case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
                    case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
                    case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
                    default:
                        AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
                }
                break;

            default:
                AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
        }
    }

    if (   pVCpu->iem.s.cActiveMappings == 0
        || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
    { /* likely */ }
    else
    {
        AssertMsg(!IOM_SUCCESS(rcStrict), ("%#x\n", VBOXSTRICTRC_VAL(rcStrict)));
        iemMemRollback(pVCpu);
    }
    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
}
10771 |
|
---|
10772 |
|
---|
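/*
 * Usage sketch (illustrative only, not taken from the real exit handlers):
 * how a hardware-assisted execution (HM) I/O exit handler might hand a
 * "REP INSB" exit to the interface above.  The variables uPort and cbInstr
 * stand in for whatever the VT-x/AMD-V exit information actually provides.
 *
 *     VBOXSTRICTRC rcStrict = IEMExecStringIoRead(pVCpu,
 *                                                 1,              // cbValue: byte-sized INSB
 *                                                 IEMMODE_32BIT,  // enmAddrMode: address size of the exit
 *                                                 true,           // fRepPrefix: a REP prefix was present
 *                                                 cbInstr,        // from the exit info (assumed)
 *                                                 false);         // fIoChecked: permission check not yet done
 */

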
/**
 * Interface for rawmode to execute an OUT instruction.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   cbInstr     The instruction length in bytes.
 * @param   u16Port     The port to write to.
 * @param   fImm        Whether the port is specified using an immediate operand
 *                      or using the implicit DX register.
 * @param   cbReg       The register size.
 *
 * @remarks In ring-0 not all of the state needs to be synced in.
 */
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
{
    IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
    Assert(cbReg <= 4 && cbReg != 3);

    iemInitExec(pVCpu, 0 /*fExecOpts*/);
    VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_out, u16Port, cbReg,
                                             ((uint8_t)fImm << 7) | 0xf /** @todo never worked with intercepts */);
    Assert(!pVCpu->iem.s.cActiveMappings);
    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
}


/**
 * Interface for rawmode to execute an IN instruction.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   cbInstr     The instruction length in bytes.
 * @param   u16Port     The port to read from.
 * @param   fImm        Whether the port is specified using an immediate operand
 *                      or using the implicit DX register.
 * @param   cbReg       The register size.
 */
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
{
    IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
    Assert(cbReg <= 4 && cbReg != 3);

    iemInitExec(pVCpu, 0 /*fExecOpts*/);
    VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_in, u16Port, cbReg,
                                             ((uint8_t)fImm << 7) | 0xf /** @todo never worked with intercepts */);
    Assert(!pVCpu->iem.s.cActiveMappings);
    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
}


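/*
 * Usage sketch (illustrative only): emulating a decoded "IN AL, DX" exit via
 * the interface above.  Note that the third argument of the underlying
 * iemCImpl_in/iemCImpl_out call packs fImm into bit 7, while the low bits are
 * a placeholder (see the inline @todo); callers only supply the bool.
 *
 *     VBOXSTRICTRC rcStrict = IEMExecDecodedIn(pVCpu,
 *                                              cbInstr,    // from the exit info (assumed)
 *                                              u16Port,    // e.g. the value of guest DX
 *                                              false,      // fImm: port came from DX, not an imm8
 *                                              1);         // cbReg: AL, i.e. 1 byte
 */

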
/**
 * Interface for HM and EM to write to a CRx register.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   cbInstr     The instruction length in bytes.
 * @param   iCrReg      The control register number (destination).
 * @param   iGReg       The general purpose register number (source).
 *
 * @remarks In ring-0 not all of the state needs to be synced in.
 */
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
{
    IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
    Assert(iCrReg < 16);
    Assert(iGReg < 16);

    iemInitExec(pVCpu, 0 /*fExecOpts*/);
    VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
    Assert(!pVCpu->iem.s.cActiveMappings);
    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
}


/**
 * Interface for HM and EM to read from a CRx register.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   cbInstr     The instruction length in bytes.
 * @param   iGReg       The general purpose register number (destination).
 * @param   iCrReg      The control register number (source).
 *
 * @remarks In ring-0 not all of the state needs to be synced in.
 */
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
{
    IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
    IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4
                        | CPUMCTX_EXTRN_APIC_TPR);
    Assert(iCrReg < 16);
    Assert(iGReg < 16);

    iemInitExec(pVCpu, 0 /*fExecOpts*/);
    VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
    Assert(!pVCpu->iem.s.cActiveMappings);
    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
}


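/*
 * Usage sketch (illustrative only): a CR-access exit handler might decode the
 * exit qualification into a direction, a CR number and a GPR number, then
 * dispatch to the two interfaces above.  The variables fWrite, iCrReg, iGReg
 * and cbInstr are assumed to come from the decoded exit information.
 *
 *     VBOXSTRICTRC rcStrict;
 *     if (fWrite)   // guest executed "mov crX, reg"
 *         rcStrict = IEMExecDecodedMovCRxWrite(pVCpu, cbInstr, iCrReg, iGReg);
 *     else          // guest executed "mov reg, crX"
 *         rcStrict = IEMExecDecodedMovCRxRead(pVCpu, cbInstr, iGReg, iCrReg);
 */

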
/**
 * Interface for HM and EM to write to a DRx register.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   cbInstr     The instruction length in bytes.
 * @param   iDrReg      The debug register number (destination).
 * @param   iGReg       The general purpose register number (source).
 *
 * @remarks In ring-0 not all of the state needs to be synced in.
 */
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovDRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iDrReg, uint8_t iGReg)
{
    IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
    IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_DR7);
    Assert(iDrReg < 8);
    Assert(iGReg < 16);

    iemInitExec(pVCpu, 0 /*fExecOpts*/);
    VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Dd_Rd, iDrReg, iGReg);
    Assert(!pVCpu->iem.s.cActiveMappings);
    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
}


/**
 * Interface for HM and EM to read from a DRx register.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   cbInstr     The instruction length in bytes.
 * @param   iGReg       The general purpose register number (destination).
 * @param   iDrReg      The debug register number (source).
 *
 * @remarks In ring-0 not all of the state needs to be synced in.
 */
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovDRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iDrReg)
{
    IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
    IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_DR7);
    Assert(iDrReg < 8);
    Assert(iGReg < 16);

    iemInitExec(pVCpu, 0 /*fExecOpts*/);
    VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Dd, iGReg, iDrReg);
    Assert(!pVCpu->iem.s.cActiveMappings);
    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
}


/**
 * Interface for HM and EM to clear the CR0[TS] bit.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   cbInstr     The instruction length in bytes.
 *
 * @remarks In ring-0 not all of the state needs to be synced in.
 */
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPUCC pVCpu, uint8_t cbInstr)
{
    IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);

    iemInitExec(pVCpu, 0 /*fExecOpts*/);
    VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
    Assert(!pVCpu->iem.s.cActiveMappings);
    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
}


/**
 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   cbInstr     The instruction length in bytes.
 * @param   uValue      The value to load into CR0.
 * @param   GCPtrEffDst The guest-linear address if the LMSW instruction has a
 *                      memory operand. Otherwise pass NIL_RTGCPTR.
 *
 * @remarks In ring-0 not all of the state needs to be synced in.
 */
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t uValue, RTGCPTR GCPtrEffDst)
{
    IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);

    iemInitExec(pVCpu, 0 /*fExecOpts*/);
    VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_lmsw, uValue, GCPtrEffDst);
    Assert(!pVCpu->iem.s.cActiveMappings);
    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
}


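/*
 * Usage sketch (illustrative only): forwarding an intercepted "lmsw ax" to
 * the interface above.  Architecturally, LMSW only loads the low four CR0
 * bits (PE, MP, EM, TS) and cannot clear PE, which is why a 16-bit value
 * suffices; the register form has no memory operand, hence NIL_RTGCPTR.
 *
 *     uint16_t const uMsw = (uint16_t)pVCpu->cpum.GstCtx.rax;  // assumed source register
 *     VBOXSTRICTRC rcStrict = IEMExecDecodedLmsw(pVCpu, cbInstr, uMsw, NIL_RTGCPTR);
 */

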
/**
 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
 *
 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   cbInstr     The instruction length in bytes.
 * @remarks In ring-0 not all of the state needs to be synced in.
 * @thread  EMT(pVCpu)
 */
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPUCC pVCpu, uint8_t cbInstr)
{
    IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);

    iemInitExec(pVCpu, 0 /*fExecOpts*/);
    VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
    Assert(!pVCpu->iem.s.cActiveMappings);
    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
}


/**
 * Interface for HM and EM to emulate the WBINVD instruction.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   cbInstr     The instruction length in bytes.
 *
 * @remarks In ring-0 not all of the state needs to be synced in.
 */
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWbinvd(PVMCPUCC pVCpu, uint8_t cbInstr)
{
    IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);

    iemInitExec(pVCpu, 0 /*fExecOpts*/);
    VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wbinvd);
    Assert(!pVCpu->iem.s.cActiveMappings);
    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
}


/**
 * Interface for HM and EM to emulate the INVD instruction.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   cbInstr     The instruction length in bytes.
 *
 * @remarks In ring-0 not all of the state needs to be synced in.
 */
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvd(PVMCPUCC pVCpu, uint8_t cbInstr)
{
    IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);

    iemInitExec(pVCpu, 0 /*fExecOpts*/);
    VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invd);
    Assert(!pVCpu->iem.s.cActiveMappings);
    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
}


/**
 * Interface for HM and EM to emulate the INVLPG instruction.
 *
 * @returns Strict VBox status code.
 * @retval  VINF_PGM_SYNC_CR3
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   cbInstr     The instruction length in bytes.
 * @param   GCPtrPage   The effective address of the page to invalidate.
 *
 * @remarks In ring-0 not all of the state needs to be synced in.
 */
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpg(PVMCPUCC pVCpu, uint8_t cbInstr, RTGCPTR GCPtrPage)
{
    IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);

    iemInitExec(pVCpu, 0 /*fExecOpts*/);
    VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_invlpg, GCPtrPage);
    Assert(!pVCpu->iem.s.cActiveMappings);
    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
}


/**
 * Interface for HM and EM to emulate the INVPCID instruction.
 *
 * @returns Strict VBox status code.
 * @retval  VINF_PGM_SYNC_CR3
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   cbInstr     The instruction length in bytes.
 * @param   iEffSeg     The effective segment register.
 * @param   GCPtrDesc   The effective address of the INVPCID descriptor.
 * @param   uType       The invalidation type.
 *
 * @remarks In ring-0 not all of the state needs to be synced in.
 */
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvpcid(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPTR GCPtrDesc,
                                                 uint64_t uType)
{
    IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 4);

    iemInitExec(pVCpu, 0 /*fExecOpts*/);
    VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_invpcid, iEffSeg, GCPtrDesc, uType);
    Assert(!pVCpu->iem.s.cActiveMappings);
    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
}


/**
 * Interface for HM and EM to emulate the CPUID instruction.
 *
 * @returns Strict VBox status code.
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   cbInstr     The instruction length in bytes.
 *
 * @remarks Not all of the state needs to be synced in; the usual set plus RAX
 *          and RCX.
 */
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedCpuid(PVMCPUCC pVCpu, uint8_t cbInstr)
{
    IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
    IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX);

    iemInitExec(pVCpu, 0 /*fExecOpts*/);
    VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_cpuid);
    Assert(!pVCpu->iem.s.cActiveMappings);
    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
}


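/*
 * Usage sketch (illustrative only): CPUID takes its leaf in EAX and the
 * sub-leaf in ECX, which is why the context assertion above demands RAX and
 * RCX on top of the usual decoded-instruction mask.  A caller therefore only
 * needs to supply the instruction length:
 *
 *     VBOXSTRICTRC rcStrict = IEMExecDecodedCpuid(pVCpu, cbInstr);
 */

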
/**
 * Interface for HM and EM to emulate the RDPMC instruction.
 *
 * @returns Strict VBox status code.
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   cbInstr     The instruction length in bytes.
 *
 * @remarks Not all of the state needs to be synced in.
 */
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdpmc(PVMCPUCC pVCpu, uint8_t cbInstr)
{
    IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
    IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);

    iemInitExec(pVCpu, 0 /*fExecOpts*/);
    VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdpmc);
    Assert(!pVCpu->iem.s.cActiveMappings);
    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
}


/**
 * Interface for HM and EM to emulate the RDTSC instruction.
 *
 * @returns Strict VBox status code.
 * @retval  VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   cbInstr     The instruction length in bytes.
 *
 * @remarks Not all of the state needs to be synced in.
 */
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtsc(PVMCPUCC pVCpu, uint8_t cbInstr)
{
    IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
    IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);

    iemInitExec(pVCpu, 0 /*fExecOpts*/);
    VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtsc);
    Assert(!pVCpu->iem.s.cActiveMappings);
    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
}


/**
 * Interface for HM and EM to emulate the RDTSCP instruction.
 *
 * @returns Strict VBox status code.
 * @retval  VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   cbInstr     The instruction length in bytes.
 *
 * @remarks Not all of the state needs to be synced in.  It is recommended to
 *          include CPUMCTX_EXTRN_TSC_AUX to avoid an extra fetch call.
 */
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtscp(PVMCPUCC pVCpu, uint8_t cbInstr)
{
    IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
    IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_TSC_AUX);

    iemInitExec(pVCpu, 0 /*fExecOpts*/);
    VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtscp);
    Assert(!pVCpu->iem.s.cActiveMappings);
    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
}


/**
 * Interface for HM and EM to emulate the RDMSR instruction.
 *
 * @returns Strict VBox status code.
 * @retval  VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   cbInstr     The instruction length in bytes.
 *
 * @remarks Not all of the state needs to be synced in. Requires RCX and
 *          (currently) all MSRs.
 */
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
{
    IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
    IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_ALL_MSRS);

    iemInitExec(pVCpu, 0 /*fExecOpts*/);
    VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdmsr);
    Assert(!pVCpu->iem.s.cActiveMappings);
    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
}


/**
 * Interface for HM and EM to emulate the WRMSR instruction.
 *
 * @returns Strict VBox status code.
 * @retval  VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   cbInstr     The instruction length in bytes.
 *
 * @remarks Not all of the state needs to be synced in. Requires RCX, RAX, RDX,
 *          and (currently) all MSRs.
 */
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWrmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
{
    IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
    IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK
                        | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_ALL_MSRS);

    iemInitExec(pVCpu, 0 /*fExecOpts*/);
    VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wrmsr);
    Assert(!pVCpu->iem.s.cActiveMappings);
    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
}


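/*
 * Usage sketch (illustrative only): both MSR interfaces take their operands
 * from the guest context rather than from parameters: ECX selects the MSR,
 * and EDX:EAX carries the WRMSR value, which matches the context assertions
 * above.
 *
 *     // Guest executed WRMSR; ECX/EDX/EAX are already in the CPUM context.
 *     VBOXSTRICTRC rcStrict = IEMExecDecodedWrmsr(pVCpu, cbInstr);
 *     // Guest executed RDMSR; the result lands in the context RAX/RDX.
 *     rcStrict = IEMExecDecodedRdmsr(pVCpu, cbInstr);
 */

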
/**
 * Interface for HM and EM to emulate the MONITOR instruction.
 *
 * @returns Strict VBox status code.
 * @retval  VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   cbInstr     The instruction length in bytes.
 *
 * @remarks Not all of the state needs to be synced in.
 * @remarks ASSUMES the default segment of DS and no segment override prefixes
 *          are used.
 */
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMonitor(PVMCPUCC pVCpu, uint8_t cbInstr)
{
    IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
    IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);

    iemInitExec(pVCpu, 0 /*fExecOpts*/);
    VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_monitor, X86_SREG_DS);
    Assert(!pVCpu->iem.s.cActiveMappings);
    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
}


/**
 * Interface for HM and EM to emulate the MWAIT instruction.
 *
 * @returns Strict VBox status code.
 * @retval  VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   cbInstr     The instruction length in bytes.
 *
 * @remarks Not all of the state needs to be synced in.
 */
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMwait(PVMCPUCC pVCpu, uint8_t cbInstr)
{
    IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
    IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX);

    iemInitExec(pVCpu, 0 /*fExecOpts*/);
    VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_mwait);
    Assert(!pVCpu->iem.s.cActiveMappings);
    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
}


/**
 * Interface for HM and EM to emulate the HLT instruction.
 *
 * @returns Strict VBox status code.
 * @retval  VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   cbInstr     The instruction length in bytes.
 *
 * @remarks Not all of the state needs to be synced in.
 */
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedHlt(PVMCPUCC pVCpu, uint8_t cbInstr)
{
    IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);

    iemInitExec(pVCpu, 0 /*fExecOpts*/);
    VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_hlt);
    Assert(!pVCpu->iem.s.cActiveMappings);
    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
}


/**
 * Checks if IEM is in the process of delivering an event (interrupt or
 * exception).
 *
 * @returns true if we're in the process of raising an interrupt or exception,
 *          false otherwise.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   puVector    Where to store the vector associated with the
 *                      currently delivered event, optional.
 * @param   pfFlags     Where to store the event delivery flags (see
 *                      IEM_XCPT_FLAGS_XXX), optional.
 * @param   puErr       Where to store the error code associated with the
 *                      event, optional.
 * @param   puCr2       Where to store the CR2 associated with the event,
 *                      optional.
 * @remarks The caller should check the flags to determine if the error code and
 *          CR2 are valid for the event.
 */
VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPUCC pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr, uint64_t *puCr2)
{
    bool const fRaisingXcpt = pVCpu->iem.s.cXcptRecursions > 0;
    if (fRaisingXcpt)
    {
        if (puVector)
            *puVector = pVCpu->iem.s.uCurXcpt;
        if (pfFlags)
            *pfFlags = pVCpu->iem.s.fCurXcpt;
        if (puErr)
            *puErr = pVCpu->iem.s.uCurXcptErr;
        if (puCr2)
            *puCr2 = pVCpu->iem.s.uCurXcptCr2;
    }
    return fRaisingXcpt;
}

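/*
 * Usage sketch (illustrative only): querying the in-flight event, then using
 * the flags to decide which of the optional outputs are meaningful, as the
 * remark above suggests.  IEM_XCPT_FLAGS_ERR and IEM_XCPT_FLAGS_CR2 are
 * assumed to be the relevant IEM_XCPT_FLAGS_XXX names here.
 *
 *     uint8_t  uVector;
 *     uint32_t fFlags, uErr;
 *     uint64_t uCr2;
 *     if (IEMGetCurrentXcpt(pVCpu, &uVector, &fFlags, &uErr, &uCr2))
 *     {
 *         bool const fErrValid = RT_BOOL(fFlags & IEM_XCPT_FLAGS_ERR);
 *         bool const fCr2Valid = RT_BOOL(fFlags & IEM_XCPT_FLAGS_CR2);
 *         // ... forward the event info to the caller ...
 *     }
 */
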
#ifdef IN_RING3

/**
 * Handles the unlikely and probably fatal merge cases.
 *
 * @returns Merged status code.
 * @param   rcStrict        Current EM status code.
 * @param   rcStrictCommit  The IOM I/O or MMIO write commit status to merge
 *                          with @a rcStrict.
 * @param   iMemMap         The memory mapping index. For error reporting only.
 * @param   pVCpu           The cross context virtual CPU structure of the calling
 *                          thread, for error reporting only.
 */
DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
                                                          unsigned iMemMap, PVMCPUCC pVCpu)
{
    if (RT_FAILURE_NP(rcStrict))
        return rcStrict;

    if (RT_FAILURE_NP(rcStrictCommit))
        return rcStrictCommit;

    if (rcStrict == rcStrictCommit)
        return rcStrictCommit;

    AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
                           VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
                           pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
                           pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
                           pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
    return VERR_IOM_FF_STATUS_IPE;
}


/**
 * Helper for IEMR3ProcessForceFlag.
 *
 * @returns Merged status code.
 * @param   rcStrict        Current EM status code.
 * @param   rcStrictCommit  The IOM I/O or MMIO write commit status to merge
 *                          with @a rcStrict.
 * @param   iMemMap         The memory mapping index. For error reporting only.
 * @param   pVCpu           The cross context virtual CPU structure of the calling
 *                          thread, for error reporting only.
 */
DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPUCC pVCpu)
{
    /* Simple. */
    if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
        return rcStrictCommit;

    if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
        return rcStrict;

    /* EM scheduling status codes. */
    if (RT_LIKELY(   rcStrict >= VINF_EM_FIRST
                  && rcStrict <= VINF_EM_LAST))
    {
        if (RT_LIKELY(   rcStrictCommit >= VINF_EM_FIRST
                      && rcStrictCommit <= VINF_EM_LAST))
            return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
    }

    /* Unlikely */
    return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
}

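/*
 * Merge semantics, by example (illustrative only): EM scheduling codes are
 * ordered so that numerically smaller VINF_EM_* values have higher priority,
 * hence the "smaller wins" rule above.
 *
 *     iemR3MergeStatus(VINF_SUCCESS, VINF_EM_DBG_BREAKPOINT, 0, pVCpu)
 *         -> VINF_EM_DBG_BREAKPOINT   (first operand is plain success)
 *     iemR3MergeStatus(rcA, rcB, 0, pVCpu), both in the EM scheduling range
 *         -> RT_MIN(rcA, rcB)         (the higher-priority scheduling code)
 */
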
/**
 * Called by force-flag handling code when VMCPU_FF_IEM is set.
 *
 * @returns Merge between @a rcStrict and what the commit operation returned.
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   rcStrict    The status code returned by ring-0 or raw-mode.
 */
VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
{
    /*
     * Reset the pending commit.
     */
    AssertMsg(  (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
              & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
              ("%#x %#x %#x\n",
               pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);

    /*
     * Commit the pending bounce buffers (usually just one).
     */
    unsigned cBufs = 0;
    unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
    while (iMemMap-- > 0)
        if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
        {
            Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
            Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
            Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);

            uint16_t const  cbFirst  = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
            uint16_t const  cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
            uint8_t const  *pbBuf    = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];

            if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
            {
                VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
                                                            pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
                                                            pbBuf,
                                                            cbFirst,
                                                            PGMACCESSORIGIN_IEM);
                rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
                Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
                     iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
                     VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
            }

            if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
            {
                VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
                                                            pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
                                                            pbBuf + cbFirst,
                                                            cbSecond,
                                                            PGMACCESSORIGIN_IEM);
                rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
                Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
                     iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
                     VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
            }
            cBufs++;
            pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
        }

    AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
              ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
               pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
    pVCpu->iem.s.cActiveMappings = 0;
    return rcStrict;
}

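/*
 * Usage sketch (illustrative only): per the doc comment above, ring-3
 * force-flag processing is expected to funnel VMCPU_FF_IEM through this
 * entry point, roughly like so (the surrounding loop is hypothetical):
 *
 *     if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
 *         rcStrict = IEMR3ProcessForceFlag(pVM, pVCpu, rcStrict);
 */
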
#endif /* IN_RING3 */
